/*
 * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/allocation.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/freeList.hpp"
#include "memory/collectorPolicy.hpp"
#include "memory/filemap.hpp"
#include "memory/freeList.hpp"
#include "memory/gcLocker.hpp"
#include "memory/metachunk.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceGCThresholdUpdater.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "services/memTracker.hpp"
#include "services/memoryService.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;

// Set this constant to enable slow integrity checking of the free chunk lists
const bool metaspace_slow_verify = false;

size_t const allocation_from_dictionary_limit = 4 * K;

MetaWord* last_allocated = 0;

size_t Metaspace::_compressed_class_space_size;
const MetaspaceTracer* Metaspace::_tracer = NULL;

// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
  ZeroIndex = 0,
  SpecializedIndex = ZeroIndex,
  SmallIndex = SpecializedIndex + 1,
  MediumIndex = SmallIndex + 1,
  HumongousIndex = MediumIndex + 1,
  NumberOfFreeLists = 3,
  NumberOfInUseLists = 4
};

enum ChunkSizes {    // in words.
  ClassSpecializedChunk = 128,
  SpecializedChunk = 128,
  ClassSmallChunk = 256,
  SmallChunk = 512,
  ClassMediumChunk = 4 * K,
  MediumChunk = 8 * K
};

static ChunkIndex next_chunk_index(ChunkIndex i) {
  assert(i < NumberOfInUseLists, "Out of bound");
  return (ChunkIndex) (i+1);
}

volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

typedef class FreeList<Metachunk> ChunkList;

// Manages the global free lists of chunks.
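// Illustrative mapping from (non-class) chunk word sizes to the indices used
// below, based on the ChunkSizes and ChunkIndex enums above; the exact
// rounding is done by ChunkManager::list_index() and
// SpaceManager::calc_chunk_size():
//   128 words  (SpecializedChunk) -> SpecializedIndex
//   512 words  (SmallChunk)       -> SmallIndex
//   8 K words  (MediumChunk)      -> MediumIndex
//   anything larger               -> HumongousIndex (kept in a tree
//                                    dictionary, not a fixed-size free list)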
class ChunkManager : public CHeapObj<mtInternal> {
  friend class TestVirtualSpaceNodeTest;

  // Free lists of chunks of different sizes.
  //   SpecializedChunk
  //   SmallChunk
  //   MediumChunk
  //   HumongousChunk
  ChunkList _free_chunks[NumberOfFreeLists];

  //   HumongousChunk
  ChunkTreeDictionary _humongous_dictionary;

  // Total size (in words) and number of chunks held by this ChunkManager
  // across all of its free lists.
  size_t _free_chunks_total;
  size_t _free_chunks_count;

  void dec_free_chunks_total(size_t v) {
    assert(_free_chunks_count > 0 &&
             _free_chunks_total > 0,
             "About to go negative");
    Atomic::add_ptr(-1, &_free_chunks_count);
    jlong minus_v = (jlong) - (jlong) v;
    Atomic::add_ptr(minus_v, &_free_chunks_total);
  }

  // Debug support

  size_t sum_free_chunks();
  size_t sum_free_chunks_count();

  void locked_verify_free_chunks_total();
  void slow_locked_verify_free_chunks_total() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_total();
    }
  }
  void locked_verify_free_chunks_count();
  void slow_locked_verify_free_chunks_count() {
    if (metaspace_slow_verify) {
      locked_verify_free_chunks_count();
    }
  }
  void verify_free_chunks_count();

 public:

  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
      : _free_chunks_total(0), _free_chunks_count(0) {
    _free_chunks[SpecializedIndex].set_size(specialized_size);
    _free_chunks[SmallIndex].set_size(small_size);
    _free_chunks[MediumIndex].set_size(medium_size);
  }

  // Remove (allocate) a chunk of the given word size from the global freelist.
  Metachunk* chunk_freelist_allocate(size_t word_size);

  // Map a size to a list index assuming that there are lists
  // for special, small, medium, and humongous chunks.
  static ChunkIndex list_index(size_t size);

  // Remove the chunk from its freelist.  It is
  // expected to be on one of the _free_chunks[] lists.
  void remove_chunk(Metachunk* chunk);

  // Add the simple linked list of chunks to the freelist of chunks
  // of type index.
  void return_chunks(ChunkIndex index, Metachunk* chunks);

  // Total of the space in the free chunks list
  size_t free_chunks_total_words();
  size_t free_chunks_total_bytes();

  // Number of chunks in the free chunks list
  size_t free_chunks_count();

  void inc_free_chunks_total(size_t v, size_t count = 1) {
    Atomic::add_ptr(count, &_free_chunks_count);
    Atomic::add_ptr(v, &_free_chunks_total);
  }
  ChunkTreeDictionary* humongous_dictionary() {
    return &_humongous_dictionary;
  }

  ChunkList* free_chunks(ChunkIndex index);

  // Returns the list for the given chunk word size.
  ChunkList* find_free_chunks_list(size_t word_size);

  // Remove from a list by size.  Selects list based on size of chunk.
  Metachunk* free_chunks_get(size_t chunk_word_size);

#define index_bounds_check(index)                                       \
  assert(index == SpecializedIndex ||                                   \
         index == SmallIndex ||                                         \
         index == MediumIndex ||                                        \
         index == HumongousIndex, err_msg("Bad index: %d", (int) index))

  size_t num_free_chunks(ChunkIndex index) const {
    index_bounds_check(index);

    if (index == HumongousIndex) {
      return _humongous_dictionary.total_free_blocks();
    }

    ssize_t count = _free_chunks[index].count();
    return count == -1 ? 0 : (size_t) count;
  }

  size_t size_free_chunks_in_bytes(ChunkIndex index) const {
    index_bounds_check(index);

    size_t word_size = 0;
    if (index == HumongousIndex) {
      word_size = _humongous_dictionary.total_size();
    } else {
      const size_t size_per_chunk_in_words = _free_chunks[index].size();
      word_size = size_per_chunk_in_words * num_free_chunks(index);
    }

    return word_size * BytesPerWord;
  }

  MetaspaceChunkFreeListSummary chunk_free_list_summary() const {
    return MetaspaceChunkFreeListSummary(num_free_chunks(SpecializedIndex),
                                         num_free_chunks(SmallIndex),
                                         num_free_chunks(MediumIndex),
                                         num_free_chunks(HumongousIndex),
                                         size_free_chunks_in_bytes(SpecializedIndex),
                                         size_free_chunks_in_bytes(SmallIndex),
                                         size_free_chunks_in_bytes(MediumIndex),
                                         size_free_chunks_in_bytes(HumongousIndex));
  }

  // Debug support
  void verify();
  void slow_verify() {
    if (metaspace_slow_verify) {
      verify();
    }
  }
  void locked_verify();
  void slow_locked_verify() {
    if (metaspace_slow_verify) {
      locked_verify();
    }
  }
  void verify_free_chunks_total();

  void locked_print_free_chunks(outputStream* st);
  void locked_print_sum_free_chunks(outputStream* st);

  void print_on(outputStream* st) const;
};

// Used to manage the free list of Metablocks (a block corresponds
// to the allocation of a quantum of metadata).
class BlockFreelist VALUE_OBJ_CLASS_SPEC {
  BlockTreeDictionary* _dictionary;

  // Only allocate and split from freelist if the size of the allocation
  // is at least 1/4th the size of the available block.
  const static int WasteMultiplier = 4;

  // Accessors
  BlockTreeDictionary* dictionary() const { return _dictionary; }

 public:
  BlockFreelist();
  ~BlockFreelist();

  // Get and return a block to the free list
  MetaWord* get_block(size_t word_size);
  void return_block(MetaWord* p, size_t word_size);

  size_t total_size() {
    if (dictionary() == NULL) {
      return 0;
    } else {
      return dictionary()->total_size();
    }
  }

  void print_on(outputStream* st) const;
};

// A VirtualSpaceList node.
class VirtualSpaceNode : public CHeapObj<mtClass> {
  friend class VirtualSpaceList;

  // Link to next VirtualSpaceNode
  VirtualSpaceNode* _next;

  // total in the VirtualSpace
  MemRegion _reserved;
  ReservedSpace _rs;
  VirtualSpace _virtual_space;
  // Address of the next available space in _virtual_space
  MetaWord* _top;
  // count of chunks contained in this VirtualSpace
  uintx _container_count;

  // Convenience functions to access the _virtual_space
  char* low()  const { return virtual_space()->low(); }
  char* high() const { return virtual_space()->high(); }

  // The first Metachunk will be allocated at the bottom of the
  // VirtualSpace
  Metachunk* first_chunk() { return (Metachunk*) bottom(); }

  // Committed but unused space in the virtual space
  size_t free_words_in_vs() const;
 public:

  VirtualSpaceNode(size_t byte_size);
  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
  ~VirtualSpaceNode();

  // Convenience functions for logical bottom and end
  MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
  MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }

  bool contains(const void* ptr) { return ptr >= low() && ptr < high(); }

  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }

  bool is_pre_committed() const { return _virtual_space.special(); }

  // Accessors
  VirtualSpaceNode* next() { return _next; }
  void set_next(VirtualSpaceNode* v) { _next = v; }

  void set_reserved(MemRegion const v) { _reserved = v; }
  void set_top(MetaWord* v) { _top = v; }

  // Accessors
  MemRegion* reserved() { return &_reserved; }
  VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }

  // Returns true if "word_size" is available in the VirtualSpace
  bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }

  MetaWord* top() const { return _top; }
  void inc_top(size_t word_size) { _top += word_size; }

  uintx container_count() { return _container_count; }
  void inc_container_count();
  void dec_container_count();
#ifdef ASSERT
  uint container_count_slow();
  void verify_container_count();
#endif

  // used and capacity in this single entry in the list
  size_t used_words_in_vs() const;
  size_t capacity_words_in_vs() const;

  bool initialize();

  // get space from the virtual space
  Metachunk* take_from_committed(size_t chunk_word_size);

  // Allocate a chunk from the virtual space and return it.
  Metachunk* get_chunk_vs(size_t chunk_word_size);

  // Expands/shrinks the committed space in a virtual space.  Delegates
  // to VirtualSpace
  bool expand_by(size_t min_words, size_t preferred_words);

  // In preparation for deleting this node, remove all the chunks
  // in the node from any freelist.
  void purge(ChunkManager* chunk_manager);

  // If an allocation doesn't fit in the current node a new node is created.
  // Allocate chunks out of the remaining committed space in this node
  // to avoid wasting that memory.
  // This always adds up because all the chunk sizes are multiples of
  // the smallest chunk size.
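  // For illustration (non-class chunk sizes as defined above): retiring a node
  // with 9216 committed-but-unused words would hand back one 8 K-word
  // MediumChunk and two 512-word SmallChunks via ChunkManager::return_chunks(),
  // leaving no free words behind.  The figures are an example only.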
  void retire(ChunkManager* chunk_manager);

#ifdef ASSERT
  // Debug support
  void mangle();
#endif

  void print_on(outputStream* st) const;
};

#define assert_is_ptr_aligned(ptr, alignment) \
  assert(is_ptr_aligned(ptr, alignment),      \
         err_msg(PTR_FORMAT " is not aligned to " \
                 SIZE_FORMAT, ptr, alignment))

#define assert_is_size_aligned(size, alignment) \
  assert(is_size_aligned(size, alignment),      \
         err_msg(SIZE_FORMAT " is not aligned to " \
                 SIZE_FORMAT, size, alignment))


// Decide if large pages should be committed when the memory is reserved.
static bool should_commit_large_pages_when_reserving(size_t bytes) {
  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
    size_t words = bytes / BytesPerWord;
    bool is_class = false; // We never reserve large pages for the class space.
    if (MetaspaceGC::can_expand(words, is_class) &&
        MetaspaceGC::allowed_expansion() >= words) {
      return true;
    }
  }

  return false;
}

// bytes is the size of the associated virtual space.
VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
  assert_is_size_aligned(bytes, Metaspace::reserve_alignment());

  // This allocates memory with mmap.  For DumpSharedSpaces, try to reserve
  // a configurable address, generally at the top of the Java heap so other
  // memory addresses don't conflict.
  if (DumpSharedSpaces) {
    bool large_pages = false; // No large pages when dumping the CDS archive.
    char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0);
    if (_rs.is_reserved()) {
      assert(shared_base == 0 || _rs.base() == shared_base, "should match");
    } else {
      // Get a mmap region anywhere if the SharedBaseAddress fails.
      _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
    }
    MetaspaceShared::set_shared_rs(&_rs);
  } else {
    bool large_pages = should_commit_large_pages_when_reserving(bytes);

    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
  }

  if (_rs.is_reserved()) {
    assert(_rs.base() != NULL, "Catch if we get a NULL address");
    assert(_rs.size() != 0, "Catch if we get a 0 size");
    assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
    assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());

    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
  }
}

void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk ) {
    assert(chunk->is_tagged_free(), "Should be tagged free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    assert(chunk->next() == NULL &&
           chunk->prev() == NULL,
           "Was not removed from its list");
    chunk = (Metachunk*) next;
  }
}

#ifdef ASSERT
uint VirtualSpaceNode::container_count_slow() {
  uint count = 0;
  Metachunk* chunk = first_chunk();
  Metachunk* invalid_chunk = (Metachunk*) top();
  while (chunk < invalid_chunk ) {
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    // Don't count the chunks on the free lists.  Those are
    // still part of the VirtualSpaceNode but not currently
    // counted.
    if (!chunk->is_tagged_free()) {
      count++;
    }
    chunk = (Metachunk*) next;
  }
  return count;
}
#endif

// List of VirtualSpaces for metadata allocation.
class VirtualSpaceList : public CHeapObj<mtClass> {
  friend class VirtualSpaceNode;

  enum VirtualSpaceSizes {
    VirtualSpaceSize = 256 * K
  };

  // Head of the list
  VirtualSpaceNode* _virtual_space_list;
  // virtual space currently being used for allocations
  VirtualSpaceNode* _current_virtual_space;

  // Is this VirtualSpaceList used for the compressed class space
  bool _is_class;

  // Sum of reserved and committed memory in the virtual spaces
  size_t _reserved_words;
  size_t _committed_words;

  // Number of virtual spaces
  size_t _virtual_space_count;

  ~VirtualSpaceList();

  VirtualSpaceNode* virtual_space_list() const { return _virtual_space_list; }

  void set_virtual_space_list(VirtualSpaceNode* v) {
    _virtual_space_list = v;
  }
  void set_current_virtual_space(VirtualSpaceNode* v) {
    _current_virtual_space = v;
  }

  void link_vs(VirtualSpaceNode* new_entry);

  // Get another virtual space and add it to the list.  This
  // is typically prompted by a failed attempt to allocate a chunk
  // and is typically followed by the allocation of a chunk.
  bool create_new_virtual_space(size_t vs_word_size);

  // Chunk up the unused committed space in the current
  // virtual space and add the chunks to the free list.
  void retire_current_virtual_space();

 public:
  VirtualSpaceList(size_t word_size);
  VirtualSpaceList(ReservedSpace rs);

  size_t free_bytes();

  Metachunk* get_new_chunk(size_t word_size,
                           size_t grow_chunks_by_words,
                           size_t medium_chunk_bunch);

  bool expand_node_by(VirtualSpaceNode* node,
                      size_t min_words,
                      size_t preferred_words);

  bool expand_by(size_t min_words,
                 size_t preferred_words);

  VirtualSpaceNode* current_virtual_space() {
    return _current_virtual_space;
  }

  bool is_class() const { return _is_class; }

  bool initialization_succeeded() { return _virtual_space_list != NULL; }

  size_t reserved_words()  { return _reserved_words; }
  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
  size_t committed_words() { return _committed_words; }
  size_t committed_bytes() { return committed_words() * BytesPerWord; }

  void inc_reserved_words(size_t v);
  void dec_reserved_words(size_t v);
  void inc_committed_words(size_t v);
  void dec_committed_words(size_t v);
  void inc_virtual_space_count();
  void dec_virtual_space_count();

  bool contains(const void* ptr);

  // Unlink empty VirtualSpaceNodes and free them.
  void purge(ChunkManager* chunk_manager);

  void print_on(outputStream* st) const;

  class VirtualSpaceListIterator : public StackObj {
    VirtualSpaceNode* _virtual_spaces;
   public:
    VirtualSpaceListIterator(VirtualSpaceNode* virtual_spaces) :
      _virtual_spaces(virtual_spaces) {}

    bool repeat() {
      return _virtual_spaces != NULL;
    }

    VirtualSpaceNode* get_next() {
      VirtualSpaceNode* result = _virtual_spaces;
      if (_virtual_spaces != NULL) {
        _virtual_spaces = _virtual_spaces->next();
      }
      return result;
    }
  };
};

class Metadebug : AllStatic {
  // Debugging support for Metaspaces
  static int _allocation_fail_alot_count;

 public:

  static void init_allocation_fail_alot_count();
#ifdef ASSERT
  static bool test_metadata_failure();
#endif
};

int Metadebug::_allocation_fail_alot_count = 0;

// SpaceManager - used by Metaspace to handle allocations
class SpaceManager : public CHeapObj<mtClass> {
  friend class Metaspace;
  friend class Metadebug;

 private:

  // protects allocations
  Mutex* const _lock;

  // Type of metadata allocated.
  Metaspace::MetadataType _mdtype;

  // List of chunks in use by this SpaceManager.  Allocations
  // are done from the current chunk.  The list is used for deallocating
  // chunks when the SpaceManager is freed.
  Metachunk* _chunks_in_use[NumberOfInUseLists];
  Metachunk* _current_chunk;

  // Number of small chunks to allocate to a manager
  // If class space manager, small chunks are unlimited
  static uint const _small_chunk_limit;

  // Sum of all space allocated as blocks, in words
  size_t _allocated_blocks_words;

  // Sum of the sizes (in words) and count of all chunks allocated to this manager
  size_t _allocated_chunks_words;
  size_t _allocated_chunks_count;

  // Free lists of blocks are per SpaceManager since they
  // are assumed to be in chunks in use by the SpaceManager
  // and all chunks in use by a SpaceManager are freed when
  // the class loader using the SpaceManager is collected.
  BlockFreelist _block_freelists;

  // protects virtualspace and chunk expansions
  static const char*  _expand_lock_name;
  static const int    _expand_lock_rank;
  static Mutex* const _expand_lock;

 private:
  // Accessors
  Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
  void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
    _chunks_in_use[index] = v;
  }

  BlockFreelist* block_freelists() const {
    return (BlockFreelist*) &_block_freelists;
  }

  Metaspace::MetadataType mdtype() { return _mdtype; }

  VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }

  Metachunk* current_chunk() const { return _current_chunk; }
  void set_current_chunk(Metachunk* v) {
    _current_chunk = v;
  }

  Metachunk* find_current_chunk(size_t word_size);

  // Add chunk to the list of chunks in use
  void add_chunk(Metachunk* v, bool make_current);
  void retire_current_chunk();

  Mutex* lock() const { return _lock; }

  const char* chunk_size_name(ChunkIndex index) const;

 protected:
  void initialize();

 public:
  SpaceManager(Metaspace::MetadataType mdtype,
               Mutex* lock);
  ~SpaceManager();

  enum ChunkMultiples {
    MediumChunkMultiple = 4
  };

  bool is_class() { return _mdtype == Metaspace::ClassType; }

  // Accessors
  size_t specialized_chunk_size() { return (size_t) is_class() ? ClassSpecializedChunk : SpecializedChunk; }
  size_t small_chunk_size()       { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; }
  size_t medium_chunk_size()      { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; }
  size_t medium_chunk_bunch()     { return medium_chunk_size() * MediumChunkMultiple; }

  size_t smallest_chunk_size()    { return specialized_chunk_size(); }

  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
  size_t allocated_chunks_count() const { return _allocated_chunks_count; }

  bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }

  static Mutex* expand_lock() { return _expand_lock; }

  // Increment the per Metaspace and global running sums for Metachunks
  // by the given size.  This is used when a Metachunk is added to
  // the in-use list.
  void inc_size_metrics(size_t words);
  // Increment the per Metaspace and global running sums of Metablocks by the
  // given size.  This is used when a Metablock is allocated.
  void inc_used_metrics(size_t words);
  // Delete the portion of the running sums for this SpaceManager.  That is,
  // the global running sums for the Metachunks and Metablocks are
  // decremented for all the Metachunks in use by this SpaceManager.
  void dec_total_from_size_metrics();

  // Set the sizes for the initial chunks.
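  // (For illustration, based on the switch in the implementation below: the
  //  boot class loader starts with Metaspace::first_chunk_word_size(), the
  //  shared read-only/read-write metaspaces are sized from SharedReadOnlySize
  //  and SharedReadWriteSize, and anonymous/reflection loaders start with a
  //  128-word SpecializedChunk.)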
  void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
                               size_t* chunk_word_size,
                               size_t* class_chunk_word_size);

  size_t sum_capacity_in_chunks_in_use() const;
  size_t sum_used_in_chunks_in_use() const;
  size_t sum_free_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use() const;
  size_t sum_waste_in_chunks_in_use(ChunkIndex index) const;

  size_t sum_count_in_chunks_in_use();
  size_t sum_count_in_chunks_in_use(ChunkIndex i);

  Metachunk* get_new_chunk(size_t word_size, size_t grow_chunks_by_words);

  // Block allocation and deallocation.
  // Allocates a block from the current chunk
  MetaWord* allocate(size_t word_size);

  // Helper for allocations
  MetaWord* allocate_work(size_t word_size);

  // Returns a block to the per manager freelist
  void deallocate(MetaWord* p, size_t word_size);

  // Based on the allocation size and a minimum chunk size,
  // compute the chunk size to request (for expanding space for chunk allocation).
  size_t calc_chunk_size(size_t allocation_word_size);

  // Called when an allocation from the current chunk fails.
  // Gets a new chunk (may require getting a new virtual space),
  // and allocates from that chunk.
  MetaWord* grow_and_allocate(size_t word_size);

  // Notify memory usage to MemoryService.
  void track_metaspace_memory_usage();

  // debugging support.

  void dump(outputStream* const out) const;
  void print_on(outputStream* st) const;
  void locked_print_chunks_in_use_on(outputStream* st) const;

  void verify();
  void verify_chunk_size(Metachunk* chunk);
  NOT_PRODUCT(void mangle_freed_chunks();)
#ifdef ASSERT
  void verify_allocated_blocks_words();
#endif

  size_t get_raw_word_size(size_t word_size) {
    size_t byte_size = word_size * BytesPerWord;

    size_t raw_bytes_size = MAX2(byte_size, sizeof(Metablock));
    raw_bytes_size = align_size_up(raw_bytes_size, Metachunk::object_alignment());

    size_t raw_word_size = raw_bytes_size / BytesPerWord;
    assert(raw_word_size * BytesPerWord == raw_bytes_size, "Size problem");

    return raw_word_size;
  }
};

uint const SpaceManager::_small_chunk_limit = 4;

const char* SpaceManager::_expand_lock_name =
  "SpaceManager chunk allocation lock";
const int SpaceManager::_expand_lock_rank = Monitor::leaf - 1;
Mutex* const SpaceManager::_expand_lock =
  new Mutex(SpaceManager::_expand_lock_rank,
            SpaceManager::_expand_lock_name,
            Mutex::_allow_vm_block_flag);

void VirtualSpaceNode::inc_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count++;
  assert(_container_count == container_count_slow(),
         err_msg("Inconsistency in container_count _container_count " SIZE_FORMAT
                 " container_count_slow() " SIZE_FORMAT,
                 _container_count, container_count_slow()));
}

void VirtualSpaceNode::dec_container_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _container_count--;
}

#ifdef ASSERT
void VirtualSpaceNode::verify_container_count() {
  assert(_container_count == container_count_slow(),
         err_msg("Inconsistency in container_count _container_count " SIZE_FORMAT
                 " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
}
#endif

// BlockFreelist methods

BlockFreelist::BlockFreelist() : _dictionary(NULL) {}

BlockFreelist::~BlockFreelist() {
  if (_dictionary != NULL) {
    if (Verbose && TraceMetadataChunkAllocation) {
      _dictionary->print_free_lists(gclog_or_tty);
    }
    delete _dictionary;
  }
}

void BlockFreelist::return_block(MetaWord* p, size_t word_size) {
  Metablock* free_chunk = ::new (p) Metablock(word_size);
  if (dictionary() == NULL) {
    _dictionary = new BlockTreeDictionary();
  }
  dictionary()->return_chunk(free_chunk);
}

MetaWord* BlockFreelist::get_block(size_t word_size) {
  if (dictionary() == NULL) {
    return NULL;
  }

  if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
    // Dark matter.  Too small for dictionary.
    return NULL;
  }

  Metablock* free_block =
    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
  if (free_block == NULL) {
    return NULL;
  }

  const size_t block_size = free_block->size();
  if (block_size > WasteMultiplier * word_size) {
    return_block((MetaWord*)free_block, block_size);
    return NULL;
  }

  MetaWord* new_block = (MetaWord*)free_block;
  assert(block_size >= word_size, "Incorrect size of block from freelist");
  const size_t unused = block_size - word_size;
  if (unused >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
    return_block(new_block + word_size, unused);
  }

  return new_block;
}

void BlockFreelist::print_on(outputStream* st) const {
  if (dictionary() == NULL) {
    return;
  }
  dictionary()->print_free_lists(st);
}

// VirtualSpaceNode methods

VirtualSpaceNode::~VirtualSpaceNode() {
  _rs.release();
#ifdef ASSERT
  size_t word_size = sizeof(*this) / BytesPerWord;
  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
#endif
}

size_t VirtualSpaceNode::used_words_in_vs() const {
  return pointer_delta(top(), bottom(), sizeof(MetaWord));
}

// Space committed in the VirtualSpace
size_t VirtualSpaceNode::capacity_words_in_vs() const {
  return pointer_delta(end(), bottom(), sizeof(MetaWord));
}

size_t VirtualSpaceNode::free_words_in_vs() const {
  return pointer_delta(end(), top(), sizeof(MetaWord));
}

// Allocates the chunk from the virtual space only.
// This interface is also used internally for debugging.  Not all
// chunks removed here are necessarily used for allocation.
Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) {
  // Bottom of the new chunk
  MetaWord* chunk_limit = top();
  assert(chunk_limit != NULL, "Not safe to call this method");

  // The virtual spaces are always expanded by the
  // commit granularity to enforce the following condition.
  // Without this the is_available check will not work correctly.
  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
         "The committed memory doesn't match the expanded memory.");

  if (!is_available(chunk_word_size)) {
    if (TraceMetadataChunkAllocation) {
      gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size);
      // Dump some information about the virtual space that is nearly full
      print_on(gclog_or_tty);
    }
    return NULL;
  }

  // Take the space (bump top on the current virtual space).
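  // (Illustrative: for a 512-word SmallChunk request, inc_top() below advances
  //  _top by 512 MetaWords, and the Metachunk header is then constructed in
  //  place at the old top, which becomes the new chunk's bottom.)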
  inc_top(chunk_word_size);

  // Initialize the chunk
  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
  return result;
}


// Expand the virtual space (commit more of the reserved space)
bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
  size_t min_bytes = min_words * BytesPerWord;
  size_t preferred_bytes = preferred_words * BytesPerWord;

  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();

  if (uncommitted < min_bytes) {
    return false;
  }

  size_t commit = MIN2(preferred_bytes, uncommitted);
  bool result = virtual_space()->expand_by(commit, false);

  assert(result, "Failed to commit memory");

  return result;
}

Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  Metachunk* result = take_from_committed(chunk_word_size);
  if (result != NULL) {
    inc_container_count();
  }
  return result;
}

bool VirtualSpaceNode::initialize() {

  if (!_rs.is_reserved()) {
    return false;
  }

  // These are necessary restrictions to make sure that the virtual space always
  // grows in steps of Metaspace::commit_alignment(). If both base and size are
  // aligned only the middle alignment of the VirtualSpace is used.
  assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
  assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());

  // ReservedSpaces marked as special will have the entire memory
  // pre-committed. Setting a committed size will make sure that
  // committed_size and actual_committed_size agree.
  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;

  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
                                                             Metaspace::commit_alignment());
  if (result) {
    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
           "Checking that the pre-committed memory was registered by the VirtualSpace");

    set_top((MetaWord*)virtual_space()->low());
    set_reserved(MemRegion((HeapWord*)_rs.base(),
                           (HeapWord*)(_rs.base() + _rs.size())));

    assert(reserved()->start() == (HeapWord*) _rs.base(),
           err_msg("Reserved start was not set properly " PTR_FORMAT
                   " != " PTR_FORMAT, reserved()->start(), _rs.base()));
    assert(reserved()->word_size() == _rs.size() / BytesPerWord,
           err_msg("Reserved size was not set properly " SIZE_FORMAT
                   " != " SIZE_FORMAT, reserved()->word_size(),
                   _rs.size() / BytesPerWord));
  }

  return result;
}

void VirtualSpaceNode::print_on(outputStream* st) const {
  size_t used = used_words_in_vs();
  size_t capacity = capacity_words_in_vs();
  VirtualSpace* vs = virtual_space();
  st->print_cr("   space @ " PTR_FORMAT " " SIZE_FORMAT "K, %3d%% used "
               "[" PTR_FORMAT ", " PTR_FORMAT ", "
               PTR_FORMAT ", " PTR_FORMAT ")",
               vs, capacity / K,
               capacity == 0 ? 0 : used * 100 / capacity,
               bottom(), top(), end(),
               vs->high_boundary());
}

#ifdef ASSERT
void VirtualSpaceNode::mangle() {
  size_t word_size = capacity_words_in_vs();
  Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1);
}
#endif // ASSERT

// VirtualSpaceList methods
// Space allocated from the VirtualSpace

VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

void VirtualSpaceList::inc_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words + v;
}
void VirtualSpaceList::dec_reserved_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _reserved_words = _reserved_words - v;
}

#define assert_committed_below_limit()                                  \
  assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize,           \
         err_msg("Too much committed memory. Committed: " SIZE_FORMAT  \
                 " limit (MaxMetaspaceSize): " SIZE_FORMAT,             \
                 MetaspaceAux::committed_bytes(), MaxMetaspaceSize));

void VirtualSpaceList::inc_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words + v;

  assert_committed_below_limit();
}
void VirtualSpaceList::dec_committed_words(size_t v) {
  assert_lock_strong(SpaceManager::expand_lock());
  _committed_words = _committed_words - v;

  assert_committed_below_limit();
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count++;
}
void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(SpaceManager::expand_lock());
  _virtual_space_count--;
}

void ChunkManager::remove_chunk(Metachunk* chunk) {
  size_t word_size = chunk->word_size();
  ChunkIndex index = list_index(word_size);
  if (index != HumongousIndex) {
    free_chunks(index)->remove_chunk(chunk);
  } else {
    humongous_dictionary()->remove_chunk(chunk);
  }

  // Chunk is being removed from the chunks free list.
  dec_free_chunks_total(chunk->word_size());
}

// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count.  Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
  assert_lock_strong(SpaceManager::expand_lock());
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager);
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      purged_vsl = vsl;
      delete vsl;
    } else {
      prev_vsl = vsl;
    }
  }
#ifdef ASSERT
  if (purged_vsl != NULL) {
    // List should be stable enough to use an iterator here.
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* vsl = iter.get_next();
      assert(vsl != purged_vsl, "Purge of vsl failed");
    }
  }
#endif
}


// This function looks at the mmap regions in the metaspace without locking.
// The chunks are added with store ordering and not deleted except at
// unloading time during a safepoint.
bool VirtualSpaceList::contains(const void* ptr) {
  // List should be stable enough to use an iterator here because removing virtual
  // space nodes is only allowed at a safepoint.
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsn = iter.get_next();
    if (vsn->contains(ptr)) {
      return true;
    }
  }
  return false;
}

void VirtualSpaceList::retire_current_virtual_space() {
  assert_lock_strong(SpaceManager::expand_lock());

  VirtualSpaceNode* vsn = current_virtual_space();

  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
                                  Metaspace::chunk_manager_metadata();

  vsn->retire(cm);
}

void VirtualSpaceNode::retire(ChunkManager* chunk_manager) {
  for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) {
    ChunkIndex index = (ChunkIndex)i;
    size_t chunk_size = chunk_manager->free_chunks(index)->size();

    while (free_words_in_vs() >= chunk_size) {
      DEBUG_ONLY(verify_container_count();)
      Metachunk* chunk = get_chunk_vs(chunk_size);
      assert(chunk != NULL, "allocation should have been successful");

      chunk_manager->return_chunks(index, chunk);
      chunk_manager->inc_free_chunks_total(chunk_size);
      DEBUG_ONLY(verify_container_count();)
    }
  }
  assert(free_words_in_vs() == 0, "should be empty now");
}

VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                   _is_class(false),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  create_new_virtual_space(word_size);
}

VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _is_class(true),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  bool succeeded = class_entry->initialize();
  if (succeeded) {
    link_vs(class_entry);
  }
}

size_t VirtualSpaceList::free_bytes() {
  return virtual_space_list()->free_words_in_vs() * BytesPerWord;
}

// Allocate another meta virtual space and add it to the list.
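// (Note: VirtualSpaceSize above is 256 K words, so on a 64-bit VM a new node
//  normally reserves at least 2 MB; expand_by() below may round the request up
//  further to Metaspace::reserve_alignment_words().  These figures are
//  illustrative, not measured values.)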
bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
  assert_lock_strong(SpaceManager::expand_lock());

  if (is_class()) {
    assert(false, "We currently don't support more than one VirtualSpace for"
                  " the compressed class space. The initialization of the"
                  " CCS uses another code path and should not hit this path.");
    return false;
  }

  if (vs_word_size == 0) {
    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
    return false;
  }

  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    assert(new_entry->reserved_words() == vs_word_size,
           "Reserved memory size differs from requested memory size");
    // ensure lock-free iteration sees fully initialized node
    OrderAccess::storestore();
    link_vs(new_entry);
    return true;
  }
}

void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_reserved_words(new_entry->reserved_words());
  inc_committed_words(new_entry->committed_words());
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  if (TraceMetavirtualspaceAllocation && Verbose) {
    VirtualSpaceNode* vsl = current_virtual_space();
    vsl->print_on(gclog_or_tty);
  }
}

bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
                                      size_t min_words,
                                      size_t preferred_words) {
  size_t before = node->committed_words();

  bool result = node->expand_by(min_words, preferred_words);

  size_t after = node->committed_words();

  // after and before can be the same if the memory was pre-committed.
  assert(after >= before, "Inconsistency");
  inc_committed_words(after - before);

  return result;
}

bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  assert_is_size_aligned(min_words, Metaspace::commit_alignment_words());
  assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
  assert(min_words <= preferred_words, "Invalid arguments");

  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
    return false;
  }

  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  if (allowed_expansion_words < min_words) {
    return false;
  }

  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);

  // Commit more memory from the current virtual space.
  bool vs_expanded = expand_node_by(current_virtual_space(),
                                    min_words,
                                    max_expansion_words);
  if (vs_expanded) {
    return true;
  }
  retire_current_virtual_space();

  // Get another virtual space.
  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());

  if (create_new_virtual_space(grow_vs_words)) {
    if (current_virtual_space()->is_pre_committed()) {
      // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
             "The new VirtualSpace was pre-committed, so it "
             "should be large enough to fit the alloc request.");
      return true;
    }

    return expand_node_by(current_virtual_space(),
                          min_words,
                          max_expansion_words);
  }

  return false;
}

Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
                                           size_t grow_chunks_by_words,
                                           size_t medium_chunk_bunch) {

  // Allocate a chunk out of the current virtual space.
  Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);

  if (next != NULL) {
    return next;
  }

  // The expand amount is currently only determined by the requested sizes
  // and not how much committed memory is left in the current virtual space.

  size_t min_word_size = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
  size_t preferred_word_size = align_size_up(medium_chunk_bunch, Metaspace::commit_alignment_words());
  if (min_word_size >= preferred_word_size) {
    // Can happen when humongous chunks are allocated.
    preferred_word_size = min_word_size;
  }

  bool expanded = expand_by(min_word_size, preferred_word_size);
  if (expanded) {
    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
    assert(next != NULL, "The allocation was expected to succeed after the expansion");
  }

  return next;
}

void VirtualSpaceList::print_on(outputStream* st) const {
  if (TraceMetadataChunkAllocation && Verbose) {
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* node = iter.get_next();
      node->print_on(st);
    }
  }
}

// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With perm gen the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GC's.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_size_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_size_aligned(delta, Metaspace::commit_alignment());

  return delta;
}

size_t MetaspaceGC::capacity_until_GC() {
  size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}

size_t MetaspaceGC::inc_capacity_until_GC(size_t v) {
  assert_is_size_aligned(v, Metaspace::commit_alignment());

  return (size_t)Atomic::add_ptr(v, &_capacity_until_GC);
}

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_size_aligned(v, Metaspace::commit_alignment());

  return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
}

void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), (size_t)MetaspaceSize);
}

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    return false;
  }

  return true;
}

size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceAux::committed_bytes();
  size_t capacity_until_gc = capacity_until_GC();

  assert(capacity_until_gc >= committed_bytes,
         err_msg("capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
                 capacity_until_gc, committed_bytes));

  size_t left_until_max = MaxMetaspaceSize - committed_bytes;
  size_t left_until_GC = capacity_until_gc - committed_bytes;
  size_t left_to_commit = MIN2(left_until_GC, left_until_max);

  return left_to_commit / BytesPerWord;
}

void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // Using committed_bytes() for used_after_gc is an overestimation, since the
  // chunk free lists are included in committed_bytes() and the memory in an
  // un-fragmented chunk free list is available for future allocations.
  // However, if the chunk free lists become fragmented, then the memory may
  // not be available for future allocations and the memory is therefore "in use".
  // Including the chunk free lists in the definition of "in use" is therefore
  // necessary. Not including the chunk free lists can cause capacity_until_GC to
  // shrink below committed_bytes() and this has caused serious bugs in the past.
  const size_t used_after_gc = MetaspaceAux::committed_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  (size_t)MetaspaceSize);

  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
    gclog_or_tty->print_cr("  "
                           "  minimum_free_percentage: %6.2f"
                           "  maximum_used_percentage: %6.2f",
                           minimum_free_percentage,
                           maximum_used_percentage);
    gclog_or_tty->print_cr("  "
                           "  used_after_gc : %6.1fKB",
                           used_after_gc / (double) K);
  }


  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity below the metaspace HWM, then
    // increment the HWM.
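    // (Illustrative numbers only: with used_after_gc = 70 MB and
    //  MinMetaspaceFreeRatio = 40, minimum_desired_capacity is about
    //  70 MB / 0.6 ~= 117 MB; if capacity_until_GC is 100 MB, the HWM is
    //  raised by roughly 17 MB, rounded up to the commit alignment.)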
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      size_t new_capacity_until_GC = MetaspaceGC::inc_capacity_until_GC(expand_bytes);
      Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                               new_capacity_until_GC,
                                               MetaspaceGCThresholdUpdater::ComputeNewSize);
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("    expanding:"
                               "  minimum_desired_capacity: %6.1fKB"
                               "  expand_bytes: %6.1fKB"
                               "  MinMetaspaceExpansion: %6.1fKB"
                               "  new metaspace HWM: %6.1fKB",
                               minimum_desired_capacity / (double) K,
                               expand_bytes / (double) K,
                               MinMetaspaceExpansion / (double) K,
                               new_capacity_until_GC / (double) K);
      }
    }
    return;
  }

  // No expansion, now see if we want to shrink
  // We would never want to shrink more than this
  assert(capacity_until_GC >= minimum_desired_capacity,
         err_msg(SIZE_FORMAT " >= " SIZE_FORMAT,
                 capacity_until_GC, minimum_desired_capacity));
  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;

  // Should shrinking be considered?
  if (MaxMetaspaceFreeRatio < 100) {
    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    (size_t)MetaspaceSize);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("  "
                             "  maximum_free_percentage: %6.2f"
                             "  minimum_used_percentage: %6.2f",
                             maximum_free_percentage,
                             minimum_used_percentage);
      gclog_or_tty->print_cr("  "
                             "  minimum_desired_capacity: %6.1fKB"
                             "  maximum_desired_capacity: %6.1fKB",
                             minimum_desired_capacity / (double) K,
                             maximum_desired_capacity / (double) K);
    }

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the metaspace again for the next phase.  So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
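      // (Example of the damping below, assuming repeated System.gc() calls
      //  with no growth in between: current_shrink_factor goes 0 -> 10 -> 40
      //  -> 100, i.e. the first pass shrinks by 0% of the excess, the next by
      //  10%, then 40%, then the full amount.)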
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;

      shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());

      assert(shrink_bytes <= max_shrink_bytes,
             err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
                     shrink_bytes, max_shrink_bytes));
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
      }
      if (PrintGCDetails && Verbose) {
        gclog_or_tty->print_cr("  "
                               "  shrinking:"
                               "  initSize: %.1fK"
                               "  maximum_desired_capacity: %.1fK",
                               MetaspaceSize / (double) K,
                               maximum_desired_capacity / (double) K);
        gclog_or_tty->print_cr("  "
                               "  shrink_bytes: %.1fK"
                               "  current_shrink_factor: %d"
                               "  new shrink factor: %d"
                               "  MinMetaspaceExpansion: %.1fK",
                               shrink_bytes / (double) K,
                               current_shrink_factor,
                               _shrink_factor,
                               MinMetaspaceExpansion / (double) K);
      }
    }
  }

  // Don't shrink unless it's significant
  if (shrink_bytes >= MinMetaspaceExpansion &&
      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
    size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
    Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                             new_capacity_until_GC,
                                             MetaspaceGCThresholdUpdater::ComputeNewSize);
  }
}

// Metadebug methods

void Metadebug::init_allocation_fail_alot_count() {
  if (MetadataAllocationFailALot) {
    _allocation_fail_alot_count =
      1+(long)((double)MetadataAllocationFailALotInterval*os::random()/(max_jint+1.0));
  }
}

#ifdef ASSERT
bool Metadebug::test_metadata_failure() {
  if (MetadataAllocationFailALot &&
      Threads::is_vm_complete()) {
    if (_allocation_fail_alot_count > 0) {
      _allocation_fail_alot_count--;
    } else {
      if (TraceMetadataChunkAllocation && Verbose) {
        gclog_or_tty->print_cr("Metadata allocation failing for "
                               "MetadataAllocationFailALot");
      }
      init_allocation_fail_alot_count();
      return true;
    }
  }
  return false;
}
#endif

// ChunkManager methods

size_t ChunkManager::free_chunks_total_words() {
  return _free_chunks_total;
}

size_t ChunkManager::free_chunks_total_bytes() {
  return free_chunks_total_words() * BytesPerWord;
}

size_t ChunkManager::free_chunks_count() {
#ifdef ASSERT
  if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
    MutexLockerEx cl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
    // This lock is only needed in debug because the verification
    // of the _free_chunks_totals walks the list of free chunks
    slow_locked_verify_free_chunks_count();
  }
#endif
  return _free_chunks_count;
}

void ChunkManager::locked_verify_free_chunks_total() {
  assert_lock_strong(SpaceManager::expand_lock());
  assert(sum_free_chunks() == _free_chunks_total,
         err_msg("_free_chunks_total " SIZE_FORMAT " is not the"
                 " same as sum " SIZE_FORMAT, _free_chunks_total,
                 sum_free_chunks()));
}

void ChunkManager::verify_free_chunks_total() {
  MutexLockerEx cl(SpaceManager::expand_lock(),
                   Mutex::_no_safepoint_check_flag);
  locked_verify_free_chunks_total();
}

void ChunkManager::locked_verify_free_chunks_count() {
  assert_lock_strong(SpaceManager::expand_lock());
assert(sum_free_chunks_count() == _free_chunks_count, 1686 err_msg("_free_chunks_count " SIZE_FORMAT " is not the" 1687 " same as sum " SIZE_FORMAT, _free_chunks_count, 1688 sum_free_chunks_count())); 1689 } 1690 1691 void ChunkManager::verify_free_chunks_count() { 1692 #ifdef ASSERT 1693 MutexLockerEx cl(SpaceManager::expand_lock(), 1694 Mutex::_no_safepoint_check_flag); 1695 locked_verify_free_chunks_count(); 1696 #endif 1697 } 1698 1699 void ChunkManager::verify() { 1700 MutexLockerEx cl(SpaceManager::expand_lock(), 1701 Mutex::_no_safepoint_check_flag); 1702 locked_verify(); 1703 } 1704 1705 void ChunkManager::locked_verify() { 1706 locked_verify_free_chunks_count(); 1707 locked_verify_free_chunks_total(); 1708 } 1709 1710 void ChunkManager::locked_print_free_chunks(outputStream* st) { 1711 assert_lock_strong(SpaceManager::expand_lock()); 1712 st->print_cr("Free chunk total " SIZE_FORMAT " count " SIZE_FORMAT, 1713 _free_chunks_total, _free_chunks_count); 1714 } 1715 1716 void ChunkManager::locked_print_sum_free_chunks(outputStream* st) { 1717 assert_lock_strong(SpaceManager::expand_lock()); 1718 st->print_cr("Sum free chunk total " SIZE_FORMAT " count " SIZE_FORMAT, 1719 sum_free_chunks(), sum_free_chunks_count()); 1720 } 1721 ChunkList* ChunkManager::free_chunks(ChunkIndex index) { 1722 return &_free_chunks[index]; 1723 } 1724 1725 // These methods that sum the free chunk lists are used in printing 1726 // methods that are used in product builds. 1727 size_t ChunkManager::sum_free_chunks() { 1728 assert_lock_strong(SpaceManager::expand_lock()); 1729 size_t result = 0; 1730 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) { 1731 ChunkList* list = free_chunks(i); 1732 1733 if (list == NULL) { 1734 continue; 1735 } 1736 1737 result = result + list->count() * list->size(); 1738 } 1739 result = result + humongous_dictionary()->total_size(); 1740 return result; 1741 } 1742 1743 size_t ChunkManager::sum_free_chunks_count() { 1744 assert_lock_strong(SpaceManager::expand_lock()); 1745 size_t count = 0; 1746 for (ChunkIndex i = ZeroIndex; i < NumberOfFreeLists; i = next_chunk_index(i)) { 1747 ChunkList* list = free_chunks(i); 1748 if (list == NULL) { 1749 continue; 1750 } 1751 count = count + list->count(); 1752 } 1753 count = count + humongous_dictionary()->total_free_blocks(); 1754 return count; 1755 } 1756 1757 ChunkList* ChunkManager::find_free_chunks_list(size_t word_size) { 1758 ChunkIndex index = list_index(word_size); 1759 assert(index < HumongousIndex, "No humongous list"); 1760 return free_chunks(index); 1761 } 1762 1763 Metachunk* ChunkManager::free_chunks_get(size_t word_size) { 1764 assert_lock_strong(SpaceManager::expand_lock()); 1765 1766 slow_locked_verify(); 1767 1768 Metachunk* chunk = NULL; 1769 if (list_index(word_size) != HumongousIndex) { 1770 ChunkList* free_list = find_free_chunks_list(word_size); 1771 assert(free_list != NULL, "Sanity check"); 1772 1773 chunk = free_list->head(); 1774 1775 if (chunk == NULL) { 1776 return NULL; 1777 } 1778 1779 // Remove the chunk as the head of the list. 
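    // (Each non-humongous freelist holds chunks of a single fixed word size,
    // as checked in return_chunks(), so the head of the matching list already
    // fits the request exactly.)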
    free_list->remove_chunk(chunk);

    if (TraceMetadataChunkAllocation && Verbose) {
      gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
                             PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
                             free_list, chunk, chunk->word_size());
    }
  } else {
    chunk = humongous_dictionary()->get_chunk(
      word_size,
      FreeBlockDictionary<Metachunk>::atLeast);

    if (chunk == NULL) {
      return NULL;
    }

    if (TraceMetadataHumongousAllocation) {
      size_t waste = chunk->word_size() - word_size;
      gclog_or_tty->print_cr("Free list allocate humongous chunk size "
                             SIZE_FORMAT " for requested size " SIZE_FORMAT
                             " waste " SIZE_FORMAT,
                             chunk->word_size(), word_size, waste);
    }
  }

  // Chunk is being removed from the chunks free list.
  dec_free_chunks_total(chunk->word_size());

  // Remove it from the links to this freelist
  chunk->set_next(NULL);
  chunk->set_prev(NULL);
#ifdef ASSERT
  // Chunk is no longer on any freelist. Setting it to false makes
  // container_count_slow() work.
  chunk->set_is_tagged_free(false);
#endif
  chunk->container()->inc_container_count();

  slow_locked_verify();
  return chunk;
}

Metachunk* ChunkManager::chunk_freelist_allocate(size_t word_size) {
  assert_lock_strong(SpaceManager::expand_lock());
  slow_locked_verify();

  // Take from the beginning of the list
  Metachunk* chunk = free_chunks_get(word_size);
  if (chunk == NULL) {
    return NULL;
  }

  assert((word_size <= chunk->word_size()) ||
         (list_index(chunk->word_size()) == HumongousIndex),
         "Non-humongous variable sized chunk");
  if (TraceMetadataChunkAllocation) {
    size_t list_count;
    if (list_index(word_size) < HumongousIndex) {
      ChunkList* list = find_free_chunks_list(word_size);
      list_count = list->count();
    } else {
      list_count = humongous_dictionary()->total_count();
    }
    gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
                        PTR_FORMAT " size " SIZE_FORMAT " count " SIZE_FORMAT " ",
                        this, chunk, chunk->word_size(), list_count);
    locked_print_free_chunks(gclog_or_tty);
  }

  return chunk;
}

void ChunkManager::print_on(outputStream* out) const {
  if (PrintFLSStatistics != 0) {
    const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
  }
}

// SpaceManager methods

void SpaceManager::get_initial_chunk_sizes(Metaspace::MetaspaceType type,
                                           size_t* chunk_word_size,
                                           size_t* class_chunk_word_size) {
  switch (type) {
  case Metaspace::BootMetaspaceType:
    *chunk_word_size = Metaspace::first_chunk_word_size();
    *class_chunk_word_size = Metaspace::first_class_chunk_word_size();
    break;
  case Metaspace::ROMetaspaceType:
    *chunk_word_size = SharedReadOnlySize / wordSize;
    *class_chunk_word_size = ClassSpecializedChunk;
    break;
  case Metaspace::ReadWriteMetaspaceType:
    *chunk_word_size = SharedReadWriteSize / wordSize;
    *class_chunk_word_size = ClassSpecializedChunk;
    break;
  case Metaspace::AnonymousMetaspaceType:
  case Metaspace::ReflectionMetaspaceType:
    *chunk_word_size = SpecializedChunk;
    *class_chunk_word_size = ClassSpecializedChunk;
    break;
  default:
    *chunk_word_size = SmallChunk;
    *class_chunk_word_size = ClassSmallChunk;
    break;
  }
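  // To summarize the switch above: the boot metaspace and the shared
  // read-only/read-write metaspaces get a first chunk sized for their whole
  // expected payload, reflection and anonymous class loaders start with the
  // smallest (specialized) chunks, and everything else starts with small chunks.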
  assert(*chunk_word_size != 0 && *class_chunk_word_size != 0,
         err_msg("Initial chunk sizes bad: data " SIZE_FORMAT
                 " class " SIZE_FORMAT,
                 *chunk_word_size, *class_chunk_word_size));
}

size_t SpaceManager::sum_free_in_chunks_in_use() const {
  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  size_t free = 0;
  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
    Metachunk* chunk = chunks_in_use(i);
    while (chunk != NULL) {
      free += chunk->free_word_size();
      chunk = chunk->next();
    }
  }
  return free;
}

size_t SpaceManager::sum_waste_in_chunks_in_use() const {
  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
  size_t result = 0;
  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
    result += sum_waste_in_chunks_in_use(i);
  }

  return result;
}

size_t SpaceManager::sum_waste_in_chunks_in_use(ChunkIndex index) const {
  size_t result = 0;
  Metachunk* chunk = chunks_in_use(index);
  // Count the free space in all the chunks but not the
  // current chunk from which allocations are still being done.
  while (chunk != NULL) {
    if (chunk != current_chunk()) {
      result += chunk->free_word_size();
    }
    chunk = chunk->next();
  }
  return result;
}

size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
  // For CMS use "allocated_chunks_words()" which does not need the
  // Metaspace lock.  For the other collectors sum over the
  // chunks-in-use lists.  Use both methods as a check that
  // "allocated_chunks_words()" is correct.  That is,
  // sum_capacity_in_chunks_in_use() is too expensive to use in the
  // product, so allocated_chunks_words() should be used instead, but
  // allow for checking that allocated_chunks_words() returns the same
  // value as sum_capacity_in_chunks_in_use(), which is the definitive
  // answer.
1938 if (UseConcMarkSweepGC) { 1939 return allocated_chunks_words(); 1940 } else { 1941 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 1942 size_t sum = 0; 1943 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 1944 Metachunk* chunk = chunks_in_use(i); 1945 while (chunk != NULL) { 1946 sum += chunk->word_size(); 1947 chunk = chunk->next(); 1948 } 1949 } 1950 return sum; 1951 } 1952 } 1953 1954 size_t SpaceManager::sum_count_in_chunks_in_use() { 1955 size_t count = 0; 1956 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 1957 count = count + sum_count_in_chunks_in_use(i); 1958 } 1959 1960 return count; 1961 } 1962 1963 size_t SpaceManager::sum_count_in_chunks_in_use(ChunkIndex i) { 1964 size_t count = 0; 1965 Metachunk* chunk = chunks_in_use(i); 1966 while (chunk != NULL) { 1967 count++; 1968 chunk = chunk->next(); 1969 } 1970 return count; 1971 } 1972 1973 1974 size_t SpaceManager::sum_used_in_chunks_in_use() const { 1975 MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag); 1976 size_t used = 0; 1977 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 1978 Metachunk* chunk = chunks_in_use(i); 1979 while (chunk != NULL) { 1980 used += chunk->used_word_size(); 1981 chunk = chunk->next(); 1982 } 1983 } 1984 return used; 1985 } 1986 1987 void SpaceManager::locked_print_chunks_in_use_on(outputStream* st) const { 1988 1989 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 1990 Metachunk* chunk = chunks_in_use(i); 1991 st->print("SpaceManager: %s " PTR_FORMAT, 1992 chunk_size_name(i), chunk); 1993 if (chunk != NULL) { 1994 st->print_cr(" free " SIZE_FORMAT, 1995 chunk->free_word_size()); 1996 } else { 1997 st->cr(); 1998 } 1999 } 2000 2001 chunk_manager()->locked_print_free_chunks(st); 2002 chunk_manager()->locked_print_sum_free_chunks(st); 2003 } 2004 2005 size_t SpaceManager::calc_chunk_size(size_t word_size) { 2006 2007 // Decide between a small chunk and a medium chunk. Up to 2008 // _small_chunk_limit small chunks can be allocated but 2009 // once a medium chunk has been allocated, no more small 2010 // chunks will be allocated. 2011 size_t chunk_word_size; 2012 if (chunks_in_use(MediumIndex) == NULL && 2013 sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) { 2014 chunk_word_size = (size_t) small_chunk_size(); 2015 if (word_size + Metachunk::overhead() > small_chunk_size()) { 2016 chunk_word_size = medium_chunk_size(); 2017 } 2018 } else { 2019 chunk_word_size = medium_chunk_size(); 2020 } 2021 2022 // Might still need a humongous chunk. Enforce 2023 // humongous allocations sizes to be aligned up to 2024 // the smallest chunk size. 
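  // Illustrative decision order for the logic above and below (not extra
  // logic): while no medium chunk exists and fewer than _small_chunk_limit
  // small chunks are in use, a request that fits in a small chunk (including
  // the Metachunk overhead) gets a small chunk; otherwise a medium chunk is
  // chosen; and only when the request does not even fit in a medium chunk is
  // it rounded up here to a humongous size.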
2025 size_t if_humongous_sized_chunk = 2026 align_size_up(word_size + Metachunk::overhead(), 2027 smallest_chunk_size()); 2028 chunk_word_size = 2029 MAX2((size_t) chunk_word_size, if_humongous_sized_chunk); 2030 2031 assert(!SpaceManager::is_humongous(word_size) || 2032 chunk_word_size == if_humongous_sized_chunk, 2033 err_msg("Size calculation is wrong, word_size " SIZE_FORMAT 2034 " chunk_word_size " SIZE_FORMAT, 2035 word_size, chunk_word_size)); 2036 if (TraceMetadataHumongousAllocation && 2037 SpaceManager::is_humongous(word_size)) { 2038 gclog_or_tty->print_cr("Metadata humongous allocation:"); 2039 gclog_or_tty->print_cr(" word_size " PTR_FORMAT, word_size); 2040 gclog_or_tty->print_cr(" chunk_word_size " PTR_FORMAT, 2041 chunk_word_size); 2042 gclog_or_tty->print_cr(" chunk overhead " PTR_FORMAT, 2043 Metachunk::overhead()); 2044 } 2045 return chunk_word_size; 2046 } 2047 2048 void SpaceManager::track_metaspace_memory_usage() { 2049 if (is_init_completed()) { 2050 if (is_class()) { 2051 MemoryService::track_compressed_class_memory_usage(); 2052 } 2053 MemoryService::track_metaspace_memory_usage(); 2054 } 2055 } 2056 2057 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) { 2058 assert(vs_list()->current_virtual_space() != NULL, 2059 "Should have been set"); 2060 assert(current_chunk() == NULL || 2061 current_chunk()->allocate(word_size) == NULL, 2062 "Don't need to expand"); 2063 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 2064 2065 if (TraceMetadataChunkAllocation && Verbose) { 2066 size_t words_left = 0; 2067 size_t words_used = 0; 2068 if (current_chunk() != NULL) { 2069 words_left = current_chunk()->free_word_size(); 2070 words_used = current_chunk()->used_word_size(); 2071 } 2072 gclog_or_tty->print_cr("SpaceManager::grow_and_allocate for " SIZE_FORMAT 2073 " words " SIZE_FORMAT " words used " SIZE_FORMAT 2074 " words left", 2075 word_size, words_used, words_left); 2076 } 2077 2078 // Get another chunk out of the virtual space 2079 size_t grow_chunks_by_words = calc_chunk_size(word_size); 2080 Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words); 2081 2082 MetaWord* mem = NULL; 2083 2084 // If a chunk was available, add it to the in-use chunk list 2085 // and do an allocation from it. 2086 if (next != NULL) { 2087 // Add to this manager's list of chunks in use. 2088 add_chunk(next, false); 2089 mem = next->allocate(word_size); 2090 } 2091 2092 // Track metaspace memory usage statistic. 2093 track_metaspace_memory_usage(); 2094 2095 return mem; 2096 } 2097 2098 void SpaceManager::print_on(outputStream* st) const { 2099 2100 for (ChunkIndex i = ZeroIndex; 2101 i < NumberOfInUseLists ; 2102 i = next_chunk_index(i) ) { 2103 st->print_cr(" chunks_in_use " PTR_FORMAT " chunk size " PTR_FORMAT, 2104 chunks_in_use(i), 2105 chunks_in_use(i) == NULL ? 
0 : chunks_in_use(i)->word_size()); 2106 } 2107 st->print_cr(" waste: Small " SIZE_FORMAT " Medium " SIZE_FORMAT 2108 " Humongous " SIZE_FORMAT, 2109 sum_waste_in_chunks_in_use(SmallIndex), 2110 sum_waste_in_chunks_in_use(MediumIndex), 2111 sum_waste_in_chunks_in_use(HumongousIndex)); 2112 // block free lists 2113 if (block_freelists() != NULL) { 2114 st->print_cr("total in block free lists " SIZE_FORMAT, 2115 block_freelists()->total_size()); 2116 } 2117 } 2118 2119 SpaceManager::SpaceManager(Metaspace::MetadataType mdtype, 2120 Mutex* lock) : 2121 _mdtype(mdtype), 2122 _allocated_blocks_words(0), 2123 _allocated_chunks_words(0), 2124 _allocated_chunks_count(0), 2125 _lock(lock) 2126 { 2127 initialize(); 2128 } 2129 2130 void SpaceManager::inc_size_metrics(size_t words) { 2131 assert_lock_strong(SpaceManager::expand_lock()); 2132 // Total of allocated Metachunks and allocated Metachunks count 2133 // for each SpaceManager 2134 _allocated_chunks_words = _allocated_chunks_words + words; 2135 _allocated_chunks_count++; 2136 // Global total of capacity in allocated Metachunks 2137 MetaspaceAux::inc_capacity(mdtype(), words); 2138 // Global total of allocated Metablocks. 2139 // used_words_slow() includes the overhead in each 2140 // Metachunk so include it in the used when the 2141 // Metachunk is first added (so only added once per 2142 // Metachunk). 2143 MetaspaceAux::inc_used(mdtype(), Metachunk::overhead()); 2144 } 2145 2146 void SpaceManager::inc_used_metrics(size_t words) { 2147 // Add to the per SpaceManager total 2148 Atomic::add_ptr(words, &_allocated_blocks_words); 2149 // Add to the global total 2150 MetaspaceAux::inc_used(mdtype(), words); 2151 } 2152 2153 void SpaceManager::dec_total_from_size_metrics() { 2154 MetaspaceAux::dec_capacity(mdtype(), allocated_chunks_words()); 2155 MetaspaceAux::dec_used(mdtype(), allocated_blocks_words()); 2156 // Also deduct the overhead per Metachunk 2157 MetaspaceAux::dec_used(mdtype(), allocated_chunks_count() * Metachunk::overhead()); 2158 } 2159 2160 void SpaceManager::initialize() { 2161 Metadebug::init_allocation_fail_alot_count(); 2162 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 2163 _chunks_in_use[i] = NULL; 2164 } 2165 _current_chunk = NULL; 2166 if (TraceMetadataChunkAllocation && Verbose) { 2167 gclog_or_tty->print_cr("SpaceManager(): " PTR_FORMAT, this); 2168 } 2169 } 2170 2171 void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) { 2172 if (chunks == NULL) { 2173 return; 2174 } 2175 ChunkList* list = free_chunks(index); 2176 assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes"); 2177 assert_lock_strong(SpaceManager::expand_lock()); 2178 Metachunk* cur = chunks; 2179 2180 // This returns chunks one at a time. 
  // If a new list class that is a base class of FreeList is ever
  // introduced, something like FreeList::prepend() could be used
  // in place of this loop.
  while (cur != NULL) {
    assert(cur->container() != NULL, "Container should have been set");
    cur->container()->dec_container_count();
    // Capture the next link before it is changed
    // by the call to return_chunk_at_head();
    Metachunk* next = cur->next();
    DEBUG_ONLY(cur->set_is_tagged_free(true);)
    list->return_chunk_at_head(cur);
    cur = next;
  }
}

SpaceManager::~SpaceManager() {
  // This call takes this->_lock, which can't be done while holding the expand_lock().
  assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
         err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT
                 " allocated_chunks_words() " SIZE_FORMAT,
                 sum_capacity_in_chunks_in_use(), allocated_chunks_words()));

  MutexLockerEx fcl(SpaceManager::expand_lock(),
                    Mutex::_no_safepoint_check_flag);

  chunk_manager()->slow_locked_verify();

  dec_total_from_size_metrics();

  if (TraceMetadataChunkAllocation && Verbose) {
    gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
    locked_print_chunks_in_use_on(gclog_or_tty);
  }

  // Do not mangle freed Metachunks.  The chunk size inside Metachunks
  // is still needed during the freeing of VirtualSpaceNodes.

  // Have to update before the chunks_in_use lists are emptied
  // below.
  chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
                                         sum_count_in_chunks_in_use());

  // Add all the chunks in use by this space manager
  // to the global list of free chunks.

  // Follow each list of chunks-in-use and add them to the
  // free lists.  Each list is NULL terminated.

  for (ChunkIndex i = ZeroIndex; i < HumongousIndex; i = next_chunk_index(i)) {
    if (TraceMetadataChunkAllocation && Verbose) {
      gclog_or_tty->print_cr("returned %d %s chunks to freelist",
                             sum_count_in_chunks_in_use(i),
                             chunk_size_name(i));
    }
    Metachunk* chunks = chunks_in_use(i);
    chunk_manager()->return_chunks(i, chunks);
    set_chunks_in_use(i, NULL);
    if (TraceMetadataChunkAllocation && Verbose) {
      gclog_or_tty->print_cr("updated freelist count %d %s",
                             chunk_manager()->free_chunks(i)->count(),
                             chunk_size_name(i));
    }
    assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
  }

  // The medium chunk case may be optimized by passing the head and
  // tail of the medium chunk list to add_at_head().  The tail is often
  // the current chunk but there are probably exceptions.

  // Humongous chunks
  if (TraceMetadataChunkAllocation && Verbose) {
    gclog_or_tty->print_cr("returned %d %s humongous chunks to dictionary",
                           sum_count_in_chunks_in_use(HumongousIndex),
                           chunk_size_name(HumongousIndex));
    gclog_or_tty->print("Humongous chunk dictionary: ");
  }
  // Humongous chunks are never the current chunk.
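  // Unlike the fixed-size lists above, humongous chunks are returned one by
  // one to the humongous dictionary, which indexes them by size so that
  // free_chunks_get() can later hand out a chunk of at least the requested size.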
2258 Metachunk* humongous_chunks = chunks_in_use(HumongousIndex); 2259 2260 while (humongous_chunks != NULL) { 2261 #ifdef ASSERT 2262 humongous_chunks->set_is_tagged_free(true); 2263 #endif 2264 if (TraceMetadataChunkAllocation && Verbose) { 2265 gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ", 2266 humongous_chunks, 2267 humongous_chunks->word_size()); 2268 } 2269 assert(humongous_chunks->word_size() == (size_t) 2270 align_size_up(humongous_chunks->word_size(), 2271 smallest_chunk_size()), 2272 err_msg("Humongous chunk size is wrong: word size " SIZE_FORMAT 2273 " granularity %d", 2274 humongous_chunks->word_size(), smallest_chunk_size())); 2275 Metachunk* next_humongous_chunks = humongous_chunks->next(); 2276 humongous_chunks->container()->dec_container_count(); 2277 chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks); 2278 humongous_chunks = next_humongous_chunks; 2279 } 2280 if (TraceMetadataChunkAllocation && Verbose) { 2281 gclog_or_tty->cr(); 2282 gclog_or_tty->print_cr("updated dictionary count %d %s", 2283 chunk_manager()->humongous_dictionary()->total_count(), 2284 chunk_size_name(HumongousIndex)); 2285 } 2286 chunk_manager()->slow_locked_verify(); 2287 } 2288 2289 const char* SpaceManager::chunk_size_name(ChunkIndex index) const { 2290 switch (index) { 2291 case SpecializedIndex: 2292 return "Specialized"; 2293 case SmallIndex: 2294 return "Small"; 2295 case MediumIndex: 2296 return "Medium"; 2297 case HumongousIndex: 2298 return "Humongous"; 2299 default: 2300 return NULL; 2301 } 2302 } 2303 2304 ChunkIndex ChunkManager::list_index(size_t size) { 2305 switch (size) { 2306 case SpecializedChunk: 2307 assert(SpecializedChunk == ClassSpecializedChunk, 2308 "Need branch for ClassSpecializedChunk"); 2309 return SpecializedIndex; 2310 case SmallChunk: 2311 case ClassSmallChunk: 2312 return SmallIndex; 2313 case MediumChunk: 2314 case ClassMediumChunk: 2315 return MediumIndex; 2316 default: 2317 assert(size > MediumChunk || size > ClassMediumChunk, 2318 "Not a humongous chunk"); 2319 return HumongousIndex; 2320 } 2321 } 2322 2323 void SpaceManager::deallocate(MetaWord* p, size_t word_size) { 2324 assert_lock_strong(_lock); 2325 size_t raw_word_size = get_raw_word_size(word_size); 2326 size_t min_size = TreeChunk<Metablock, FreeList<Metablock> >::min_size(); 2327 assert(raw_word_size >= min_size, 2328 err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size)); 2329 block_freelists()->return_block(p, raw_word_size); 2330 } 2331 2332 // Adds a chunk to the list of chunks in use. 2333 void SpaceManager::add_chunk(Metachunk* new_chunk, bool make_current) { 2334 2335 assert(new_chunk != NULL, "Should not be NULL"); 2336 assert(new_chunk->next() == NULL, "Should not be on a list"); 2337 2338 new_chunk->reset_empty(); 2339 2340 // Find the correct list and and set the current 2341 // chunk for that list. 2342 ChunkIndex index = ChunkManager::list_index(new_chunk->word_size()); 2343 2344 if (index != HumongousIndex) { 2345 retire_current_chunk(); 2346 set_current_chunk(new_chunk); 2347 new_chunk->set_next(chunks_in_use(index)); 2348 set_chunks_in_use(index, new_chunk); 2349 } else { 2350 // For null class loader data and DumpSharedSpaces, the first chunk isn't 2351 // small, so small will be null. Link this first chunk as the current 2352 // chunk. 2353 if (make_current) { 2354 // Set as the current chunk but otherwise treat as a humongous chunk. 2355 set_current_chunk(new_chunk); 2356 } 2357 // Link at head. 
    // The _current_chunk only points to a humongous chunk for
    // the null class loader metaspace (class and data virtual space managers);
    // in that case it is the chunk just linked at the head, so it will never
    // point to the tail of the humongous chunks list.
    new_chunk->set_next(chunks_in_use(HumongousIndex));
    set_chunks_in_use(HumongousIndex, new_chunk);

    assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
  }

  // Add to the running sum of capacity
  inc_size_metrics(new_chunk->word_size());

  assert(new_chunk->is_empty(), "Not ready for reuse");
  if (TraceMetadataChunkAllocation && Verbose) {
    gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
                        sum_count_in_chunks_in_use());
    new_chunk->print_on(gclog_or_tty);
    chunk_manager()->locked_print_free_chunks(gclog_or_tty);
  }
}

void SpaceManager::retire_current_chunk() {
  if (current_chunk() != NULL) {
    size_t remaining_words = current_chunk()->free_word_size();
    if (remaining_words >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
      block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
      inc_used_metrics(remaining_words);
    }
  }
}

Metachunk* SpaceManager::get_new_chunk(size_t word_size,
                                       size_t grow_chunks_by_words) {
  // Get a chunk from the chunk freelist
  Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);

  if (next == NULL) {
    next = vs_list()->get_new_chunk(word_size,
                                    grow_chunks_by_words,
                                    medium_chunk_bunch());
  }

  if (TraceMetadataHumongousAllocation && next != NULL &&
      SpaceManager::is_humongous(next->word_size())) {
    gclog_or_tty->print_cr(" new humongous chunk word size "
                           PTR_FORMAT, next->word_size());
  }

  return next;
}

MetaWord* SpaceManager::allocate(size_t word_size) {
  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);

  size_t raw_word_size = get_raw_word_size(word_size);
  BlockFreelist* fl = block_freelists();
  MetaWord* p = NULL;
  // Allocation from the dictionary is expensive in the sense that
  // the dictionary has to be searched for a size.  Don't allocate
  // from the dictionary until it starts to get fat.  Is this
  // a reasonable policy?  Maybe a skinny dictionary is fast enough
  // for allocations.  Do some profiling.  JJJ
  if (fl->total_size() > allocation_from_dictionary_limit) {
    p = fl->get_block(raw_word_size);
  }
  if (p == NULL) {
    p = allocate_work(raw_word_size);
  }

  return p;
}

// Returns the address of the space allocated for "word_size".
// This method does not know about blocks (Metablocks).
MetaWord* SpaceManager::allocate_work(size_t word_size) {
  assert_lock_strong(_lock);
#ifdef ASSERT
  if (Metadebug::test_metadata_failure()) {
    return NULL;
  }
#endif
  // Is there space in the current chunk?
  MetaWord* result = NULL;

  // For DumpSharedSpaces, only allocate out of the current chunk which is
  // never null because we gave it the size we wanted.  Caller reports out
  // of memory if this returns null.
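  // For all other metaspaces the code below first tries the current chunk and,
  // failing that, grow_and_allocate(), which gets a fresh chunk from the global
  // freelist or from the virtual space list and retries the allocation.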
2445 if (DumpSharedSpaces) { 2446 assert(current_chunk() != NULL, "should never happen"); 2447 inc_used_metrics(word_size); 2448 return current_chunk()->allocate(word_size); // caller handles null result 2449 } 2450 2451 if (current_chunk() != NULL) { 2452 result = current_chunk()->allocate(word_size); 2453 } 2454 2455 if (result == NULL) { 2456 result = grow_and_allocate(word_size); 2457 } 2458 2459 if (result != NULL) { 2460 inc_used_metrics(word_size); 2461 assert(result != (MetaWord*) chunks_in_use(MediumIndex), 2462 "Head of the list is being allocated"); 2463 } 2464 2465 return result; 2466 } 2467 2468 void SpaceManager::verify() { 2469 // If there are blocks in the dictionary, then 2470 // verification of chunks does not work since 2471 // being in the dictionary alters a chunk. 2472 if (block_freelists()->total_size() == 0) { 2473 for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) { 2474 Metachunk* curr = chunks_in_use(i); 2475 while (curr != NULL) { 2476 curr->verify(); 2477 verify_chunk_size(curr); 2478 curr = curr->next(); 2479 } 2480 } 2481 } 2482 } 2483 2484 void SpaceManager::verify_chunk_size(Metachunk* chunk) { 2485 assert(is_humongous(chunk->word_size()) || 2486 chunk->word_size() == medium_chunk_size() || 2487 chunk->word_size() == small_chunk_size() || 2488 chunk->word_size() == specialized_chunk_size(), 2489 "Chunk size is wrong"); 2490 return; 2491 } 2492 2493 #ifdef ASSERT 2494 void SpaceManager::verify_allocated_blocks_words() { 2495 // Verification is only guaranteed at a safepoint. 2496 assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(), 2497 "Verification can fail if the applications is running"); 2498 assert(allocated_blocks_words() == sum_used_in_chunks_in_use(), 2499 err_msg("allocation total is not consistent " SIZE_FORMAT 2500 " vs " SIZE_FORMAT, 2501 allocated_blocks_words(), sum_used_in_chunks_in_use())); 2502 } 2503 2504 #endif 2505 2506 void SpaceManager::dump(outputStream* const out) const { 2507 size_t curr_total = 0; 2508 size_t waste = 0; 2509 uint i = 0; 2510 size_t used = 0; 2511 size_t capacity = 0; 2512 2513 // Add up statistics for all chunks in this SpaceManager. 2514 for (ChunkIndex index = ZeroIndex; 2515 index < NumberOfInUseLists; 2516 index = next_chunk_index(index)) { 2517 for (Metachunk* curr = chunks_in_use(index); 2518 curr != NULL; 2519 curr = curr->next()) { 2520 out->print("%d) ", i++); 2521 curr->print_on(out); 2522 curr_total += curr->word_size(); 2523 used += curr->used_word_size(); 2524 capacity += curr->word_size(); 2525 waste += curr->free_word_size() + curr->overhead();; 2526 } 2527 } 2528 2529 if (TraceMetadataChunkAllocation && Verbose) { 2530 block_freelists()->print_on(out); 2531 } 2532 2533 size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size(); 2534 // Free space isn't wasted. 
2535 waste -= free; 2536 2537 out->print_cr("total of all chunks " SIZE_FORMAT " used " SIZE_FORMAT 2538 " free " SIZE_FORMAT " capacity " SIZE_FORMAT 2539 " waste " SIZE_FORMAT, curr_total, used, free, capacity, waste); 2540 } 2541 2542 #ifndef PRODUCT 2543 void SpaceManager::mangle_freed_chunks() { 2544 for (ChunkIndex index = ZeroIndex; 2545 index < NumberOfInUseLists; 2546 index = next_chunk_index(index)) { 2547 for (Metachunk* curr = chunks_in_use(index); 2548 curr != NULL; 2549 curr = curr->next()) { 2550 curr->mangle(); 2551 } 2552 } 2553 } 2554 #endif // PRODUCT 2555 2556 // MetaspaceAux 2557 2558 2559 size_t MetaspaceAux::_capacity_words[] = {0, 0}; 2560 size_t MetaspaceAux::_used_words[] = {0, 0}; 2561 2562 size_t MetaspaceAux::free_bytes(Metaspace::MetadataType mdtype) { 2563 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2564 return list == NULL ? 0 : list->free_bytes(); 2565 } 2566 2567 size_t MetaspaceAux::free_bytes() { 2568 return free_bytes(Metaspace::ClassType) + free_bytes(Metaspace::NonClassType); 2569 } 2570 2571 void MetaspaceAux::dec_capacity(Metaspace::MetadataType mdtype, size_t words) { 2572 assert_lock_strong(SpaceManager::expand_lock()); 2573 assert(words <= capacity_words(mdtype), 2574 err_msg("About to decrement below 0: words " SIZE_FORMAT 2575 " is greater than _capacity_words[%u] " SIZE_FORMAT, 2576 words, mdtype, capacity_words(mdtype))); 2577 _capacity_words[mdtype] -= words; 2578 } 2579 2580 void MetaspaceAux::inc_capacity(Metaspace::MetadataType mdtype, size_t words) { 2581 assert_lock_strong(SpaceManager::expand_lock()); 2582 // Needs to be atomic 2583 _capacity_words[mdtype] += words; 2584 } 2585 2586 void MetaspaceAux::dec_used(Metaspace::MetadataType mdtype, size_t words) { 2587 assert(words <= used_words(mdtype), 2588 err_msg("About to decrement below 0: words " SIZE_FORMAT 2589 " is greater than _used_words[%u] " SIZE_FORMAT, 2590 words, mdtype, used_words(mdtype))); 2591 // For CMS deallocation of the Metaspaces occurs during the 2592 // sweep which is a concurrent phase. Protection by the expand_lock() 2593 // is not enough since allocation is on a per Metaspace basis 2594 // and protected by the Metaspace lock. 2595 jlong minus_words = (jlong) - (jlong) words; 2596 Atomic::add_ptr(minus_words, &_used_words[mdtype]); 2597 } 2598 2599 void MetaspaceAux::inc_used(Metaspace::MetadataType mdtype, size_t words) { 2600 // _used_words tracks allocations for 2601 // each piece of metadata. Those allocations are 2602 // generally done concurrently by different application 2603 // threads so must be done atomically. 
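  // For example, two threads defining classes at the same time may both update
  // _used_words[NonClassType]; a plain "+=" could lose one of the increments,
  // hence the atomic add.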
2604 Atomic::add_ptr(words, &_used_words[mdtype]); 2605 } 2606 2607 size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) { 2608 size_t used = 0; 2609 ClassLoaderDataGraphMetaspaceIterator iter; 2610 while (iter.repeat()) { 2611 Metaspace* msp = iter.get_next(); 2612 // Sum allocated_blocks_words for each metaspace 2613 if (msp != NULL) { 2614 used += msp->used_words_slow(mdtype); 2615 } 2616 } 2617 return used * BytesPerWord; 2618 } 2619 2620 size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) { 2621 size_t free = 0; 2622 ClassLoaderDataGraphMetaspaceIterator iter; 2623 while (iter.repeat()) { 2624 Metaspace* msp = iter.get_next(); 2625 if (msp != NULL) { 2626 free += msp->free_words_slow(mdtype); 2627 } 2628 } 2629 return free * BytesPerWord; 2630 } 2631 2632 size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) { 2633 if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) { 2634 return 0; 2635 } 2636 // Don't count the space in the freelists. That space will be 2637 // added to the capacity calculation as needed. 2638 size_t capacity = 0; 2639 ClassLoaderDataGraphMetaspaceIterator iter; 2640 while (iter.repeat()) { 2641 Metaspace* msp = iter.get_next(); 2642 if (msp != NULL) { 2643 capacity += msp->capacity_words_slow(mdtype); 2644 } 2645 } 2646 return capacity * BytesPerWord; 2647 } 2648 2649 size_t MetaspaceAux::capacity_bytes_slow() { 2650 #ifdef PRODUCT 2651 // Use capacity_bytes() in PRODUCT instead of this function. 2652 guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT"); 2653 #endif 2654 size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType); 2655 size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType); 2656 assert(capacity_bytes() == class_capacity + non_class_capacity, 2657 err_msg("bad accounting: capacity_bytes() " SIZE_FORMAT 2658 " class_capacity + non_class_capacity " SIZE_FORMAT 2659 " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT, 2660 capacity_bytes(), class_capacity + non_class_capacity, 2661 class_capacity, non_class_capacity)); 2662 2663 return class_capacity + non_class_capacity; 2664 } 2665 2666 size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) { 2667 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2668 return list == NULL ? 0 : list->reserved_bytes(); 2669 } 2670 2671 size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) { 2672 VirtualSpaceList* list = Metaspace::get_space_list(mdtype); 2673 return list == NULL ? 
0 : list->committed_bytes(); 2674 } 2675 2676 size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); } 2677 2678 size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) { 2679 ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype); 2680 if (chunk_manager == NULL) { 2681 return 0; 2682 } 2683 chunk_manager->slow_verify(); 2684 return chunk_manager->free_chunks_total_words(); 2685 } 2686 2687 size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) { 2688 return free_chunks_total_words(mdtype) * BytesPerWord; 2689 } 2690 2691 size_t MetaspaceAux::free_chunks_total_words() { 2692 return free_chunks_total_words(Metaspace::ClassType) + 2693 free_chunks_total_words(Metaspace::NonClassType); 2694 } 2695 2696 size_t MetaspaceAux::free_chunks_total_bytes() { 2697 return free_chunks_total_words() * BytesPerWord; 2698 } 2699 2700 bool MetaspaceAux::has_chunk_free_list(Metaspace::MetadataType mdtype) { 2701 return Metaspace::get_chunk_manager(mdtype) != NULL; 2702 } 2703 2704 MetaspaceChunkFreeListSummary MetaspaceAux::chunk_free_list_summary(Metaspace::MetadataType mdtype) { 2705 if (!has_chunk_free_list(mdtype)) { 2706 return MetaspaceChunkFreeListSummary(); 2707 } 2708 2709 const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype); 2710 return cm->chunk_free_list_summary(); 2711 } 2712 2713 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) { 2714 gclog_or_tty->print(", [Metaspace:"); 2715 if (PrintGCDetails && Verbose) { 2716 gclog_or_tty->print(" " SIZE_FORMAT 2717 "->" SIZE_FORMAT 2718 "(" SIZE_FORMAT ")", 2719 prev_metadata_used, 2720 used_bytes(), 2721 reserved_bytes()); 2722 } else { 2723 gclog_or_tty->print(" " SIZE_FORMAT "K" 2724 "->" SIZE_FORMAT "K" 2725 "(" SIZE_FORMAT "K)", 2726 prev_metadata_used/K, 2727 used_bytes()/K, 2728 reserved_bytes()/K); 2729 } 2730 2731 gclog_or_tty->print("]"); 2732 } 2733 2734 // This is printed when PrintGCDetails 2735 void MetaspaceAux::print_on(outputStream* out) { 2736 Metaspace::MetadataType nct = Metaspace::NonClassType; 2737 2738 out->print_cr(" Metaspace " 2739 "used " SIZE_FORMAT "K, " 2740 "capacity " SIZE_FORMAT "K, " 2741 "committed " SIZE_FORMAT "K, " 2742 "reserved " SIZE_FORMAT "K", 2743 used_bytes()/K, 2744 capacity_bytes()/K, 2745 committed_bytes()/K, 2746 reserved_bytes()/K); 2747 2748 if (Metaspace::using_class_space()) { 2749 Metaspace::MetadataType ct = Metaspace::ClassType; 2750 out->print_cr(" class space " 2751 "used " SIZE_FORMAT "K, " 2752 "capacity " SIZE_FORMAT "K, " 2753 "committed " SIZE_FORMAT "K, " 2754 "reserved " SIZE_FORMAT "K", 2755 used_bytes(ct)/K, 2756 capacity_bytes(ct)/K, 2757 committed_bytes(ct)/K, 2758 reserved_bytes(ct)/K); 2759 } 2760 } 2761 2762 // Print information for class space and data space separately. 2763 // This is almost the same as above. 
2764 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) { 2765 size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype); 2766 size_t capacity_bytes = capacity_bytes_slow(mdtype); 2767 size_t used_bytes = used_bytes_slow(mdtype); 2768 size_t free_bytes = free_bytes_slow(mdtype); 2769 size_t used_and_free = used_bytes + free_bytes + 2770 free_chunks_capacity_bytes; 2771 out->print_cr(" Chunk accounting: used in chunks " SIZE_FORMAT 2772 "K + unused in chunks " SIZE_FORMAT "K + " 2773 " capacity in free chunks " SIZE_FORMAT "K = " SIZE_FORMAT 2774 "K capacity in allocated chunks " SIZE_FORMAT "K", 2775 used_bytes / K, 2776 free_bytes / K, 2777 free_chunks_capacity_bytes / K, 2778 used_and_free / K, 2779 capacity_bytes / K); 2780 // Accounting can only be correct if we got the values during a safepoint 2781 assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong"); 2782 } 2783 2784 // Print total fragmentation for class metaspaces 2785 void MetaspaceAux::print_class_waste(outputStream* out) { 2786 assert(Metaspace::using_class_space(), "class metaspace not used"); 2787 size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0; 2788 size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0; 2789 ClassLoaderDataGraphMetaspaceIterator iter; 2790 while (iter.repeat()) { 2791 Metaspace* msp = iter.get_next(); 2792 if (msp != NULL) { 2793 cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); 2794 cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex); 2795 cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex); 2796 cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex); 2797 cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex); 2798 cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex); 2799 cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex); 2800 } 2801 } 2802 out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " 2803 SIZE_FORMAT " small(s) " SIZE_FORMAT ", " 2804 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " 2805 "large count " SIZE_FORMAT, 2806 cls_specialized_count, cls_specialized_waste, 2807 cls_small_count, cls_small_waste, 2808 cls_medium_count, cls_medium_waste, cls_humongous_count); 2809 } 2810 2811 // Print total fragmentation for data and class metaspaces separately 2812 void MetaspaceAux::print_waste(outputStream* out) { 2813 size_t specialized_waste = 0, small_waste = 0, medium_waste = 0; 2814 size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0; 2815 2816 ClassLoaderDataGraphMetaspaceIterator iter; 2817 while (iter.repeat()) { 2818 Metaspace* msp = iter.get_next(); 2819 if (msp != NULL) { 2820 specialized_waste += msp->vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); 2821 specialized_count += msp->vsm()->sum_count_in_chunks_in_use(SpecializedIndex); 2822 small_waste += msp->vsm()->sum_waste_in_chunks_in_use(SmallIndex); 2823 small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex); 2824 medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex); 2825 medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex); 2826 humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex); 2827 } 2828 } 2829 out->print_cr("Total fragmentation waste (words) doesn't count 
free space"); 2830 out->print_cr(" data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " 2831 SIZE_FORMAT " small(s) " SIZE_FORMAT ", " 2832 SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " 2833 "large count " SIZE_FORMAT, 2834 specialized_count, specialized_waste, small_count, 2835 small_waste, medium_count, medium_waste, humongous_count); 2836 if (Metaspace::using_class_space()) { 2837 print_class_waste(out); 2838 } 2839 } 2840 2841 // Dump global metaspace things from the end of ClassLoaderDataGraph 2842 void MetaspaceAux::dump(outputStream* out) { 2843 out->print_cr("All Metaspace:"); 2844 out->print("data space: "); print_on(out, Metaspace::NonClassType); 2845 out->print("class space: "); print_on(out, Metaspace::ClassType); 2846 print_waste(out); 2847 } 2848 2849 void MetaspaceAux::verify_free_chunks() { 2850 Metaspace::chunk_manager_metadata()->verify(); 2851 if (Metaspace::using_class_space()) { 2852 Metaspace::chunk_manager_class()->verify(); 2853 } 2854 } 2855 2856 void MetaspaceAux::verify_capacity() { 2857 #ifdef ASSERT 2858 size_t running_sum_capacity_bytes = capacity_bytes(); 2859 // For purposes of the running sum of capacity, verify against capacity 2860 size_t capacity_in_use_bytes = capacity_bytes_slow(); 2861 assert(running_sum_capacity_bytes == capacity_in_use_bytes, 2862 err_msg("capacity_words() * BytesPerWord " SIZE_FORMAT 2863 " capacity_bytes_slow()" SIZE_FORMAT, 2864 running_sum_capacity_bytes, capacity_in_use_bytes)); 2865 for (Metaspace::MetadataType i = Metaspace::ClassType; 2866 i < Metaspace:: MetadataTypeCount; 2867 i = (Metaspace::MetadataType)(i + 1)) { 2868 size_t capacity_in_use_bytes = capacity_bytes_slow(i); 2869 assert(capacity_bytes(i) == capacity_in_use_bytes, 2870 err_msg("capacity_bytes(%u) " SIZE_FORMAT 2871 " capacity_bytes_slow(%u)" SIZE_FORMAT, 2872 i, capacity_bytes(i), i, capacity_in_use_bytes)); 2873 } 2874 #endif 2875 } 2876 2877 void MetaspaceAux::verify_used() { 2878 #ifdef ASSERT 2879 size_t running_sum_used_bytes = used_bytes(); 2880 // For purposes of the running sum of used, verify against used 2881 size_t used_in_use_bytes = used_bytes_slow(); 2882 assert(used_bytes() == used_in_use_bytes, 2883 err_msg("used_bytes() " SIZE_FORMAT 2884 " used_bytes_slow()" SIZE_FORMAT, 2885 used_bytes(), used_in_use_bytes)); 2886 for (Metaspace::MetadataType i = Metaspace::ClassType; 2887 i < Metaspace:: MetadataTypeCount; 2888 i = (Metaspace::MetadataType)(i + 1)) { 2889 size_t used_in_use_bytes = used_bytes_slow(i); 2890 assert(used_bytes(i) == used_in_use_bytes, 2891 err_msg("used_bytes(%u) " SIZE_FORMAT 2892 " used_bytes_slow(%u)" SIZE_FORMAT, 2893 i, used_bytes(i), i, used_in_use_bytes)); 2894 } 2895 #endif 2896 } 2897 2898 void MetaspaceAux::verify_metrics() { 2899 verify_capacity(); 2900 verify_used(); 2901 } 2902 2903 2904 // Metaspace methods 2905 2906 size_t Metaspace::_first_chunk_word_size = 0; 2907 size_t Metaspace::_first_class_chunk_word_size = 0; 2908 2909 size_t Metaspace::_commit_alignment = 0; 2910 size_t Metaspace::_reserve_alignment = 0; 2911 2912 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) { 2913 initialize(lock, type); 2914 } 2915 2916 Metaspace::~Metaspace() { 2917 delete _vsm; 2918 if (using_class_space()) { 2919 delete _class_vsm; 2920 } 2921 } 2922 2923 VirtualSpaceList* Metaspace::_space_list = NULL; 2924 VirtualSpaceList* Metaspace::_class_space_list = NULL; 2925 2926 ChunkManager* Metaspace::_chunk_manager_metadata = NULL; 2927 ChunkManager* Metaspace::_chunk_manager_class = NULL; 2928 2929 #define 
VIRTUALSPACEMULTIPLIER 2 2930 2931 #ifdef _LP64 2932 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1); 2933 2934 void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) { 2935 // Figure out the narrow_klass_base and the narrow_klass_shift. The 2936 // narrow_klass_base is the lower of the metaspace base and the cds base 2937 // (if cds is enabled). The narrow_klass_shift depends on the distance 2938 // between the lower base and higher address. 2939 address lower_base; 2940 address higher_address; 2941 if (UseSharedSpaces) { 2942 higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), 2943 (address)(metaspace_base + compressed_class_space_size())); 2944 lower_base = MIN2(metaspace_base, cds_base); 2945 } else { 2946 higher_address = metaspace_base + compressed_class_space_size(); 2947 lower_base = metaspace_base; 2948 2949 uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes; 2950 // If compressed class space fits in lower 32G, we don't need a base. 2951 if (higher_address <= (address)klass_encoding_max) { 2952 lower_base = 0; // Effectively lower base is zero. 2953 } 2954 } 2955 2956 Universe::set_narrow_klass_base(lower_base); 2957 2958 if ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) { 2959 Universe::set_narrow_klass_shift(0); 2960 } else { 2961 assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces"); 2962 Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes); 2963 } 2964 } 2965 2966 // Return TRUE if the specified metaspace_base and cds_base are close enough 2967 // to work with compressed klass pointers. 2968 bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) { 2969 assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS"); 2970 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); 2971 address lower_base = MIN2((address)metaspace_base, cds_base); 2972 address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()), 2973 (address)(metaspace_base + compressed_class_space_size())); 2974 return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax); 2975 } 2976 2977 // Try to allocate the metaspace at the requested addr. 2978 void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) { 2979 assert(using_class_space(), "called improperly"); 2980 assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs"); 2981 assert(compressed_class_space_size() < KlassEncodingMetaspaceMax, 2982 "Metaspace size is too big"); 2983 assert_is_ptr_aligned(requested_addr, _reserve_alignment); 2984 assert_is_ptr_aligned(cds_base, _reserve_alignment); 2985 assert_is_size_aligned(compressed_class_space_size(), _reserve_alignment); 2986 2987 // Don't use large pages for the class space. 2988 bool large_pages = false; 2989 2990 ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(), 2991 _reserve_alignment, 2992 large_pages, 2993 requested_addr, 0); 2994 if (!metaspace_rs.is_reserved()) { 2995 if (UseSharedSpaces) { 2996 size_t increment = align_size_up(1*G, _reserve_alignment); 2997 2998 // Keep trying to allocate the metaspace, increasing the requested_addr 2999 // by 1GB each time, until we reach an address that will no longer allow 3000 // use of CDS with compressed klass pointers. 
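      // For example, if reserving at requested_addr fails, the loop below
      // retries at requested_addr + 1G, + 2G, and so on (the increment is
      // rounded up to the reserve alignment), stopping once CDS could no
      // longer be used together with compressed class pointers at the
      // candidate address.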
3001 char *addr = requested_addr; 3002 while (!metaspace_rs.is_reserved() && (addr + increment > addr) && 3003 can_use_cds_with_metaspace_addr(addr + increment, cds_base)) { 3004 addr = addr + increment; 3005 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3006 _reserve_alignment, large_pages, addr, 0); 3007 } 3008 } 3009 3010 // If no successful allocation then try to allocate the space anywhere. If 3011 // that fails then OOM doom. At this point we cannot try allocating the 3012 // metaspace as if UseCompressedClassPointers is off because too much 3013 // initialization has happened that depends on UseCompressedClassPointers. 3014 // So, UseCompressedClassPointers cannot be turned off at this point. 3015 if (!metaspace_rs.is_reserved()) { 3016 metaspace_rs = ReservedSpace(compressed_class_space_size(), 3017 _reserve_alignment, large_pages); 3018 if (!metaspace_rs.is_reserved()) { 3019 vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes", 3020 compressed_class_space_size())); 3021 } 3022 } 3023 } 3024 3025 // If we got here then the metaspace got allocated. 3026 MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass); 3027 3028 // Verify that we can use shared spaces. Otherwise, turn off CDS. 3029 if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) { 3030 FileMapInfo::stop_sharing_and_unmap( 3031 "Could not allocate metaspace at a compatible address"); 3032 } 3033 3034 set_narrow_klass_base_and_shift((address)metaspace_rs.base(), 3035 UseSharedSpaces ? (address)cds_base : 0); 3036 3037 initialize_class_space(metaspace_rs); 3038 3039 if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) { 3040 gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT, 3041 Universe::narrow_klass_base(), Universe::narrow_klass_shift()); 3042 gclog_or_tty->print_cr("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT, 3043 compressed_class_space_size(), metaspace_rs.base(), requested_addr); 3044 } 3045 } 3046 3047 // For UseCompressedClassPointers the class space is reserved above the top of 3048 // the Java heap. The argument passed in is at the base of the compressed space. 3049 void Metaspace::initialize_class_space(ReservedSpace rs) { 3050 // The reserved space size may be bigger because of alignment, esp with UseLargePages 3051 assert(rs.size() >= CompressedClassSpaceSize, 3052 err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize)); 3053 assert(using_class_space(), "Must be using class space"); 3054 _class_space_list = new VirtualSpaceList(rs); 3055 _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk); 3056 3057 if (!_class_space_list->initialization_succeeded()) { 3058 vm_exit_during_initialization("Failed to setup compressed class space virtual space list."); 3059 } 3060 } 3061 3062 #endif 3063 3064 void Metaspace::ergo_initialize() { 3065 if (DumpSharedSpaces) { 3066 // Using large pages when dumping the shared archive is currently not implemented. 
3067 FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false); 3068 } 3069 3070 size_t page_size = os::vm_page_size(); 3071 if (UseLargePages && UseLargePagesInMetaspace) { 3072 page_size = os::large_page_size(); 3073 } 3074 3075 _commit_alignment = page_size; 3076 _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity()); 3077 3078 // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will 3079 // override if MaxMetaspaceSize was set on the command line or not. 3080 // This information is needed later to conform to the specification of the 3081 // java.lang.management.MemoryUsage API. 3082 // 3083 // Ideally, we would be able to set the default value of MaxMetaspaceSize in 3084 // globals.hpp to the aligned value, but this is not possible, since the 3085 // alignment depends on other flags being parsed. 3086 MaxMetaspaceSize = align_size_down_bounded(MaxMetaspaceSize, _reserve_alignment); 3087 3088 if (MetaspaceSize > MaxMetaspaceSize) { 3089 MetaspaceSize = MaxMetaspaceSize; 3090 } 3091 3092 MetaspaceSize = align_size_down_bounded(MetaspaceSize, _commit_alignment); 3093 3094 assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize"); 3095 3096 if (MetaspaceSize < 256*K) { 3097 vm_exit_during_initialization("Too small initial Metaspace size"); 3098 } 3099 3100 MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment); 3101 MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment); 3102 3103 CompressedClassSpaceSize = align_size_down_bounded(CompressedClassSpaceSize, _reserve_alignment); 3104 set_compressed_class_space_size(CompressedClassSpaceSize); 3105 } 3106 3107 void Metaspace::global_initialize() { 3108 MetaspaceGC::initialize(); 3109 3110 // Initialize the alignment for shared spaces. 3111 int max_alignment = os::vm_allocation_granularity(); 3112 size_t cds_total = 0; 3113 3114 MetaspaceShared::set_max_alignment(max_alignment); 3115 3116 if (DumpSharedSpaces) { 3117 SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment); 3118 SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment); 3119 SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment); 3120 SharedMiscCodeSize = align_size_up(SharedMiscCodeSize, max_alignment); 3121 3122 // Initialize with the sum of the shared space sizes. The read-only 3123 // and read write metaspace chunks will be allocated out of this and the 3124 // remainder is the misc code and data chunks. 
3125 cds_total = FileMapInfo::shared_spaces_size(); 3126 cds_total = align_size_up(cds_total, _reserve_alignment); 3127 _space_list = new VirtualSpaceList(cds_total/wordSize); 3128 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk); 3129 3130 if (!_space_list->initialization_succeeded()) { 3131 vm_exit_during_initialization("Unable to dump shared archive.", NULL); 3132 } 3133 3134 #ifdef _LP64 3135 if (cds_total + compressed_class_space_size() > UnscaledClassSpaceMax) { 3136 vm_exit_during_initialization("Unable to dump shared archive.", 3137 err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space (" 3138 SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed " 3139 "klass limit: " SIZE_FORMAT, cds_total, compressed_class_space_size(), 3140 cds_total + compressed_class_space_size(), UnscaledClassSpaceMax)); 3141 } 3142 3143 // Set the compressed klass pointer base so that decoding of these pointers works 3144 // properly when creating the shared archive. 3145 assert(UseCompressedOops && UseCompressedClassPointers, 3146 "UseCompressedOops and UseCompressedClassPointers must be set"); 3147 Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom()); 3148 if (TraceMetavirtualspaceAllocation && Verbose) { 3149 gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT, 3150 _space_list->current_virtual_space()->bottom()); 3151 } 3152 3153 Universe::set_narrow_klass_shift(0); 3154 #endif 3155 3156 } else { 3157 // If using shared space, open the file that contains the shared space 3158 // and map in the memory before initializing the rest of metaspace (so 3159 // the addresses don't conflict) 3160 address cds_address = NULL; 3161 if (UseSharedSpaces) { 3162 FileMapInfo* mapinfo = new FileMapInfo(); 3163 memset(mapinfo, 0, sizeof(FileMapInfo)); 3164 3165 // Open the shared archive file, read and validate the header. If 3166 // initialization fails, shared spaces [UseSharedSpaces] are 3167 // disabled and the file is closed. 3168 // Map in spaces now also 3169 if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) { 3170 FileMapInfo::set_current_info(mapinfo); 3171 cds_total = FileMapInfo::shared_spaces_size(); 3172 cds_address = (address)mapinfo->region_base(0); 3173 } else { 3174 assert(!mapinfo->is_open() && !UseSharedSpaces, 3175 "archive file not closed or shared spaces not disabled."); 3176 } 3177 } 3178 3179 #ifdef _LP64 3180 // If UseCompressedClassPointers is set then allocate the metaspace area 3181 // above the heap and above the CDS area (if it exists). 3182 if (using_class_space()) { 3183 if (UseSharedSpaces) { 3184 char* cds_end = (char*)(cds_address + cds_total); 3185 cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment); 3186 allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address); 3187 } else { 3188 char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment); 3189 allocate_metaspace_compressed_klass_ptrs(base, 0); 3190 } 3191 } 3192 #endif 3193 3194 // Initialize these before initializing the VirtualSpaceList 3195 _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord; 3196 _first_chunk_word_size = align_word_size_up(_first_chunk_word_size); 3197 // Make the first class chunk bigger than a medium chunk so it's not put 3198 // on the medium chunk list. The next chunk will be small and progress 3199 // from there. This size calculated by -version. 
3200 _first_class_chunk_word_size = MIN2((uintx)MediumChunk*6, 3201 (CompressedClassSpaceSize/BytesPerWord)*2); 3202 _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size); 3203 // Arbitrarily set the initial virtual space to a multiple 3204 // of the boot class loader size. 3205 size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size; 3206 word_size = align_size_up(word_size, Metaspace::reserve_alignment_words()); 3207 3208 // Initialize the list of virtual spaces. 3209 _space_list = new VirtualSpaceList(word_size); 3210 _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk); 3211 3212 if (!_space_list->initialization_succeeded()) { 3213 vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL); 3214 } 3215 } 3216 3217 _tracer = new MetaspaceTracer(); 3218 } 3219 3220 void Metaspace::post_initialize() { 3221 MetaspaceGC::post_initialize(); 3222 } 3223 3224 Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype, 3225 size_t chunk_word_size, 3226 size_t chunk_bunch) { 3227 // Get a chunk from the chunk freelist 3228 Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size); 3229 if (chunk != NULL) { 3230 return chunk; 3231 } 3232 3233 return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch); 3234 } 3235 3236 void Metaspace::initialize(Mutex* lock, MetaspaceType type) { 3237 3238 assert(space_list() != NULL, 3239 "Metadata VirtualSpaceList has not been initialized"); 3240 assert(chunk_manager_metadata() != NULL, 3241 "Metadata ChunkManager has not been initialized"); 3242 3243 _vsm = new SpaceManager(NonClassType, lock); 3244 if (_vsm == NULL) { 3245 return; 3246 } 3247 size_t word_size; 3248 size_t class_word_size; 3249 vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size); 3250 3251 if (using_class_space()) { 3252 assert(class_space_list() != NULL, 3253 "Class VirtualSpaceList has not been initialized"); 3254 assert(chunk_manager_class() != NULL, 3255 "Class ChunkManager has not been initialized"); 3256 3257 // Allocate SpaceManager for classes. 3258 _class_vsm = new SpaceManager(ClassType, lock); 3259 if (_class_vsm == NULL) { 3260 return; 3261 } 3262 } 3263 3264 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 3265 3266 // Allocate chunk for metadata objects 3267 Metachunk* new_chunk = get_initialization_chunk(NonClassType, 3268 word_size, 3269 vsm()->medium_chunk_bunch()); 3270 assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks"); 3271 if (new_chunk != NULL) { 3272 // Add to this manager's list of chunks in use and current_chunk(). 
3273 vsm()->add_chunk(new_chunk, true); 3274 } 3275 3276 // Allocate chunk for class metadata objects 3277 if (using_class_space()) { 3278 Metachunk* class_chunk = get_initialization_chunk(ClassType, 3279 class_word_size, 3280 class_vsm()->medium_chunk_bunch()); 3281 if (class_chunk != NULL) { 3282 class_vsm()->add_chunk(class_chunk, true); 3283 } 3284 } 3285 3286 _alloc_record_head = NULL; 3287 _alloc_record_tail = NULL; 3288 } 3289 3290 size_t Metaspace::align_word_size_up(size_t word_size) { 3291 size_t byte_size = word_size * wordSize; 3292 return ReservedSpace::allocation_align_size_up(byte_size) / wordSize; 3293 } 3294 3295 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) { 3296 // DumpSharedSpaces doesn't use class metadata area (yet) 3297 // Also, don't use class_vsm() unless UseCompressedClassPointers is true. 3298 if (is_class_space_allocation(mdtype)) { 3299 return class_vsm()->allocate(word_size); 3300 } else { 3301 return vsm()->allocate(word_size); 3302 } 3303 } 3304 3305 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) { 3306 size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord); 3307 assert(delta_bytes > 0, "Must be"); 3308 3309 size_t after_inc = MetaspaceGC::inc_capacity_until_GC(delta_bytes); 3310 3311 // capacity_until_GC might be updated concurrently, must calculate previous value. 3312 size_t before_inc = after_inc - delta_bytes; 3313 3314 tracer()->report_gc_threshold(before_inc, after_inc, 3315 MetaspaceGCThresholdUpdater::ExpandAndAllocate); 3316 if (PrintGCDetails && Verbose) { 3317 gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT 3318 " to " SIZE_FORMAT, before_inc, after_inc); 3319 } 3320 3321 return allocate(word_size, mdtype); 3322 } 3323 3324 // Space allocated in the Metaspace. This may 3325 // be across several metadata virtual spaces. 3326 char* Metaspace::bottom() const { 3327 assert(DumpSharedSpaces, "only useful and valid for dumping shared spaces"); 3328 return (char*)vsm()->current_chunk()->bottom(); 3329 } 3330 3331 size_t Metaspace::used_words_slow(MetadataType mdtype) const { 3332 if (mdtype == ClassType) { 3333 return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0; 3334 } else { 3335 return vsm()->sum_used_in_chunks_in_use(); // includes overhead! 3336 } 3337 } 3338 3339 size_t Metaspace::free_words_slow(MetadataType mdtype) const { 3340 if (mdtype == ClassType) { 3341 return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0; 3342 } else { 3343 return vsm()->sum_free_in_chunks_in_use(); 3344 } 3345 } 3346 3347 // Space capacity in the Metaspace. It includes 3348 // space in the list of chunks from which allocations 3349 // have been made. Don't include space in the global freelist and 3350 // in the space available in the dictionary which 3351 // is already counted in some chunk. 3352 size_t Metaspace::capacity_words_slow(MetadataType mdtype) const { 3353 if (mdtype == ClassType) { 3354 return using_class_space() ? 
class_vsm()->sum_capacity_in_chunks_in_use() : 0; 3355 } else { 3356 return vsm()->sum_capacity_in_chunks_in_use(); 3357 } 3358 } 3359 3360 size_t Metaspace::used_bytes_slow(MetadataType mdtype) const { 3361 return used_words_slow(mdtype) * BytesPerWord; 3362 } 3363 3364 size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const { 3365 return capacity_words_slow(mdtype) * BytesPerWord; 3366 } 3367 3368 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) { 3369 assert(!SafepointSynchronize::is_at_safepoint() 3370 || Thread::current()->is_VM_thread(), "should be the VM thread"); 3371 3372 MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag); 3373 3374 if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) { 3375 // Dark matter. Too small for dictionary. 3376 #ifdef ASSERT 3377 Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5); 3378 #endif 3379 return; 3380 } 3381 if (is_class && using_class_space()) { 3382 class_vsm()->deallocate(ptr, word_size); 3383 } else { 3384 vsm()->deallocate(ptr, word_size); 3385 } 3386 } 3387 3388 3389 MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size, 3390 bool read_only, MetaspaceObj::Type type, TRAPS) { 3391 if (HAS_PENDING_EXCEPTION) { 3392 assert(false, "Should not allocate with exception pending"); 3393 return NULL; // caller does a CHECK_NULL too 3394 } 3395 3396 assert(loader_data != NULL, "Should never pass around a NULL loader_data. " 3397 "ClassLoaderData::the_null_class_loader_data() should have been used."); 3398 3399 // Allocate in metaspaces without taking out a lock, because it deadlocks 3400 // with the SymbolTable_lock. Dumping is single threaded for now. We'll have 3401 // to revisit this for application class data sharing. 3402 if (DumpSharedSpaces) { 3403 assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity"); 3404 Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace(); 3405 MetaWord* result = space->allocate(word_size, NonClassType); 3406 if (result == NULL) { 3407 report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite); 3408 } 3409 3410 space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size)); 3411 3412 // Zero initialize. 3413 Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0); 3414 3415 return result; 3416 } 3417 3418 MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType; 3419 3420 // Try to allocate metadata. 3421 MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype); 3422 3423 if (result == NULL) { 3424 tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype); 3425 3426 // Allocation failed. 3427 if (is_init_completed()) { 3428 // Only start a GC if the bootstrapping has completed. 3429 3430 // Try to clean out some memory and retry. 3431 result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation( 3432 loader_data, word_size, mdtype); 3433 } 3434 } 3435 3436 if (result == NULL) { 3437 report_metadata_oome(loader_data, word_size, type, mdtype, CHECK_NULL); 3438 } 3439 3440 // Zero initialize. 
3441 Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0); 3442 3443 return result; 3444 } 3445 3446 size_t Metaspace::class_chunk_size(size_t word_size) { 3447 assert(using_class_space(), "Has to use class space"); 3448 return class_vsm()->calc_chunk_size(word_size); 3449 } 3450 3451 void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) { 3452 tracer()->report_metadata_oom(loader_data, word_size, type, mdtype); 3453 3454 // If result is still null, we are out of memory. 3455 if (Verbose && TraceMetadataChunkAllocation) { 3456 gclog_or_tty->print_cr("Metaspace allocation failed for size " 3457 SIZE_FORMAT, word_size); 3458 if (loader_data->metaspace_or_null() != NULL) { 3459 loader_data->dump(gclog_or_tty); 3460 } 3461 MetaspaceAux::dump(gclog_or_tty); 3462 } 3463 3464 bool out_of_compressed_class_space = false; 3465 if (is_class_space_allocation(mdtype)) { 3466 Metaspace* metaspace = loader_data->metaspace_non_null(); 3467 out_of_compressed_class_space = 3468 MetaspaceAux::committed_bytes(Metaspace::ClassType) + 3469 (metaspace->class_chunk_size(word_size) * BytesPerWord) > 3470 CompressedClassSpaceSize; 3471 } 3472 3473 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support 3474 const char* space_string = out_of_compressed_class_space ? 3475 "Compressed class space" : "Metaspace"; 3476 3477 report_java_out_of_memory(space_string); 3478 3479 if (JvmtiExport::should_post_resource_exhausted()) { 3480 JvmtiExport::post_resource_exhausted( 3481 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR, 3482 space_string); 3483 } 3484 3485 if (!is_init_completed()) { 3486 vm_exit_during_initialization("OutOfMemoryError", space_string); 3487 } 3488 3489 if (out_of_compressed_class_space) { 3490 THROW_OOP(Universe::out_of_memory_error_class_metaspace()); 3491 } else { 3492 THROW_OOP(Universe::out_of_memory_error_metaspace()); 3493 } 3494 } 3495 3496 const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) { 3497 switch (mdtype) { 3498 case Metaspace::ClassType: return "Class"; 3499 case Metaspace::NonClassType: return "Metadata"; 3500 default: 3501 assert(false, err_msg("Got bad mdtype: %d", (int) mdtype)); 3502 return NULL; 3503 } 3504 } 3505 3506 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) { 3507 assert(DumpSharedSpaces, "sanity"); 3508 3509 AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize); 3510 if (_alloc_record_head == NULL) { 3511 _alloc_record_head = _alloc_record_tail = rec; 3512 } else { 3513 _alloc_record_tail->_next = rec; 3514 _alloc_record_tail = rec; 3515 } 3516 } 3517 3518 void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) { 3519 assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces"); 3520 3521 address last_addr = (address)bottom(); 3522 3523 for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) { 3524 address ptr = rec->_ptr; 3525 if (last_addr < ptr) { 3526 closure->doit(last_addr, MetaspaceObj::UnknownType, ptr - last_addr); 3527 } 3528 closure->doit(ptr, rec->_type, rec->_byte_size); 3529 last_addr = ptr + rec->_byte_size; 3530 } 3531 3532 address top = ((address)bottom()) + used_bytes_slow(Metaspace::NonClassType); 3533 if (last_addr < top) { 3534 closure->doit(last_addr, MetaspaceObj::UnknownType, top - last_addr); 3535 } 3536 } 3537 3538 void Metaspace::purge(MetadataType mdtype) { 3539 get_space_list(mdtype)->purge(get_chunk_manager(mdtype)); 3540 } 3541 3542 
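// Purge both the non-class metaspace and, when a compressed class space is in
// use, the class metaspace. The expand lock is taken so purging cannot race
// with concurrent expansion of the virtual space lists.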
void Metaspace::purge() { 3543 MutexLockerEx cl(SpaceManager::expand_lock(), 3544 Mutex::_no_safepoint_check_flag); 3545 purge(NonClassType); 3546 if (using_class_space()) { 3547 purge(ClassType); 3548 } 3549 } 3550 3551 void Metaspace::print_on(outputStream* out) const { 3552 // Print both class virtual space counts and metaspace. 3553 if (Verbose) { 3554 vsm()->print_on(out); 3555 if (using_class_space()) { 3556 class_vsm()->print_on(out); 3557 } 3558 } 3559 } 3560 3561 bool Metaspace::contains(const void* ptr) { 3562 if (UseSharedSpaces && MetaspaceShared::is_in_shared_space(ptr)) { 3563 return true; 3564 } 3565 3566 if (using_class_space() && get_space_list(ClassType)->contains(ptr)) { 3567 return true; 3568 } 3569 3570 return get_space_list(NonClassType)->contains(ptr); 3571 } 3572 3573 void Metaspace::verify() { 3574 vsm()->verify(); 3575 if (using_class_space()) { 3576 class_vsm()->verify(); 3577 } 3578 } 3579 3580 void Metaspace::dump(outputStream* const out) const { 3581 out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm()); 3582 vsm()->dump(out); 3583 if (using_class_space()) { 3584 out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm()); 3585 class_vsm()->dump(out); 3586 } 3587 } 3588 3589 /////////////// Unit tests /////////////// 3590 3591 #ifndef PRODUCT 3592 3593 class TestMetaspaceAuxTest : AllStatic { 3594 public: 3595 static void test_reserved() { 3596 size_t reserved = MetaspaceAux::reserved_bytes(); 3597 3598 assert(reserved > 0, "assert"); 3599 3600 size_t committed = MetaspaceAux::committed_bytes(); 3601 assert(committed <= reserved, "assert"); 3602 3603 size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType); 3604 assert(reserved_metadata > 0, "assert"); 3605 assert(reserved_metadata <= reserved, "assert"); 3606 3607 if (UseCompressedClassPointers) { 3608 size_t reserved_class = MetaspaceAux::reserved_bytes(Metaspace::ClassType); 3609 assert(reserved_class > 0, "assert"); 3610 assert(reserved_class < reserved, "assert"); 3611 } 3612 } 3613 3614 static void test_committed() { 3615 size_t committed = MetaspaceAux::committed_bytes(); 3616 3617 assert(committed > 0, "assert"); 3618 3619 size_t reserved = MetaspaceAux::reserved_bytes(); 3620 assert(committed <= reserved, "assert"); 3621 3622 size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType); 3623 assert(committed_metadata > 0, "assert"); 3624 assert(committed_metadata <= committed, "assert"); 3625 3626 if (UseCompressedClassPointers) { 3627 size_t committed_class = MetaspaceAux::committed_bytes(Metaspace::ClassType); 3628 assert(committed_class > 0, "assert"); 3629 assert(committed_class < committed, "assert"); 3630 } 3631 } 3632 3633 static void test_virtual_space_list_large_chunk() { 3634 VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity()); 3635 MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag); 3636 // A size larger than VirtualSpaceSize (256k) and add one page to make it _not_ be 3637 // vm_allocation_granularity aligned on Windows. 
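      // Note that the sizes below are word sizes, as expected by get_new_chunk().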
3638       size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
3639       large_size += (os::vm_page_size()/BytesPerWord);
3640       vs_list->get_new_chunk(large_size, large_size, 0);
3641     }
3642
3643     static void test() {
3644       test_reserved();
3645       test_committed();
3646       test_virtual_space_list_large_chunk();
3647     }
3648 };
3649
3650 void TestMetaspaceAux_test() {
3651   TestMetaspaceAuxTest::test();
3652 }
3653
3654 class TestVirtualSpaceNodeTest {
3655   static void chunk_up(size_t words_left, size_t& num_medium_chunks,
3656                        size_t& num_small_chunks,
3657                        size_t& num_specialized_chunks) {
3658     num_medium_chunks = words_left / MediumChunk;
3659     words_left = words_left % MediumChunk;
3660
3661     num_small_chunks = words_left / SmallChunk;
3662     words_left = words_left % SmallChunk;
3663     // how many specialized chunks can we get?
3664     num_specialized_chunks = words_left / SpecializedChunk;
3665     assert(words_left % SpecializedChunk == 0, "should be nothing left");
3666   }
3667
3668  public:
3669   static void test() {
3670     MutexLockerEx ml(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
3671     const size_t vsn_test_size_words = MediumChunk * 4;
3672     const size_t vsn_test_size_bytes = vsn_test_size_words * BytesPerWord;
3673
3674     // The chunk sizes must be multiples of each other, or this will fail
3675     STATIC_ASSERT(MediumChunk % SmallChunk == 0);
3676     STATIC_ASSERT(SmallChunk % SpecializedChunk == 0);
3677
3678     { // No committed memory in VSN
3679       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3680       VirtualSpaceNode vsn(vsn_test_size_bytes);
3681       vsn.initialize();
3682       vsn.retire(&cm);
3683       assert(cm.sum_free_chunks_count() == 0, "did not commit any memory in the VSN");
3684     }
3685
3686     { // All of VSN is committed, half is used by chunks
3687       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3688       VirtualSpaceNode vsn(vsn_test_size_bytes);
3689       vsn.initialize();
3690       vsn.expand_by(vsn_test_size_words, vsn_test_size_words);
3691       vsn.get_chunk_vs(MediumChunk);
3692       vsn.get_chunk_vs(MediumChunk);
3693       vsn.retire(&cm);
3694       assert(cm.sum_free_chunks_count() == 2, "should have been memory left for 2 medium chunks");
3695       assert(cm.sum_free_chunks() == 2*MediumChunk, "sizes should add up");
3696     }
3697
3698     { // 4 pages of VSN is committed, some is used by chunks
3699       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3700       VirtualSpaceNode vsn(vsn_test_size_bytes);
3701       const size_t page_chunks = 4 * (size_t)os::vm_page_size() / BytesPerWord;
3702       assert(page_chunks < MediumChunk, "Test expects medium chunks to be at least 4*page_size");
3703       vsn.initialize();
3704       vsn.expand_by(page_chunks, page_chunks);
3705       vsn.get_chunk_vs(SmallChunk);
3706       vsn.get_chunk_vs(SpecializedChunk);
3707       vsn.retire(&cm);
3708
3709       // committed - used = words left to retire
3710       const size_t words_left = page_chunks - SmallChunk - SpecializedChunk;
3711
3712       size_t num_medium_chunks, num_small_chunks, num_spec_chunks;
3713       chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks);
3714
3715       assert(num_medium_chunks == 0, "should not get any medium chunks");
3716       assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks");
3717       assert(cm.sum_free_chunks() == words_left, "sizes should add up");
3718     }
3719
3720     { // Half of VSN is committed, a humongous chunk is used
3721       ChunkManager cm(SpecializedChunk, SmallChunk, MediumChunk);
3722       VirtualSpaceNode vsn(vsn_test_size_bytes);
3723       vsn.initialize();
3724
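      // Commit two medium chunks' worth of memory, hand out a single humongous
      // chunk, then retire what is left to the ChunkManager.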
vsn.expand_by(MediumChunk * 2, MediumChunk * 2); 3725 vsn.get_chunk_vs(MediumChunk + SpecializedChunk); // Humongous chunks will be aligned up to MediumChunk + SpecializedChunk 3726 vsn.retire(&cm); 3727 3728 const size_t words_left = MediumChunk * 2 - (MediumChunk + SpecializedChunk); 3729 size_t num_medium_chunks, num_small_chunks, num_spec_chunks; 3730 chunk_up(words_left, num_medium_chunks, num_small_chunks, num_spec_chunks); 3731 3732 assert(num_medium_chunks == 0, "should not get any medium chunks"); 3733 assert(cm.sum_free_chunks_count() == (num_small_chunks + num_spec_chunks), "should be space for 3 chunks"); 3734 assert(cm.sum_free_chunks() == words_left, "sizes should add up"); 3735 } 3736 3737 } 3738 3739 #define assert_is_available_positive(word_size) \ 3740 assert(vsn.is_available(word_size), \ 3741 err_msg(#word_size ": " PTR_FORMAT " bytes were not available in " \ 3742 "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \ 3743 (uintptr_t)(word_size * BytesPerWord), vsn.bottom(), vsn.end())); 3744 3745 #define assert_is_available_negative(word_size) \ 3746 assert(!vsn.is_available(word_size), \ 3747 err_msg(#word_size ": " PTR_FORMAT " bytes should not be available in " \ 3748 "VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \ 3749 (uintptr_t)(word_size * BytesPerWord), vsn.bottom(), vsn.end())); 3750 3751 static void test_is_available_positive() { 3752 // Reserve some memory. 3753 VirtualSpaceNode vsn(os::vm_allocation_granularity()); 3754 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode"); 3755 3756 // Commit some memory. 3757 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord; 3758 bool expanded = vsn.expand_by(commit_word_size, commit_word_size); 3759 assert(expanded, "Failed to commit"); 3760 3761 // Check that is_available accepts the committed size. 3762 assert_is_available_positive(commit_word_size); 3763 3764 // Check that is_available accepts half the committed size. 3765 size_t expand_word_size = commit_word_size / 2; 3766 assert_is_available_positive(expand_word_size); 3767 } 3768 3769 static void test_is_available_negative() { 3770 // Reserve some memory. 3771 VirtualSpaceNode vsn(os::vm_allocation_granularity()); 3772 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode"); 3773 3774 // Commit some memory. 3775 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord; 3776 bool expanded = vsn.expand_by(commit_word_size, commit_word_size); 3777 assert(expanded, "Failed to commit"); 3778 3779 // Check that is_available doesn't accept a too large size. 3780 size_t two_times_commit_word_size = commit_word_size * 2; 3781 assert_is_available_negative(two_times_commit_word_size); 3782 } 3783 3784 static void test_is_available_overflow() { 3785 // Reserve some memory. 3786 VirtualSpaceNode vsn(os::vm_allocation_granularity()); 3787 assert(vsn.initialize(), "Failed to setup VirtualSpaceNode"); 3788 3789 // Commit some memory. 3790 size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord; 3791 bool expanded = vsn.expand_by(commit_word_size, commit_word_size); 3792 assert(expanded, "Failed to commit"); 3793 3794 // Calculate a size that will overflow the virtual space size. 3795 void* virtual_space_max = (void*)(uintptr_t)-1; 3796 size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1); 3797 size_t overflow_size = bottom_to_max + BytesPerWord; 3798 size_t overflow_word_size = overflow_size / BytesPerWord; 3799 3800 // Check that is_available can handle the overflow. 
3801 assert_is_available_negative(overflow_word_size); 3802 } 3803 3804 static void test_is_available() { 3805 TestVirtualSpaceNodeTest::test_is_available_positive(); 3806 TestVirtualSpaceNodeTest::test_is_available_negative(); 3807 TestVirtualSpaceNodeTest::test_is_available_overflow(); 3808 } 3809 }; 3810 3811 void TestVirtualSpaceNode_test() { 3812 TestVirtualSpaceNodeTest::test(); 3813 TestVirtualSpaceNodeTest::test_is_available(); 3814 } 3815 #endif