1 /*
   2  * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
  27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
  28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
  29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
  30 #include "gc_implementation/shared/liveRange.hpp"
  31 #include "gc_implementation/shared/spaceDecorator.hpp"
  32 #include "gc_interface/collectedHeap.inline.hpp"
  33 #include "memory/allocation.inline.hpp"
  34 #include "memory/blockOffsetTable.inline.hpp"
  35 #include "memory/resourceArea.hpp"
  36 #include "memory/space.inline.hpp"
  37 #include "memory/universe.inline.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "runtime/globals.hpp"
  40 #include "runtime/handles.inline.hpp"
  41 #include "runtime/init.hpp"
  42 #include "runtime/java.hpp"
  43 #include "runtime/orderAccess.inline.hpp"
  44 #include "runtime/vmThread.hpp"
  45 #include "utilities/copy.hpp"
  46 
  47 /////////////////////////////////////////////////////////////////////////
  48 //// CompactibleFreeListSpace
  49 /////////////////////////////////////////////////////////////////////////
  50 
  51 // highest-ranked free list lock rank
  52 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  53 
  54 // Defaults are 0 so things will break badly if incorrectly initialized.
  55 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
  56 size_t CompactibleFreeListSpace::IndexSetStride = 0;
  57 
  58 size_t MinChunkSize = 0;
  59 
  60 void CompactibleFreeListSpace::set_cms_values() {
  61   // Set CMS global values
  62   assert(MinChunkSize == 0, "already set");
  63 
  64   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  65   // for chunks to contain a FreeChunk.
  66   size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  67   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
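  // Illustrative only (the actual figures are platform-dependent): on a
  // 64-bit VM with MinObjAlignmentInBytes == 8, if sizeof(FreeChunk) were
  // 24 bytes, min_chunk_size_in_bytes would round to 24 and MinChunkSize
  // would be 3 words; the indexed free lists set up below would then start
  // at size 3 and step by MinObjAlignment.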
  68 
  69   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  70   IndexSetStart  = MinChunkSize;
  71   IndexSetStride = MinObjAlignment;
  72 }
  73 
  74 // Constructor
  75 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
  76   MemRegion mr, bool use_adaptive_freelists,
  77   FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
  78   _dictionaryChoice(dictionaryChoice),
  79   _adaptive_freelists(use_adaptive_freelists),
  80   _bt(bs, mr),
  81   // free list locks are in the range of values taken by _lockRank.
  82   // This range is currently [_leaf+2, _leaf+3].
  83   // Note: this requires that CFLspace c'tors
  84   // are called serially in the order in which the locks
  85   // are acquired in the program text. This is true today.
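  // (The post-decrement of _lockRank just below is what hands each successive
  //  CFLspace c'tor the next lower rank; hence the serial-construction
  //  requirement noted above.)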
  86   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
  87   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  88                           "CompactibleFreeListSpace._dict_par_lock", true),
  89   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  90                     CMSRescanMultiple),
  91   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  92                     CMSConcMarkMultiple),
  93   _collector(NULL)
  94 {
  95   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
  96          "FreeChunk is larger than expected");
  97   _bt.set_space(this);
  98   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  99   // We have all of "mr", all of which we place in the dictionary
 100   // as one big chunk. We'll need to decide here which of several
 101   // possible alternative dictionary implementations to use. For
 102   // now the choice is easy, since we have only one working
 103   // implementation, namely, the simple binary tree (splaying
 104   // temporarily disabled).
 105   switch (dictionaryChoice) {
 106     case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
 107       _dictionary = new AFLBinaryTreeDictionary(mr);
 108       break;
 109     case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
 110     case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
 111     default:
 112       warning("dictionaryChoice: selected option not understood; using"
 113               " default BinaryTreeDictionary implementation instead.");
 114   }
 115   assert(_dictionary != NULL, "CMS dictionary initialization");
 116   // The indexed free lists are initially all empty and are lazily
 117   // filled in on demand. Initialize the array elements to NULL.
 118   initializeIndexedFreeListArray();
 119 
 120   // Not using adaptive free lists assumes that allocation is first
 121   // from the linAB's.  Also, a CMS perm gen that can be compacted
 122   // has to have the klass's klassKlass allocated at a lower
 123   // address in the heap than the klass, so that the klassKlass is
 124   // moved to its new location before the klass is moved.
 125   // Set the _refillSize for the linear allocation blocks
 126   if (!use_adaptive_freelists) {
 127     FreeChunk* fc = _dictionary->get_chunk(mr.word_size(),
 128                                            FreeBlockDictionary<FreeChunk>::atLeast);
 129     // The small linAB initially has all the space and will allocate
 130     // a chunk of any size.
 131     HeapWord* addr = (HeapWord*) fc;
 132     _smallLinearAllocBlock.set(addr, fc->size(),
 133       1024*SmallForLinearAlloc, fc->size());
 134     // Note that _unallocated_block is not updated here.
 135     // Allocations from the linear allocation block should
 136     // update it.
 137   } else {
 138     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 139                                SmallForLinearAlloc);
 140   }
 141   // CMSIndexedFreeListReplenish should be at least 1
 142   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 143   _promoInfo.setSpace(this);
 144   if (UseCMSBestFit) {
 145     _fitStrategy = FreeBlockBestFitFirst;
 146   } else {
 147     _fitStrategy = FreeBlockStrategyNone;
 148   }
 149   check_free_list_consistency();
 150 
 151   // Initialize locks for parallel case.
 152 
 153   if (CollectedHeap::use_parallel_gc_threads()) {
 154     for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 155       _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
 156                                               "a freelist par lock",
 157                                               true);
 158       DEBUG_ONLY(
 159         _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 160       )
 161     }
 162     _dictionary->set_par_lock(&_parDictionaryAllocLock);
 163   }
 164 }
 165 
 166 // Like CompactibleSpace forward() but always calls cross_threshold() to
 167 // update the block offset table.  Removed initialize_threshold call because
 168 // CFLS does not use a block offset array for contiguous spaces.
 169 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 170                                     CompactPoint* cp, HeapWord* compact_top) {
 171   // q is alive
 172   // First check if we should switch compaction space
 173   assert(this == cp->space, "'this' should be current compaction space.");
 174   size_t compaction_max_size = pointer_delta(end(), compact_top);
 175   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 176     "virtual adjustObjectSize_v() method is not correct");
 177   size_t adjusted_size = adjustObjectSize(size);
 178   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 179          "no small fragments allowed");
 180   assert(minimum_free_block_size() == MinChunkSize,
 181          "for de-virtualized reference below");
 182   // Can't leave a nonzero-size residual fragment smaller than MinChunkSize
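  // (Illustrative only: with MinChunkSize == 3 words, a 10-word object cannot
  //  be placed in a 12-word remainder, since that would strand a 2-word
  //  fragment, but it can be placed when the remainder is exactly 10 words or
  //  is at least 13 words.)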
 183   if (adjusted_size + MinChunkSize > compaction_max_size &&
 184       adjusted_size != compaction_max_size) {
 185     do {
 186       // switch to next compaction space
 187       cp->space->set_compaction_top(compact_top);
 188       cp->space = cp->space->next_compaction_space();
 189       if (cp->space == NULL) {
 190         cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
 191         assert(cp->gen != NULL, "compaction must succeed");
 192         cp->space = cp->gen->first_compaction_space();
 193         assert(cp->space != NULL, "generation must have a first compaction space");
 194       }
 195       compact_top = cp->space->bottom();
 196       cp->space->set_compaction_top(compact_top);
 197       // The correct adjusted_size may not be the same as that for this method
 198       // (i.e., cp->space may no longer be "this"), so adjust the size again.
 199       // Use the virtual method here; above, the non-virtual version was used
 200       // to save the virtual dispatch.
 201       adjusted_size = cp->space->adjust_object_size_v(size);
 202       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 203       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 204     } while (adjusted_size > compaction_max_size);
 205   }
 206 
 207   // store the forwarding pointer into the mark word
 208   if ((HeapWord*)q != compact_top) {
 209     q->forward_to(oop(compact_top));
 210     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 211   } else {
 212     // if the object isn't moving we can just set the mark to the default
 213     // mark and handle it specially later on.
 214     q->init_mark();
 215     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 216   }
 217 
 218   compact_top += adjusted_size;
 219 
 220   // we need to update the offset table so that the beginnings of objects can be
 221   // found during scavenge.  Note that we are updating the offset table based on
 222   // where the object will be once the compaction phase finishes.
 223 
 224   // Always call cross_threshold().  A contiguous space calls it only when
 225   // the compaction_top exceeds the current threshold, but that restriction
 226   // does not apply to a non-contiguous space.
 227   cp->threshold =
 228     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 229   return compact_top;
 230 }
 231 
 232 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 233 // and use of single_block instead of alloc_block.  The name here is not really
 234 // appropriate - maybe a more general name could be invented for both the
 235 // contiguous and noncontiguous spaces.
 236 
 237 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 238   _bt.single_block(start, the_end);
 239   return end();
 240 }
 241 
 242 // Initialize them to NULL.
 243 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 244   for (size_t i = 0; i < IndexSetSize; i++) {
 245     // Note that on platforms where objects are double word aligned,
 246     // the odd array elements are not used.  It is convenient, however,
 247     // to map directly from the object size to the array element.
 248     _indexedFreeList[i].reset(IndexSetSize);
 249     _indexedFreeList[i].set_size(i);
 250     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 251     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 252     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 253     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 254   }
 255 }
 256 
 257 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 258   for (size_t i = 1; i < IndexSetSize; i++) {
 259     assert(_indexedFreeList[i].size() == (size_t) i,
 260       "Indexed free list sizes are incorrect");
 261     _indexedFreeList[i].reset(IndexSetSize);
 262     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 263     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 264     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 265     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 266   }
 267 }
 268 
 269 void CompactibleFreeListSpace::reset(MemRegion mr) {
 270   resetIndexedFreeListArray();
 271   dictionary()->reset();
 272   if (BlockOffsetArrayUseUnallocatedBlock) {
 273     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 274     // Everything's allocated until proven otherwise.
 275     _bt.set_unallocated_block(end());
 276   }
 277   if (!mr.is_empty()) {
 278     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 279     _bt.single_block(mr.start(), mr.word_size());
 280     FreeChunk* fc = (FreeChunk*) mr.start();
 281     fc->set_size(mr.word_size());
 282     if (mr.word_size() >= IndexSetSize ) {
 283       returnChunkToDictionary(fc);
 284     } else {
 285       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 286       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
 287     }
 288     coalBirth(mr.word_size());
 289   }
 290   _promoInfo.reset();
 291   _smallLinearAllocBlock._ptr = NULL;
 292   _smallLinearAllocBlock._word_size = 0;
 293 }
 294 
 295 void CompactibleFreeListSpace::reset_after_compaction() {
 296   // Reset the space to the new reality - one free chunk.
 297   MemRegion mr(compaction_top(), end());
 298   reset(mr);
 299   // Now refill the linear allocation block(s) if possible.
 300   if (_adaptive_freelists) {
 301     refillLinearAllocBlocksIfNeeded();
 302   } else {
 303     // Place as much of mr in the linAB as we can get,
 304     // provided it was big enough to go into the dictionary.
 305     FreeChunk* fc = dictionary()->find_largest_dict();
 306     if (fc != NULL) {
 307       assert(fc->size() == mr.word_size(),
 308              "Why was the chunk broken up?");
 309       removeChunkFromDictionary(fc);
 310       HeapWord* addr = (HeapWord*) fc;
 311       _smallLinearAllocBlock.set(addr, fc->size(),
 312         1024*SmallForLinearAlloc, fc->size());
 313       // Note that _unallocated_block is not updated here.
 314     }
 315   }
 316 }
 317 
 318 // Walks the entire dictionary, returning a coterminal
 319 // chunk, if it exists. Use with caution since it involves
 320 // a potentially complete walk of a potentially large tree.
 321 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 322 
 323   assert_lock_strong(&_freelistLock);
 324 
 325   return dictionary()->find_chunk_ends_at(end());
 326 }
 327 
 328 
 329 #ifndef PRODUCT
 330 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 331   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 332     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
 333   }
 334 }
 335 
 336 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 337   size_t sum = 0;
 338   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 339     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
 340   }
 341   return sum;
 342 }
 343 
 344 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 345   size_t count = 0;
 346   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
 347     debug_only(
 348       ssize_t total_list_count = 0;
 349       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 350          fc = fc->next()) {
 351         total_list_count++;
 352       }
 353       assert(total_list_count ==  _indexedFreeList[i].count(),
 354         "Count in list is incorrect");
 355     )
 356     count += _indexedFreeList[i].count();
 357   }
 358   return count;
 359 }
 360 
 361 size_t CompactibleFreeListSpace::totalCount() {
 362   size_t num = totalCountInIndexedFreeLists();
 363   num +=  dictionary()->total_count();
 364   if (_smallLinearAllocBlock._word_size != 0) {
 365     num++;
 366   }
 367   return num;
 368 }
 369 #endif
 370 
 371 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 372   FreeChunk* fc = (FreeChunk*) p;
 373   return fc->is_free();
 374 }
 375 
 376 size_t CompactibleFreeListSpace::used() const {
 377   return capacity() - free();
 378 }
 379 
 380 size_t CompactibleFreeListSpace::free() const {
 381   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 382   // if you do this while the structures are in flux you
 383   // may get an approximate answer only; for instance
 384   // because there is concurrent allocation either
 385   // directly by mutators or for promotion during a GC.
 386   // It's "MT-safe", however, in the sense that you are guaranteed
 387   // not to crash and burn, for instance, because of walking
 388   // pointers that could disappear as you were walking them.
 389   // The approximation is because the various components
 390   // that are read below are not read atomically (and,
 391   // further, the computation of totalSizeInIndexedFreeLists()
 392   // is itself a non-atomic computation). The normal use of
 393   // this is during a resize operation at the end of GC
 394   // and at that time you are guaranteed to get the
 395   // correct actual value. However, for instance, this is
 396   // also read completely asynchronously by the "perf-sampler"
 397   // that supports jvmstat, and you are apt to see the values
 398   // flicker in such cases.
 399   assert(_dictionary != NULL, "No _dictionary?");
 400   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
 401           totalSizeInIndexedFreeLists() +
 402           _smallLinearAllocBlock._word_size) * HeapWordSize;
 403 }
 404 
 405 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 406   assert(_dictionary != NULL, "No _dictionary?");
 407   assert_locked();
 408   size_t res = _dictionary->max_chunk_size();
 409   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 410                        (size_t) SmallForLinearAlloc - 1));
 411   // XXX the following could potentially be pretty slow;
 412   // should one, pessimistically for the rare cases when res
 413   // calculated above is less than IndexSetSize,
 414   // just return res calculated above? My reasoning was that
 415   // those cases will be so rare that the extra time spent doesn't
 416   // really matter....
 417   // Note: do not change the loop test i >= res + IndexSetStride
 418   // to i > res below, because i is unsigned and res may be zero.
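  // (With "i > res" and res == 0, i could decrement past zero and wrap around
  //  to a huge unsigned value, so the loop might never terminate properly.)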
 419   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 420        i -= IndexSetStride) {
 421     if (_indexedFreeList[i].head() != NULL) {
 422       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 423       return i;
 424     }
 425   }
 426   return res;
 427 }
 428 
 429 void LinearAllocBlock::print_on(outputStream* st) const {
 430   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
 431             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
 432             p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
 433 }
 434 
 435 void CompactibleFreeListSpace::print_on(outputStream* st) const {
 436   st->print_cr("COMPACTIBLE FREELIST SPACE");
 437   st->print_cr(" Space:");
 438   Space::print_on(st);
 439 
 440   st->print_cr("promoInfo:");
 441   _promoInfo.print_on(st);
 442 
 443   st->print_cr("_smallLinearAllocBlock");
 444   _smallLinearAllocBlock.print_on(st);
 445 
 446   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 447 
 448   st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
 449                _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
 450 }
 451 
 452 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 453 const {
 454   reportIndexedFreeListStatistics();
 455   gclog_or_tty->print_cr("Layout of Indexed Freelists");
 456   gclog_or_tty->print_cr("---------------------------");
 457   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
 458   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 459     _indexedFreeList[i].print_on(gclog_or_tty);
 460     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 461          fc = fc->next()) {
 462       gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
 463                           p2i(fc), p2i((HeapWord*)fc + i),
 464                           fc->cantCoalesce() ? "\t CC" : "");
 465     }
 466   }
 467 }
 468 
 469 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
 470 const {
 471   _promoInfo.print_on(st);
 472 }
 473 
 474 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 475 const {
 476   _dictionary->report_statistics();
 477   st->print_cr("Layout of Freelists in Tree");
 478   st->print_cr("---------------------------");
 479   _dictionary->print_free_lists(st);
 480 }
 481 
 482 class BlkPrintingClosure: public BlkClosure {
 483   const CMSCollector*             _collector;
 484   const CompactibleFreeListSpace* _sp;
 485   const CMSBitMap*                _live_bit_map;
 486   const bool                      _post_remark;
 487   outputStream*                   _st;
 488 public:
 489   BlkPrintingClosure(const CMSCollector* collector,
 490                      const CompactibleFreeListSpace* sp,
 491                      const CMSBitMap* live_bit_map,
 492                      outputStream* st):
 493     _collector(collector),
 494     _sp(sp),
 495     _live_bit_map(live_bit_map),
 496     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
 497     _st(st) { }
 498   size_t do_blk(HeapWord* addr);
 499 };
 500 
 501 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
 502   size_t sz = _sp->block_size_no_stall(addr, _collector);
 503   assert(sz != 0, "Should always be able to compute a size");
 504   if (_sp->block_is_obj(addr)) {
 505     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
 506     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
 507       p2i(addr),
 508       dead ? "dead" : "live",
 509       sz,
 510       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
 511     if (CMSPrintObjectsInDump && !dead) {
 512       oop(addr)->print_on(_st);
 513       _st->print_cr("--------------------------------------");
 514     }
 515   } else { // free block
 516     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
 517       p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
 518     if (CMSPrintChunksInDump) {
 519       ((FreeChunk*)addr)->print_on(_st);
 520       _st->print_cr("--------------------------------------");
 521     }
 522   }
 523   return sz;
 524 }
 525 
 526 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
 527   outputStream* st) {
 528   st->print_cr("\n=========================");
 529   st->print_cr("Block layout in CMS Heap:");
 530   st->print_cr("=========================");
 531   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
 532   blk_iterate(&bpcl);
 533 
 534   st->print_cr("\n=======================================");
 535   st->print_cr("Order & Layout of Promotion Info Blocks");
 536   st->print_cr("=======================================");
 537   print_promo_info_blocks(st);
 538 
 539   st->print_cr("\n===========================");
 540   st->print_cr("Order of Indexed Free Lists");
 541   st->print_cr("===========================");
 542   print_indexed_free_lists(st);
 543 
 544   st->print_cr("\n=================================");
 545   st->print_cr("Order of Free Lists in Dictionary");
 546   st->print_cr("=================================");
 547   print_dictionary_free_lists(st);
 548 }
 549 
 550 
 551 void CompactibleFreeListSpace::reportFreeListStatistics() const {
 552   assert_lock_strong(&_freelistLock);
 553   assert(PrintFLSStatistics != 0, "Reporting error");
 554   _dictionary->report_statistics();
 555   if (PrintFLSStatistics > 1) {
 556     reportIndexedFreeListStatistics();
 557     size_t total_size = totalSizeInIndexedFreeLists() +
 558                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
 559     gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
 560   }
 561 }
 562 
 563 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
 564   assert_lock_strong(&_freelistLock);
 565   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
 566                       "--------------------------------\n");
 567   size_t total_size = totalSizeInIndexedFreeLists();
 568   size_t   free_blocks = numFreeBlocksInIndexedFreeLists();
 569   gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
 570   gclog_or_tty->print("Max   Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
 571   gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
 572   if (free_blocks != 0) {
 573     gclog_or_tty->print("Av.  Block  Size: " SIZE_FORMAT "\n", total_size/free_blocks);
 574   }
 575 }
 576 
 577 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 578   size_t res = 0;
 579   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 580     debug_only(
 581       ssize_t recount = 0;
 582       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 583          fc = fc->next()) {
 584         recount += 1;
 585       }
 586       assert(recount == _indexedFreeList[i].count(),
 587         "Incorrect count in list");
 588     )
 589     res += _indexedFreeList[i].count();
 590   }
 591   return res;
 592 }
 593 
 594 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 595   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 596     if (_indexedFreeList[i].head() != NULL) {
 597       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 598       return (size_t)i;
 599     }
 600   }
 601   return 0;
 602 }
 603 
 604 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 605   HeapWord* prevEnd = end();
 606   assert(prevEnd != value, "unnecessary set_end call");
 607   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 608         "New end is below unallocated block");
 609   _end = value;
 610   if (prevEnd != NULL) {
 611     // Resize the underlying block offset table.
 612     _bt.resize(pointer_delta(value, bottom()));
 613     if (value <= prevEnd) {
 614       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 615              "New end is below unallocated block");
 616     } else {
 617       // Now, take this new chunk and add it to the free blocks.
 618       // Note that the BOT has not yet been updated for this block.
 619       size_t newFcSize = pointer_delta(value, prevEnd);
 620       // XXX This is REALLY UGLY and should be fixed up. XXX
 621       if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
 622         // Mark the boundary of the new block in BOT
 623         _bt.mark_block(prevEnd, value);
 624         // put it all in the linAB
 625         if (ParallelGCThreads == 0) {
 626           _smallLinearAllocBlock._ptr = prevEnd;
 627           _smallLinearAllocBlock._word_size = newFcSize;
 628           repairLinearAllocBlock(&_smallLinearAllocBlock);
 629         } else { // ParallelGCThreads > 0
 630           MutexLockerEx x(parDictionaryAllocLock(),
 631                           Mutex::_no_safepoint_check_flag);
 632           _smallLinearAllocBlock._ptr = prevEnd;
 633           _smallLinearAllocBlock._word_size = newFcSize;
 634           repairLinearAllocBlock(&_smallLinearAllocBlock);
 635         }
 636         // Births of chunks put into a LinAB are not recorded.  Births
 637         // of chunks as they are allocated out of a LinAB are.
 638       } else {
 639         // Add the block to the free lists, if possible coalescing it
 640         // with the last free block, and update the BOT and census data.
 641         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 642       }
 643     }
 644   }
 645 }
 646 
 647 class FreeListSpace_DCTOC : public Filtering_DCTOC {
 648   CompactibleFreeListSpace* _cfls;
 649   CMSCollector* _collector;
 650 protected:
 651   // Override.
 652 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 653   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 654                                        HeapWord* bottom, HeapWord* top, \
 655                                        ClosureType* cl);                \
 656       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 657                                        HeapWord* bottom, HeapWord* top, \
 658                                        ClosureType* cl);                \
 659     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 660                                        HeapWord* bottom, HeapWord* top, \
 661                                        ClosureType* cl)
 662   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
 663   walk_mem_region_with_cl_DECL(FilteringClosure);
 664 
 665 public:
 666   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
 667                       CMSCollector* collector,
 668                       ExtendedOopClosure* cl,
 669                       CardTableModRefBS::PrecisionStyle precision,
 670                       HeapWord* boundary) :
 671     Filtering_DCTOC(sp, cl, precision, boundary),
 672     _cfls(sp), _collector(collector) {}
 673 };
 674 
 675 // We de-virtualize the block-related calls below, since we know that our
 676 // space is a CompactibleFreeListSpace.
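// (The explicitly qualified calls in the macro below, for example
//  _cfls->CompactibleFreeListSpace::block_size(...), bind statically at compile
//  time, bypassing the virtual dispatch that an unqualified call through the
//  Space interface would incur.)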
 677 
 678 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
 679 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
 680                                                  HeapWord* bottom,              \
 681                                                  HeapWord* top,                 \
 682                                                  ClosureType* cl) {             \
 683    bool is_par = SharedHeap::heap()->n_par_threads() > 0;                       \
 684    if (is_par) {                                                                \
 685      assert(SharedHeap::heap()->n_par_threads() ==                              \
 686             SharedHeap::heap()->workers()->active_workers(), "Mismatch");       \
 687      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 688    } else {                                                                     \
 689      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 690    }                                                                            \
 691 }                                                                               \
 692 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
 693                                                       HeapWord* bottom,         \
 694                                                       HeapWord* top,            \
 695                                                       ClosureType* cl) {        \
 696   /* Skip parts that are before "mr", in case "block_start" sent us             \
 697      back too far. */                                                           \
 698   HeapWord* mr_start = mr.start();                                              \
 699   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 700   HeapWord* next = bottom + bot_size;                                           \
 701   while (next < mr_start) {                                                     \
 702     bottom = next;                                                              \
 703     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 704     next = bottom + bot_size;                                                   \
 705   }                                                                             \
 706                                                                                 \
 707   while (bottom < top) {                                                        \
 708     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 709         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 710                     oop(bottom)) &&                                             \
 711         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 712       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 713       bottom += _cfls->adjustObjectSize(word_sz);                               \
 714     } else {                                                                    \
 715       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 716     }                                                                           \
 717   }                                                                             \
 718 }                                                                               \
 719 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
 720                                                         HeapWord* bottom,       \
 721                                                         HeapWord* top,          \
 722                                                         ClosureType* cl) {      \
 723   /* Skip parts that are before "mr", in case "block_start" sent us             \
 724      back too far. */                                                           \
 725   HeapWord* mr_start = mr.start();                                              \
 726   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 727   HeapWord* next = bottom + bot_size;                                           \
 728   while (next < mr_start) {                                                     \
 729     bottom = next;                                                              \
 730     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 731     next = bottom + bot_size;                                                   \
 732   }                                                                             \
 733                                                                                 \
 734   while (bottom < top) {                                                        \
 735     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 736         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 737                     oop(bottom)) &&                                             \
 738         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 739       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 740       bottom += _cfls->adjustObjectSize(word_sz);                               \
 741     } else {                                                                    \
 742       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 743     }                                                                           \
 744   }                                                                             \
 745 }
 746 
 747 // (There are only two of these, rather than N, because the split is due
 748 // only to the introduction of the FilteringClosure, a local part of the
 749 // impl of this abstraction.)
 750 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 751 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 752 
 753 DirtyCardToOopClosure*
 754 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 755                                       CardTableModRefBS::PrecisionStyle precision,
 756                                       HeapWord* boundary) {
 757   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
 758 }
 759 
 760 
 761 // Note on locking for the space iteration functions:
 762 // since the collector's iteration activities are concurrent with
 763 // allocation activities by mutators, absent a suitable mutual exclusion
 764 // mechanism the iterators may go awry. For instance a block being iterated
 765 // may suddenly be allocated or divided up and part of it allocated and
 766 // so on.
 767 
 768 // Apply the given closure to each block in the space.
 769 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 770   assert_lock_strong(freelistLock());
 771   HeapWord *cur, *limit;
 772   for (cur = bottom(), limit = end(); cur < limit;
 773        cur += cl->do_blk_careful(cur));
 774 }
 775 
 776 // Apply the given closure to each block in the space.
 777 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 778   assert_lock_strong(freelistLock());
 779   HeapWord *cur, *limit;
 780   for (cur = bottom(), limit = end(); cur < limit;
 781        cur += cl->do_blk(cur));
 782 }
 783 
 784 // Apply the given closure to each oop in the space.
 785 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
 786   assert_lock_strong(freelistLock());
 787   HeapWord *cur, *limit;
 788   size_t curSize;
 789   for (cur = bottom(), limit = end(); cur < limit;
 790        cur += curSize) {
 791     curSize = block_size(cur);
 792     if (block_is_obj(cur)) {
 793       oop(cur)->oop_iterate(cl);
 794     }
 795   }
 796 }
 797 
 798 // NOTE: In the following methods, in order to safely be able to
 799 // apply the closure to an object, we need to be sure that the
 800 // object has been initialized. We are guaranteed that an object
 801 // is initialized if we are holding the Heap_lock with the
 802 // world stopped.
 803 void CompactibleFreeListSpace::verify_objects_initialized() const {
 804   if (is_init_completed()) {
 805     assert_locked_or_safepoint(Heap_lock);
 806     if (Universe::is_fully_initialized()) {
 807       guarantee(SafepointSynchronize::is_at_safepoint(),
 808                 "Required for objects to be initialized");
 809     }
 810   } // else make a concession at vm start-up
 811 }
 812 
 813 // Apply the given closure to each object in the space
 814 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 815   assert_lock_strong(freelistLock());
 816   NOT_PRODUCT(verify_objects_initialized());
 817   HeapWord *cur, *limit;
 818   size_t curSize;
 819   for (cur = bottom(), limit = end(); cur < limit;
 820        cur += curSize) {
 821     curSize = block_size(cur);
 822     if (block_is_obj(cur)) {
 823       blk->do_object(oop(cur));
 824     }
 825   }
 826 }
 827 
 828 // Apply the given closure to each live object in the space
 829 //   The usage of CompactibleFreeListSpace
 830 // by the ConcurrentMarkSweepGeneration for concurrent GCs allows there to be
 831 // objects in the space with references to objects that are no longer
 832 // valid.  For example, an object may reference another object
 833 // that has already been swept up (collected).  This method uses
 834 // obj_is_alive() to determine whether it is safe to apply the closure to
 835 // an object.  See obj_is_alive() for details on how liveness of an
 836 // object is decided.
 837 
 838 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
 839   assert_lock_strong(freelistLock());
 840   NOT_PRODUCT(verify_objects_initialized());
 841   HeapWord *cur, *limit;
 842   size_t curSize;
 843   for (cur = bottom(), limit = end(); cur < limit;
 844        cur += curSize) {
 845     curSize = block_size(cur);
 846     if (block_is_obj(cur) && obj_is_alive(cur)) {
 847       blk->do_object(oop(cur));
 848     }
 849   }
 850 }
 851 
 852 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 853                                                   UpwardsObjectClosure* cl) {
 854   assert_locked(freelistLock());
 855   NOT_PRODUCT(verify_objects_initialized());
 856   assert(!mr.is_empty(), "Should be non-empty");
 857   // We use MemRegion(bottom(), end()) rather than used_region() below
 858   // because the two are not necessarily equal for some kinds of
 859   // spaces, in particular, certain kinds of free list spaces.
 860   // We could use the more complicated but more precise:
 861   // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
 862   // but the slight imprecision seems acceptable in the assertion check.
 863   assert(MemRegion(bottom(), end()).contains(mr),
 864          "Should be within used space");
 865   HeapWord* prev = cl->previous();   // max address from last time
 866   if (prev >= mr.end()) { // nothing to do
 867     return;
 868   }
 869   // This assert will not work when we go from cms space to perm
 870   // space and use the same closure. Easy fix deferred for later. XXX YSR
 871   // assert(prev == NULL || contains(prev), "Should be within space");
 872 
 873   bool last_was_obj_array = false;
 874   HeapWord *blk_start_addr, *region_start_addr;
 875   if (prev > mr.start()) {
 876     region_start_addr = prev;
 877     blk_start_addr    = prev;
 878     // The previous invocation may have pushed "prev" beyond the
 879     // last allocated block, yet there may still be blocks
 880     // in this region due to a particular coalescing policy.
 881     // Relax the assertion so that the case where the unallocated
 882     // block is maintained and "prev" is beyond the unallocated
 883     // block does not cause the assertion to fire.
 884     assert((BlockOffsetArrayUseUnallocatedBlock &&
 885             (!is_in(prev))) ||
 886            (blk_start_addr == block_start(region_start_addr)), "invariant");
 887   } else {
 888     region_start_addr = mr.start();
 889     blk_start_addr    = block_start(region_start_addr);
 890   }
 891   HeapWord* region_end_addr = mr.end();
 892   MemRegion derived_mr(region_start_addr, region_end_addr);
 893   while (blk_start_addr < region_end_addr) {
 894     const size_t size = block_size(blk_start_addr);
 895     if (block_is_obj(blk_start_addr)) {
 896       last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
 897     } else {
 898       last_was_obj_array = false;
 899     }
 900     blk_start_addr += size;
 901   }
 902   if (!last_was_obj_array) {
 903     assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
 904            "Should be within (closed) used space");
 905     assert(blk_start_addr > prev, "Invariant");
 906     cl->set_previous(blk_start_addr); // min address for next time
 907   }
 908 }
 909 
 910 
 911 // Callers of this iterator beware: The closure application should
 912 // be robust in the face of uninitialized objects and should (always)
 913 // return a correct size so that the next addr + size below gives us a
 914 // valid block boundary. [See for instance,
 915 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 916 // in ConcurrentMarkSweepGeneration.cpp.]
 917 HeapWord*
 918 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 919   ObjectClosureCareful* cl) {
 920   assert_lock_strong(freelistLock());
 921   // Can't use used_region() below because it may not necessarily
 922   // be the same as [bottom(),end()); although we could
 923   // use [used_region().start(),round_to(used_region().end(),CardSize)),
 924   // that appears too cumbersome, so we just do the simpler check
 925   // in the assertion below.
 926   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 927          "mr should be non-empty and within used space");
 928   HeapWord *addr, *end;
 929   size_t size;
 930   for (addr = block_start_careful(mr.start()), end  = mr.end();
 931        addr < end; addr += size) {
 932     FreeChunk* fc = (FreeChunk*)addr;
 933     if (fc->is_free()) {
 934       // Since we hold the free list lock, which protects direct
 935       // allocation in this generation by mutators, a free object
 936       // will remain free throughout this iteration code.
 937       size = fc->size();
 938     } else {
 939       // Note that the object need not necessarily be initialized,
 940       // because (for instance) the free list lock does NOT protect
 941       // object initialization. The closure application below must
 942       // therefore be correct in the face of uninitialized objects.
 943       size = cl->do_object_careful_m(oop(addr), mr);
 944       if (size == 0) {
 945         // An unparsable object found. Signal early termination.
 946         return addr;
 947       }
 948     }
 949   }
 950   return NULL;
 951 }
 952 
 953 
 954 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
 955   NOT_PRODUCT(verify_objects_initialized());
 956   return _bt.block_start(p);
 957 }
 958 
 959 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 960   return _bt.block_start_careful(p);
 961 }
 962 
 963 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 964   NOT_PRODUCT(verify_objects_initialized());
 965   // This must be volatile, or else there is a danger that the compiler
 966   // will compile the code below into a sometimes-infinite loop, by keeping
 967   // the value read the first time in a register.
 968   while (true) {
 969     // We must do this until we get a consistent view of the object.
 970     if (FreeChunk::indicatesFreeChunk(p)) {
 971       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 972       size_t res = fc->size();
 973 
 974       // Bugfix for systems with weak memory model (PPC64/IA64). The
 975       // block's free bit was set and we have read the size of the
 976       // block. Acquire and check the free bit again. If the block is
 977       // still free, the read size is correct.
 978       OrderAccess::acquire();
 979 
 980       // If the object is still a free chunk, return the size, else it
 981       // has been allocated so try again.
 982       if (FreeChunk::indicatesFreeChunk(p)) {
 983         assert(res != 0, "Block size should not be 0");
 984         return res;
 985       }
 986     } else {
 987       // must read from what 'p' points to in each loop.
 988       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
 989       if (k != NULL) {
 990         assert(k->is_klass(), "Should really be klass oop.");
 991         oop o = (oop)p;
 992         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
 993 
 994         // Bugfix for systems with weak memory model (PPC64/IA64).
 995         // The object o may be an array. Acquire to make sure that the array
 996         // size (third word) is consistent.
 997         OrderAccess::acquire();
 998 
 999         size_t res = o->size_given_klass(k);
1000         res = adjustObjectSize(res);
1001         assert(res != 0, "Block size should not be 0");
1002         return res;
1003       }
1004     }
1005   }
1006 }
1007 
1008 // TODO: Now that is_parsable is gone, we should combine these two functions.
1009 // A variant of the above that uses the Printezis bits for
1010 // unparsable but allocated objects. This avoids any possible
1011 // stalls waiting for mutators to initialize objects, and is
1012 // thus potentially faster than the variant above. However,
1013 // this variant may return a zero size for a block that is
1014 // under mutation and for which a consistent size cannot be
1015 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
1016 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
1017                                                      const CMSCollector* c)
1018 const {
1019   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1020   // This must be volatile, or else there is a danger that the compiler
1021   // will compile the code below into a sometimes-infinite loop, by keeping
1022   // the value read the first time in a register.
1023   DEBUG_ONLY(uint loops = 0;)
1024   while (true) {
1025     // We must do this until we get a consistent view of the object.
1026     if (FreeChunk::indicatesFreeChunk(p)) {
1027       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1028       size_t res = fc->size();
1029 
1030       // Bugfix for systems with weak memory model (PPC64/IA64). The
1031       // free bit of the block was set and we have read the size of
1032       // the block. Acquire and check the free bit again. If the
1033       // block is still free, the read size is correct.
1034       OrderAccess::acquire();
1035 
1036       if (FreeChunk::indicatesFreeChunk(p)) {
1037         assert(res != 0, "Block size should not be 0");
1038         assert(loops == 0, "Should be 0");
1039         return res;
1040       }
1041     } else {
1042       // must read from what 'p' points to in each loop.
1043       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
1044       // We trust the size of any object that has a non-NULL
1045       // klass and (for those in the perm gen) is parsable
1046       // -- irrespective of its conc_safe-ty.
1047       if (k != NULL) {
1048         assert(k->is_klass(), "Should really be klass oop.");
1049         oop o = (oop)p;
1050         assert(o->is_oop(), "Should be an oop");
1051 
1052         // Bugfix for systems with weak memory model (PPC64/IA64).
1053         // The object o may be an array. Acquire to make sure that the array
1054         // size (third word) is consistent.
1055         OrderAccess::acquire();
1056 
1057         size_t res = o->size_given_klass(k);
1058         res = adjustObjectSize(res);
1059         assert(res != 0, "Block size should not be 0");
1060         return res;
1061       } else {
1062         // May return 0 if P-bits not present.
1063         return c->block_size_if_printezis_bits(p);
1064       }
1065     }
1066     assert(loops == 0, "Can loop at most once");
1067     DEBUG_ONLY(loops++;)
1068   }
1069 }
1070 
1071 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1072   NOT_PRODUCT(verify_objects_initialized());
1073   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1074   FreeChunk* fc = (FreeChunk*)p;
1075   if (fc->is_free()) {
1076     return fc->size();
1077   } else {
1078     // Ignore mark word because this may be a recently promoted
1079     // object whose mark word is used to chain together grey
1080     // objects (the last one would have a null value).
1081     assert(oop(p)->is_oop(true), "Should be an oop");
1082     return adjustObjectSize(oop(p)->size());
1083   }
1084 }
1085 
1086 // This implementation assumes that the property of "being an object" is
1087 // stable.  But being a free chunk may not be (because of parallel
1088 // promotion.)
1089 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1090   FreeChunk* fc = (FreeChunk*)p;
1091   assert(is_in_reserved(p), "Should be in space");
1092   // When doing a mark-sweep-compact of the CMS generation, this
1093   // assertion may fail because prepare_for_compaction() uses
1094   // space that is garbage to maintain information on ranges of
1095   // live objects so that these live ranges can be moved as a whole.
1096   // Comment out this assertion until that problem can be solved
1097   // (i.e., that the block start calculation may look at objects
1098   // at addresses below "p" in finding the object that contains "p",
1099   // and those objects (if garbage) may have been modified to hold
1100   // live range information).
1101   // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
1102   //        "Should be a block boundary");
1103   if (FreeChunk::indicatesFreeChunk(p)) return false;
1104   Klass* k = oop(p)->klass_or_null();
1105   if (k != NULL) {
1106     // Ignore mark word because it may have been used to
1107     // chain together promoted objects (the last one
1108     // would have a null value).
1109     assert(oop(p)->is_oop(true), "Should be an oop");
1110     return true;
1111   } else {
1112     return false;  // Was not an object at the start of collection.
1113   }
1114 }
1115 
1116 // Check if the object is alive. This fact is checked either by consulting
1117 // the main marking bitmap in the sweeping phase or, if it's a permanent
1118 // generation and we're not in the sweeping phase, by checking the
1119 // perm_gen_verify_bit_map where we store the "deadness" information if
1120 // we did not sweep the perm gen in the most recent previous GC cycle.
1121 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1122   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1123          "Else races are possible");
1124   assert(block_is_obj(p), "The address should point to an object");
1125 
1126   // If we're sweeping, we use object liveness information from the main bit map
1127   // for both perm gen and old gen.
1128   // We don't need to lock the bitmap (live_map or dead_map below), because
1129   // EITHER we are in the middle of the sweeping phase, and the
1130   // main marking bit map (live_map below) is locked,
1131   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1132   // is stable, because it's mutated only in the sweeping phase.
1133   // NOTE: This method is also used by jmap where, if class unloading is
1134   // off, the results can return "false" for legitimate perm objects,
1135   // when we are not in the midst of a sweeping phase, which can result
1136   // in jmap not reporting certain perm gen objects. This will be moot
1137   // if/when the perm gen goes away in the future.
1138   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1139     CMSBitMap* live_map = _collector->markBitMap();
1140     return live_map->par_isMarked((HeapWord*) p);
1141   }
1142   return true;
1143 }
1144 
1145 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1146   FreeChunk* fc = (FreeChunk*)p;
1147   assert(is_in_reserved(p), "Should be in space");
1148   assert(_bt.block_start(p) == p, "Should be a block boundary");
1149   if (!fc->is_free()) {
1150     // Ignore mark word because it may have been used to
1151     // chain together promoted objects (the last one
1152     // would have a null value).
1153     assert(oop(p)->is_oop(true), "Should be an oop");
1154     return true;
1155   }
1156   return false;
1157 }
1158 
1159 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
1160 // approximate answer if you don't hold the freelistlock when you call this.
1161 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1162   size_t size = 0;
1163   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1164     debug_only(
1165       // We may be calling here without the lock in which case we
1166       // won't do this modest sanity check.
1167       if (freelistLock()->owned_by_self()) {
1168         size_t total_list_size = 0;
1169         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1170           fc = fc->next()) {
1171           total_list_size += i;
1172         }
1173         assert(total_list_size == i * _indexedFreeList[i].count(),
1174                "Count in list is incorrect");
1175       }
1176     )
1177     size += i * _indexedFreeList[i].count();
1178   }
1179   return size;
1180 }
1181 
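// Parallel-safe allocation entry point: takes the free list lock (without a
// safepoint check) and delegates to the locked allocate(size_t) below.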
1182 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1183   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1184   return allocate(size);
1185 }
1186 
1187 HeapWord*
1188 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1189   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1190 }
1191 
1192 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1193   assert_lock_strong(freelistLock());
1194   HeapWord* res = NULL;
1195   assert(size == adjustObjectSize(size),
1196          "use adjustObjectSize() before calling into allocate()");
1197 
1198   if (_adaptive_freelists) {
1199     res = allocate_adaptive_freelists(size);
1200   } else {  // non-adaptive free lists
1201     res = allocate_non_adaptive_freelists(size);
1202   }
1203 
1204   if (res != NULL) {
1205     // check that res does lie in this space!
1206     assert(is_in_reserved(res), "Not in this space!");
1207     assert(is_aligned((void*)res), "alignment check");
1208 
1209     FreeChunk* fc = (FreeChunk*)res;
1210     fc->markNotFree();
1211     assert(!fc->is_free(), "shouldn't be marked free");
1212     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1213     // Verify that the block offset table shows this to
1214     // be a single block, but not one which is unallocated.
1215     _bt.verify_single_block(res, size);
1216     _bt.verify_not_unallocated(res, size);
1217     // mangle a just allocated object with a distinct pattern.
1218     debug_only(fc->mangleAllocated(size));
1219   }
1220 
1221   return res;
1222 }
1223 
1224 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
1225   HeapWord* res = NULL;
1226   // Try to use linear allocation for smaller blocks.
1227   if (size < _smallLinearAllocBlock._allocation_size_limit) {
1228     // if successful, the following also adjusts block offset table
1229     res = getChunkFromSmallLinearAllocBlock(size);
1230   }
1231   // Otherwise triage to the indexed lists (smaller sizes) or the dictionary (larger sizes).
1232   if (res == NULL) {
1233     if (size < SmallForDictionary) {
1234       res = (HeapWord*) getChunkFromIndexedFreeList(size);
1235     } else {
1236       // else get it from the big dictionary; if even this doesn't
1237       // work we are out of luck.
1238       res = (HeapWord*)getChunkFromDictionaryExact(size);
1239     }
1240   }
1241 
1242   return res;
1243 }
1244 
1245 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1246   assert_lock_strong(freelistLock());
1247   HeapWord* res = NULL;
1248   assert(size == adjustObjectSize(size),
1249          "use adjustObjectSize() before calling into allocate()");
1250 
1251   // Strategy:
1252   //   if the request is small
1253   //     try the exact size in the small-object indexed lists
1254   //     then the small or large linear allocation block (linAB) as appropriate
1255   //     then the lists of greater-sized chunks
1256   //   else
1257   //     try the dictionary
1258   //     then the small or large linear allocation block if it has the space
1259   // Try allocating exact size from indexTable first
1260   if (size < IndexSetSize) {
1261     res = (HeapWord*) getChunkFromIndexedFreeList(size);
1262     if (res != NULL) {
1263       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1264         "Not removed from free list");
1265       // no block offset table adjustment is necessary on blocks in
1266       // the indexed lists.
1267 
1268     // Try allocating from the small LinAB
1269     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1270         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1271         // if successful, the above also adjusts block offset table
1272         // Note that this call will refill the LinAB to
1273         // satisfy the request.  This is different from
1274         // the evm behavior.
1275         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1276     } else {
1277       // Raid the exact free lists larger than size, even if they are not
1278       // overpopulated.
1279       res = (HeapWord*) getChunkFromGreater(size);
1280     }
1281   } else {
1282     // Big objects get allocated directly from the dictionary.
1283     res = (HeapWord*) getChunkFromDictionaryExact(size);
1284     if (res == NULL) {
1285       // Try hard not to fail since an allocation failure will likely
1286       // trigger a synchronous GC.  Try to get the space from the
1287       // allocation blocks.
1288       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1289     }
1290   }
1291 
1292   return res;
1293 }
1294 
1295 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1296 // when promoting obj.
1297 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1298   // Depending on the object size, expansion may require refilling either a
1299   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  A further
1300   // 2 * MinChunkSize is added because the dictionary may over-allocate to avoid fragmentation.
1301   size_t space = obj_size;
1302   if (!_adaptive_freelists) {
1303     space = MAX2(space, _smallLinearAllocBlock._refillSize);
1304   }
1305   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1306   return space;
1307 }
1308 
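// Satisfy a request for numWords by taking the head of an indexed list of some
// larger size (large enough that the excess is itself a valid chunk), or,
// failing that, by carving a chunk out of the dictionary; in both cases the
// remainder is split off and returned to the free lists.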
1309 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1310   FreeChunk* ret;
1311 
1312   assert(numWords >= MinChunkSize, "Size is less than minimum");
1313   assert(linearAllocationWouldFail() || bestFitFirst(),
1314     "Should not be here");
1315 
1316   size_t i;
1317   size_t currSize = numWords + MinChunkSize;
1318   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
1319   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1320     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1321     if (fl->head()) {
1322       ret = getFromListGreater(fl, numWords);
1323       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1324       return ret;
1325     }
1326   }
1327 
1328   currSize = MAX2((size_t)SmallForDictionary,
1329                   (size_t)(numWords + MinChunkSize));
1330 
1331   /* Try to get a chunk that satisfies request, while avoiding
1332      fragmentation that can't be handled. */
1333   {
1334     ret =  dictionary()->get_chunk(currSize);
1335     if (ret != NULL) {
1336       assert(ret->size() - numWords >= MinChunkSize,
1337              "Chunk is too small");
1338       _bt.allocated((HeapWord*)ret, ret->size());
1339       /* Carve returned chunk. */
1340       (void) splitChunkAndReturnRemainder(ret, numWords);
1341       /* Label this as no longer a free chunk. */
1342       assert(ret->is_free(), "This chunk should be free");
1343       ret->link_prev(NULL);
1344     }
1345     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1346     return ret;
1347   }
1348   ShouldNotReachHere();
1349 }
1350 
1351 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1352   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1353   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1354 }
1355 
1356 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1357   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1358          (_smallLinearAllocBlock._word_size == fc->size()),
1359          "Linear allocation block shows incorrect size");
1360   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1361           (_smallLinearAllocBlock._word_size == fc->size()));
1362 }
1363 
1364 // Check if the purported free chunk is present either as a linear
1365 // allocation block, the size-indexed table of (smaller) free blocks,
1366 // or the larger free blocks kept in the binary tree dictionary.
1367 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1368   if (verify_chunk_is_linear_alloc_block(fc)) {
1369     return true;
1370   } else if (fc->size() < IndexSetSize) {
1371     return verifyChunkInIndexedFreeLists(fc);
1372   } else {
1373     return dictionary()->verify_chunk_in_free_list(fc);
1374   }
1375 }
1376 
1377 #ifndef PRODUCT
1378 void CompactibleFreeListSpace::assert_locked() const {
1379   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1380 }
1381 
1382 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1383   CMSLockVerifier::assert_locked(lock);
1384 }
1385 #endif
1386 
1387 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1388   // In the parallel case, the main thread holds the free list lock
1389   // on behalf of the parallel threads.
1390   FreeChunk* fc;
1391   {
1392     // If GC is parallel, this might be called by several threads.
1393     // This should be rare enough that the locking overhead won't affect
1394     // the sequential code.
1395     MutexLockerEx x(parDictionaryAllocLock(),
1396                     Mutex::_no_safepoint_check_flag);
1397     fc = getChunkFromDictionary(size);
1398   }
1399   if (fc != NULL) {
1400     fc->dontCoalesce();
1401     assert(fc->is_free(), "Should be free, but not coalescable");
1402     // Verify that the block offset table shows this to
1403     // be a single block, but not one which is unallocated.
1404     _bt.verify_single_block((HeapWord*)fc, fc->size());
1405     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1406   }
1407   return fc;
1408 }
1409 
1410 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1411   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1412   assert_locked();
1413 
1414   // If we are tracking promotions, first ensure there is space for the
1415   // promotion (including spooling space for saving the header if necessary),
1416   // then allocate and copy, and finally track the promotion info if needed.
1417   // When tracking (see PromotionInfo::track()), the mark word may
1418   // be displaced and in this case restoration of the mark word
1419   // occurs in the (oop_since_save_marks_)iterate phase.
1420   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1421     return NULL;
1422   }
1423   // Call the allocate(size_t, bool) form directly to avoid the
1424   // additional call through the allocate(size_t) form.  Having
1425   // the compiler inline the call is problematic because allocate(size_t)
1426   // is a virtual method.
1427   HeapWord* res = allocate(adjustObjectSize(obj_size));
1428   if (res != NULL) {
1429     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1430     // if we should be tracking promotions, do so.
1431     if (_promoInfo.tracking()) {
1432         _promoInfo.track((PromotedObject*)res);
1433     }
1434   }
1435   return oop(res);
1436 }
1437 
1438 HeapWord*
1439 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1440   assert_locked();
1441   assert(size >= MinChunkSize, "minimum chunk size");
1442   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1443     "maximum from smallLinearAllocBlock");
1444   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1445 }
1446 
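// Allocate 'size' words from the linear allocation block 'blk': first try the
// in-place remainder; otherwise take the whole block if it fits exactly, or,
// when a refill could still satisfy the request, return the stale remainder
// to the free lists, refill the block, and allocate from the fresh block.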
1447 HeapWord*
1448 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1449                                                        size_t size) {
1450   assert_locked();
1451   assert(size >= MinChunkSize, "too small");
1452   HeapWord* res = NULL;
1453   // Try to do linear allocation from blk, checking first that the block is not empty.
1454   if (blk->_word_size == 0) {
1455     // We have probably been unable to fill this either in the prologue or
1456     // when it was exhausted at the last linear allocation. Bail out until
1457     // next time.
1458     assert(blk->_ptr == NULL, "consistency check");
1459     return NULL;
1460   }
1461   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1462   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1463   if (res != NULL) return res;
1464 
1465   // about to exhaust this linear allocation block
1466   if (blk->_word_size == size) { // exactly satisfied
1467     res = blk->_ptr;
1468     _bt.allocated(res, blk->_word_size);
1469   } else if (size + MinChunkSize <= blk->_refillSize) {
1470     size_t sz = blk->_word_size;
1471     // Update _unallocated_block if the size is such that chunk would be
1472     // returned to the indexed free list.  All other chunks in the indexed
1473     // free lists are allocated from the dictionary so that _unallocated_block
1474     // has already been adjusted for them.  Do it here so that the cost
1475     // is incurred uniformly for all chunks added back to the indexed free lists.
1476     if (sz < SmallForDictionary) {
1477       _bt.allocated(blk->_ptr, sz);
1478     }
1479     // Return the chunk that isn't big enough, and then refill below.
1480     addChunkToFreeLists(blk->_ptr, sz);
1481     split_birth(sz);
1482     // Don't keep statistics on adding back chunk from a LinAB.
1483   } else {
1484     // A refilled block would not satisfy the request.
1485     return NULL;
1486   }
1487 
1488   blk->_ptr = NULL; blk->_word_size = 0;
1489   refillLinearAllocBlock(blk);
1490   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1491          "block was replenished");
1492   if (res != NULL) {
1493     split_birth(size);
1494     repairLinearAllocBlock(blk);
1495   } else if (blk->_ptr != NULL) {
1496     res = blk->_ptr;
1497     size_t blk_size = blk->_word_size;
1498     blk->_word_size -= size;
1499     blk->_ptr  += size;
1500     split_birth(size);
1501     repairLinearAllocBlock(blk);
1502     // Update BOT last so that other (parallel) GC threads see a consistent
1503     // view of the BOT and free blocks.
1504     // Above must occur before BOT is updated below.
1505     OrderAccess::storestore();
1506     _bt.split_block(res, blk_size, size);  // adjust block offset table
1507   }
1508   return res;
1509 }
1510 
1511 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1512                                         LinearAllocBlock* blk,
1513                                         size_t size) {
1514   assert_locked();
1515   assert(size >= MinChunkSize, "too small");
1516 
1517   HeapWord* res = NULL;
1518   // This is the common case.  Keep it simple.
1519   if (blk->_word_size >= size + MinChunkSize) {
1520     assert(blk->_ptr != NULL, "consistency check");
1521     res = blk->_ptr;
1522     // Note that the BOT is up-to-date for the linAB before allocation.  It
1523     // indicates the start of the linAB.  The split_block() updates the
1524     // BOT for the linAB after the allocation (indicates the start of the
1525     // next chunk to be allocated).
1526     size_t blk_size = blk->_word_size;
1527     blk->_word_size -= size;
1528     blk->_ptr  += size;
1529     split_birth(size);
1530     repairLinearAllocBlock(blk);
1531     // Update BOT last so that other (parallel) GC threads see a consistent
1532     // view of the BOT and free blocks.
1533     // Above must occur before BOT is updated below.
1534     OrderAccess::storestore();
1535     _bt.split_block(res, blk_size, size);  // adjust block offset table
1536     _bt.allocated(res, size);
1537   }
1538   return res;
1539 }
1540 
1541 FreeChunk*
1542 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1543   assert_locked();
1544   assert(size < SmallForDictionary, "just checking");
1545   FreeChunk* res;
1546   res = _indexedFreeList[size].get_chunk_at_head();
1547   if (res == NULL) {
1548     res = getChunkFromIndexedFreeListHelper(size);
1549   }
1550   _bt.verify_not_unallocated((HeapWord*) res, size);
1551   assert(res == NULL || res->size() == size, "Incorrect block size");
1552   return res;
1553 }
1554 
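// Replenish the indexed free list for 'size' and hand one chunk back to the
// caller, nominally by carving a block of CMSIndexedFreeListReplenish * 'size'
// words into 'size'-word pieces; sizes at or above SmallForDictionary are
// satisfied directly from the dictionary instead.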
1555 FreeChunk*
1556 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1557   bool replenish) {
1558   assert_locked();
1559   FreeChunk* fc = NULL;
1560   if (size < SmallForDictionary) {
1561     assert(_indexedFreeList[size].head() == NULL ||
1562       _indexedFreeList[size].surplus() <= 0,
1563       "List for this size should be empty or under populated");
1564     // Try best fit in exact lists before replenishing the list
1565     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1566       // Replenish list.
1567       //
1568       // Things tried that failed.
1569       //   Tried allocating out of the two LinAB's first before
1570       // replenishing lists.
1571       //   Tried small linAB of size 256 (size in indexed list)
1572       // and replenishing indexed lists from the small linAB.
1573       //
1574       FreeChunk* newFc = NULL;
1575       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1576       if (replenish_size < SmallForDictionary) {
1577         // Do not replenish from an underpopulated size.
1578         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1579             _indexedFreeList[replenish_size].head() != NULL) {
1580           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1581         } else if (bestFitFirst()) {
1582           newFc = bestFitSmall(replenish_size);
1583         }
1584       }
1585       if (newFc == NULL && replenish_size > size) {
1586         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1587         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1588       }
1589       // Note: The split-death stats update for the block obtained above
1590       // is recorded below, precisely when we know we are actually going
1591       // to be splitting it into more than one piece.
1592       if (newFc != NULL) {
1593         if  (replenish || CMSReplenishIntermediate) {
1594           // Replenish this list and return one block to caller.
1595           size_t i;
1596           FreeChunk *curFc, *nextFc;
1597           size_t num_blk = newFc->size() / size;
1598           assert(num_blk >= 1, "Smaller than requested?");
1599           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1600           if (num_blk > 1) {
1601             // we are sure we will be splitting the block just obtained
1602             // into multiple pieces; record the split-death of the original
1603             splitDeath(replenish_size);
1604           }
1605           // carve up and link blocks 0, ..., num_blk - 2
1606           // The last chunk is not added to the lists but is returned as the
1607           // free chunk.
1608           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1609                i = 0;
1610                i < (num_blk - 1);
1611                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1612                i++) {
1613             curFc->set_size(size);
1614             // Don't record this as a return in order to try and
1615             // determine the "returns" from a GC.
1616             _bt.verify_not_unallocated((HeapWord*) curFc, size);
1617             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1618             _bt.mark_block((HeapWord*)curFc, size);
1619             split_birth(size);
1620             // Don't record the initial population of the indexed list
1621             // as a split birth.
1622           }
1623 
1624           // check that the arithmetic was OK above
1625           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1626             "inconsistency in carving newFc");
1627           curFc->set_size(size);
1628           _bt.mark_block((HeapWord*)curFc, size);
1629           split_birth(size);
1630           fc = curFc;
1631         } else {
1632           // Return entire block to caller
1633           fc = newFc;
1634         }
1635       }
1636     }
1637   } else {
1638     // Get a free chunk from the free chunk dictionary to be returned to
1639     // replenish the indexed free list.
1640     fc = getChunkFromDictionaryExact(size);
1641   }
1642   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1643   return fc;
1644 }
1645 
1646 FreeChunk*
1647 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1648   assert_locked();
1649   FreeChunk* fc = _dictionary->get_chunk(size,
1650                                          FreeBlockDictionary<FreeChunk>::atLeast);
1651   if (fc == NULL) {
1652     return NULL;
1653   }
1654   _bt.allocated((HeapWord*)fc, fc->size());
1655   if (fc->size() >= size + MinChunkSize) {
1656     fc = splitChunkAndReturnRemainder(fc, size);
1657   }
1658   assert(fc->size() >= size, "chunk too small");
1659   assert(fc->size() < size + MinChunkSize, "chunk too big");
1660   _bt.verify_single_block((HeapWord*)fc, fc->size());
1661   return fc;
1662 }
1663 
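// Like getChunkFromDictionary() above, but the chunk handed back is exactly
// 'size' words: oversized chunks are split, with the remainder going back to
// the indexed lists or the dictionary, and chunks too small to split are
// exchanged for a larger one.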
1664 FreeChunk*
1665 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1666   assert_locked();
1667   FreeChunk* fc = _dictionary->get_chunk(size,
1668                                          FreeBlockDictionary<FreeChunk>::atLeast);
1669   if (fc == NULL) {
1670     return fc;
1671   }
1672   _bt.allocated((HeapWord*)fc, fc->size());
1673   if (fc->size() == size) {
1674     _bt.verify_single_block((HeapWord*)fc, size);
1675     return fc;
1676   }
1677   assert(fc->size() > size, "get_chunk() guarantee");
1678   if (fc->size() < size + MinChunkSize) {
1679     // Return the chunk to the dictionary and go get a bigger one.
1680     returnChunkToDictionary(fc);
1681     fc = _dictionary->get_chunk(size + MinChunkSize,
1682                                 FreeBlockDictionary<FreeChunk>::atLeast);
1683     if (fc == NULL) {
1684       return NULL;
1685     }
1686     _bt.allocated((HeapWord*)fc, fc->size());
1687   }
1688   assert(fc->size() >= size + MinChunkSize, "tautology");
1689   fc = splitChunkAndReturnRemainder(fc, size);
1690   assert(fc->size() == size, "chunk is wrong size");
1691   _bt.verify_single_block((HeapWord*)fc, size);
1692   return fc;
1693 }
1694 
1695 void
1696 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1697   assert_locked();
1698 
1699   size_t size = chunk->size();
1700   _bt.verify_single_block((HeapWord*)chunk, size);
1701   // adjust _unallocated_block downward, as necessary
1702   _bt.freed((HeapWord*)chunk, size);
1703   _dictionary->return_chunk(chunk);
1704 #ifndef PRODUCT
1705   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1706     TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
1707     TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
1708     tl->verify_stats();
1709   }
1710 #endif // PRODUCT
1711 }
1712 
1713 void
1714 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1715   assert_locked();
1716   size_t size = fc->size();
1717   _bt.verify_single_block((HeapWord*) fc, size);
1718   _bt.verify_not_unallocated((HeapWord*) fc, size);
1719   if (_adaptive_freelists) {
1720     _indexedFreeList[size].return_chunk_at_tail(fc);
1721   } else {
1722     _indexedFreeList[size].return_chunk_at_head(fc);
1723   }
1724 #ifndef PRODUCT
1725   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1726      _indexedFreeList[size].verify_stats();
1727   }
1728 #endif // PRODUCT
1729 }
1730 
1731 // Add the chunk at the end of the space to the free lists, coalescing it
1732 // with the space's last block if that block happens to be the largest one,
1733 // and update the BOT and census data. We would of course have preferred to
1734 // coalesce with the last block unconditionally, but it's currently less
1735 // expensive to find the largest block than it is to find the last.
1736 void
1737 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1738   HeapWord* chunk, size_t     size) {
1739   // check that the chunk does lie in this space!
1740   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1741   // One of the parallel gc task threads may be here
1742   // whilst others are allocating.
1743   Mutex* lock = NULL;
1744   if (ParallelGCThreads != 0) {
1745     lock = &_parDictionaryAllocLock;
1746   }
1747   FreeChunk* ec;
1748   {
1749     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1750     ec = dictionary()->find_largest_dict();  // get largest block
1751     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1752       // It's a coterminal block - we can coalesce.
1753       size_t old_size = ec->size();
1754       coalDeath(old_size);
1755       removeChunkFromDictionary(ec);
1756       size += old_size;
1757     } else {
1758       ec = (FreeChunk*)chunk;
1759     }
1760   }
1761   ec->set_size(size);
1762   debug_only(ec->mangleFreed(size));
1763   if (size < SmallForDictionary && ParallelGCThreads != 0) {
1764     lock = _indexedFreeListParLocks[size];
1765   }
1766   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1767   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1768   // record the birth under the lock since the recording involves
1769   // manipulation of the list on which the chunk lives and
1770   // if the chunk is allocated and is the last on the list,
1771   // the list can go away.
1772   coalBirth(size);
1773 }
1774 
1775 void
1776 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1777                                               size_t     size) {
1778   // check that the chunk does lie in this space!
1779   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1780   assert_locked();
1781   _bt.verify_single_block(chunk, size);
1782 
1783   FreeChunk* fc = (FreeChunk*) chunk;
1784   fc->set_size(size);
1785   debug_only(fc->mangleFreed(size));
1786   if (size < SmallForDictionary) {
1787     returnChunkToFreeList(fc);
1788   } else {
1789     returnChunkToDictionary(fc);
1790   }
1791 }
1792 
1793 void
1794 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1795   size_t size, bool coalesced) {
1796   assert_locked();
1797   assert(chunk != NULL, "null chunk");
1798   if (coalesced) {
1799     // repair BOT
1800     _bt.single_block(chunk, size);
1801   }
1802   addChunkToFreeLists(chunk, size);
1803 }
1804 
1805 // We _must_ find the purported chunk on our free lists;
1806 // we assert if we don't.
1807 void
1808 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1809   size_t size = fc->size();
1810   assert_locked();
1811   debug_only(verifyFreeLists());
1812   if (size < SmallForDictionary) {
1813     removeChunkFromIndexedFreeList(fc);
1814   } else {
1815     removeChunkFromDictionary(fc);
1816   }
1817   _bt.verify_single_block((HeapWord*)fc, size);
1818   debug_only(verifyFreeLists());
1819 }
1820 
1821 void
1822 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1823   size_t size = fc->size();
1824   assert_locked();
1825   assert(fc != NULL, "null chunk");
1826   _bt.verify_single_block((HeapWord*)fc, size);
1827   _dictionary->remove_chunk(fc);
1828   // adjust _unallocated_block upward, as necessary
1829   _bt.allocated((HeapWord*)fc, size);
1830 }
1831 
1832 void
1833 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1834   assert_locked();
1835   size_t size = fc->size();
1836   _bt.verify_single_block((HeapWord*)fc, size);
1837   NOT_PRODUCT(
1838     if (FLSVerifyIndexTable) {
1839       verifyIndexedFreeList(size);
1840     }
1841   )
1842   _indexedFreeList[size].remove_chunk(fc);
1843   NOT_PRODUCT(
1844     if (FLSVerifyIndexTable) {
1845       verifyIndexedFreeList(size);
1846     }
1847   )
1848 }
1849 
1850 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1851   /* A hint is the next larger size that has a surplus.
1852      Start search at a size large enough to guarantee that
1853      the excess is >= MinChunkSize. */
1854   size_t start = align_object_size(numWords + MinChunkSize);
1855   if (start < IndexSetSize) {
1856     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
1857     size_t    hint = _indexedFreeList[start].hint();
1858     while (hint < IndexSetSize) {
1859       assert(hint % MinObjAlignment == 0, "hint should be aligned");
1860       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1861       if (fl->surplus() > 0 && fl->head() != NULL) {
1862         // Found a list with surplus, reset original hint
1863         // and split out a free chunk which is returned.
1864         _indexedFreeList[start].set_hint(hint);
1865         FreeChunk* res = getFromListGreater(fl, numWords);
1866         assert(res == NULL || res->is_free(),
1867           "Should be returning a free chunk");
1868         return res;
1869       }
1870       hint = fl->hint(); /* keep looking */
1871     }
1872     /* None found. */
1873     it[start].set_hint(IndexSetSize);
1874   }
1875   return NULL;
1876 }
1877 
1878 /* Requires fl->size >= numWords + MinChunkSize */
1879 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
1880   size_t numWords) {
1881   FreeChunk *curr = fl->head();
1882   assert(curr != NULL, "List is empty");
1883   assert(numWords >= MinChunkSize, "Word size is too small");
1884   size_t oldNumWords = curr->size();
1885   assert(oldNumWords >= numWords + MinChunkSize,
1886         "Size of chunks in the list is too small");
1887 
1888   fl->remove_chunk(curr);
1889   // recorded indirectly by splitChunkAndReturnRemainder -
1890   // smallSplit(oldNumWords, numWords);
1891   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1892   // Does anything have to be done for the remainder in terms of
1893   // fixing the card table?
1894   assert(new_chunk == NULL || new_chunk->is_free(),
1895     "Should be returning a free chunk");
1896   return new_chunk;
1897 }
1898 
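// Split 'chunk' into a leading piece of 'new_size' words, which is returned,
// and a trailing free remainder, which goes back onto the indexed free lists
// or into the dictionary; the split is recorded in the census statistics.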
1899 FreeChunk*
1900 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1901   size_t new_size) {
1902   assert_locked();
1903   size_t size = chunk->size();
1904   assert(size > new_size, "Split from a smaller block?");
1905   assert(is_aligned(chunk), "alignment problem");
1906   assert(size == adjustObjectSize(size), "alignment problem");
1907   size_t rem_size = size - new_size;
1908   assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
1909   assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
1910   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1911   assert(is_aligned(ffc), "alignment problem");
1912   ffc->set_size(rem_size);
1913   ffc->link_next(NULL);
1914   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1915   // Above must occur before BOT is updated below.
1916   // adjust block offset table
1917   OrderAccess::storestore();
1918   assert(chunk->is_free() && ffc->is_free(), "Error");
1919   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1920   if (rem_size < SmallForDictionary) {
1921     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
1922     if (is_par) _indexedFreeListParLocks[rem_size]->lock();
1923     assert(!is_par ||
1924            (SharedHeap::heap()->n_par_threads() ==
1925             SharedHeap::heap()->workers()->active_workers()), "Mismatch");
1926     returnChunkToFreeList(ffc);
1927     split(size, rem_size);
1928     if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
1929   } else {
1930     returnChunkToDictionary(ffc);
1931     split(size, rem_size);
1932   }
1933   chunk->set_size(new_size);
1934   return chunk;
1935 }
1936 
1937 void
1938 CompactibleFreeListSpace::sweep_completed() {
1939   // Now that space is probably plentiful, refill linear
1940   // allocation blocks as needed.
1941   refillLinearAllocBlocksIfNeeded();
1942 }
1943 
1944 void
1945 CompactibleFreeListSpace::gc_prologue() {
1946   assert_locked();
1947   if (PrintFLSStatistics != 0) {
1948     gclog_or_tty->print("Before GC:\n");
1949     reportFreeListStatistics();
1950   }
1951   refillLinearAllocBlocksIfNeeded();
1952 }
1953 
1954 void
1955 CompactibleFreeListSpace::gc_epilogue() {
1956   assert_locked();
1957   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
1958     if (_smallLinearAllocBlock._word_size == 0)
1959       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
1960   }
1961   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1962   _promoInfo.stopTrackingPromotions();
1963   repairLinearAllocationBlocks();
1964   // Print Space's stats
1965   if (PrintFLSStatistics != 0) {
1966     gclog_or_tty->print("After GC:\n");
1967     reportFreeListStatistics();
1968   }
1969 }
1970 
1971 // Iteration support, mostly delegated from a CMS generation
1972 
1973 void CompactibleFreeListSpace::save_marks() {
1974   assert(Thread::current()->is_VM_thread(),
1975          "Global variable should only be set when single-threaded");
1976   // Mark the "end" of the used space at the time of this call;
1977   // note, however, that promoted objects from this point
1978   // on are tracked in the _promoInfo below.
1979   set_saved_mark_word(unallocated_block());
1980 #ifdef ASSERT
1981   // Check the sanity of save_marks() etc.
1982   MemRegion ur    = used_region();
1983   MemRegion urasm = used_region_at_save_marks();
1984   assert(ur.contains(urasm),
1985          err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1986                  " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1987                  p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end())));
1988 #endif
1989   // inform allocator that promotions should be tracked.
1990   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1991   _promoInfo.startTrackingPromotions();
1992 }
1993 
1994 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1995   assert(_promoInfo.tracking(), "No preceding save_marks?");
1996   assert(SharedHeap::heap()->n_par_threads() == 0,
1997          "Shouldn't be called if using parallel gc.");
1998   return _promoInfo.noPromotions();
1999 }
2000 
2001 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
2002                                                                             \
2003 void CompactibleFreeListSpace::                                             \
2004 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
2005   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
2006          "Shouldn't be called (yet) during parallel part of gc.");          \
2007   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
2008   /*                                                                        \
2009    * This also restores any displaced headers and removes the elements from \
2010    * the iteration set as they are processed, so that we have a clean slate \
2011    * at the end of the iteration. Note, thus, that if new objects are       \
2012    * promoted as a result of the iteration they are iterated over as well.  \
2013    */                                                                       \
2014   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
2015 }
2016 
2017 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
2018 
2019 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
2020   return _smallLinearAllocBlock._word_size == 0;
2021 }
2022 
2023 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
2024   // Fix up linear allocation blocks to look like free blocks
2025   repairLinearAllocBlock(&_smallLinearAllocBlock);
2026 }
2027 
2028 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
2029   assert_locked();
2030   if (blk->_ptr != NULL) {
2031     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
2032            "Minimum block size requirement");
2033     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
2034     fc->set_size(blk->_word_size);
2035     fc->link_prev(NULL);   // mark as free
2036     fc->dontCoalesce();
2037     assert(fc->is_free(), "just marked it free");
2038     assert(fc->cantCoalesce(), "just marked it uncoalescable");
2039   }
2040 }
2041 
2042 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
2043   assert_locked();
2044   if (_smallLinearAllocBlock._ptr == NULL) {
2045     assert(_smallLinearAllocBlock._word_size == 0,
2046       "Size of linAB should be zero if the ptr is NULL");
2047     // Reset the linAB refill and allocation size limit.
2048     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
2049   }
2050   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
2051 }
2052 
2053 void
2054 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
2055   assert_locked();
2056   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
2057          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
2058          "blk invariant");
2059   if (blk->_ptr == NULL) {
2060     refillLinearAllocBlock(blk);
2061   }
2062   if (PrintMiscellaneous && Verbose) {
2063     if (blk->_word_size == 0) {
2064       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
2065     }
2066   }
2067 }
2068 
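// Refill an empty linear allocation block, preferring a chunk of _refillSize
// words from the indexed lists when that size is small enough and falling
// back to the dictionary otherwise; the chunk is marked non-coalescable so
// the sweeper leaves it alone.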
2069 void
2070 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
2071   assert_locked();
2072   assert(blk->_word_size == 0 && blk->_ptr == NULL,
2073          "linear allocation block should be empty");
2074   FreeChunk* fc;
2075   if (blk->_refillSize < SmallForDictionary &&
2076       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
2077     // A linAB's strategy might be to use small sizes to reduce
2078     // fragmentation but still get the benefits of allocation from a
2079     // linAB.
2080   } else {
2081     fc = getChunkFromDictionary(blk->_refillSize);
2082   }
2083   if (fc != NULL) {
2084     blk->_ptr  = (HeapWord*)fc;
2085     blk->_word_size = fc->size();
2086     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
2087   }
2088 }
2089 
2090 // Support for concurrent collection policy decisions.
2091 bool CompactibleFreeListSpace::should_concurrent_collect() const {
2092   // In the future we might want to add in fragmentation stats --
2093   // including erosion of the "mountain" into this decision as well.
2094   return !adaptive_freelists() && linearAllocationWouldFail();
2095 }
2096 
2097 // Support for compaction
2098 
2099 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
2100   SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
2101   // prepare_for_compaction() uses the space between live objects
2102   // so that later phases can skip dead space quickly.  As a result,
2103   // verification of the free lists doesn't work afterwards.
2104 }
2105 
2106 #define obj_size(q) adjustObjectSize(oop(q)->size())
2107 #define adjust_obj_size(s) adjustObjectSize(s)
2108 
2109 void CompactibleFreeListSpace::adjust_pointers() {
2110   // In other versions of adjust_pointers(), a bail out
2111   // based on the amount of live data in the generation
2112   // (i.e., if 0, bail out) may be used.
2113   // Cannot test used() == 0 here because the free lists have already
2114   // been mangled by the compaction.
2115 
2116   SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
2117   // See note about verification in prepare_for_compaction().
2118 }
2119 
2120 void CompactibleFreeListSpace::compact() {
2121   SCAN_AND_COMPACT(obj_size);
2122 }
2123 
2124 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
2125 // where fbs ranges over the free block sizes
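// Illustrative example: two free blocks of 4 words each give
// 1 - (16 + 16)/(4 + 4)^2 = 0.5, whereas a single 8-word block gives
// 1 - 64/64 = 0.0, i.e. no fragmentation.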
2126 double CompactibleFreeListSpace::flsFrag() const {
2127   size_t itabFree = totalSizeInIndexedFreeLists();
2128   double frag = 0.0;
2129   size_t i;
2130 
2131   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2132     double sz  = i;
2133     frag      += _indexedFreeList[i].count() * (sz * sz);
2134   }
2135 
2136   double totFree = itabFree +
2137                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
2138   if (totFree > 0) {
2139     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2140             (totFree * totFree));
2141     frag = (double)1.0  - frag;
2142   } else {
2143     assert(frag == 0.0, "Follows from totFree == 0");
2144   }
2145   return frag;
2146 }
2147 
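// At the start of a sweep, update each indexed list's desired-count estimate
// from the inter/intra-sweep timing estimates, derive the coalescing target
// from it, and snapshot the current count and surplus for the sweep census;
// the dictionary performs the analogous update for its size classes.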
2148 void CompactibleFreeListSpace::beginSweepFLCensus(
2149   float inter_sweep_current,
2150   float inter_sweep_estimate,
2151   float intra_sweep_estimate) {
2152   assert_locked();
2153   size_t i;
2154   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2155     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2156     if (PrintFLSStatistics > 1) {
2157       gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
2158     }
2159     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2160     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2161     fl->set_before_sweep(fl->count());
2162     fl->set_bfr_surp(fl->surplus());
2163   }
2164   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2165                                     inter_sweep_current,
2166                                     inter_sweep_estimate,
2167                                     intra_sweep_estimate);
2168 }
2169 
2170 void CompactibleFreeListSpace::setFLSurplus() {
2171   assert_locked();
2172   size_t i;
2173   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2174     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2175     fl->set_surplus(fl->count() -
2176                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2177   }
2178 }
2179 
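// Walk the indexed lists from largest to smallest, recording in each list's
// hint the next larger size that currently has a surplus; a hint of
// IndexSetSize means no larger list has one.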
2180 void CompactibleFreeListSpace::setFLHints() {
2181   assert_locked();
2182   size_t i;
2183   size_t h = IndexSetSize;
2184   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2185     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2186     fl->set_hint(h);
2187     if (fl->surplus() > 0) {
2188       h = i;
2189     }
2190   }
2191 }
2192 
2193 void CompactibleFreeListSpace::clearFLCensus() {
2194   assert_locked();
2195   size_t i;
2196   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2197     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2198     fl->set_prev_sweep(fl->count());
2199     fl->set_coal_births(0);
2200     fl->set_coal_deaths(0);
2201     fl->set_split_births(0);
2202     fl->set_split_deaths(0);
2203   }
2204 }
2205 
2206 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2207   if (PrintFLSStatistics > 0) {
2208     HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
2209     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
2210                            p2i(largestAddr));
2211   }
2212   setFLSurplus();
2213   setFLHints();
2214   if (PrintGC && PrintFLSCensus > 0) {
2215     printFLCensus(sweep_count);
2216   }
2217   clearFLCensus();
2218   assert_locked();
2219   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2220 }
2221 
2222 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2223   if (size < SmallForDictionary) {
2224     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2225     return (fl->coal_desired() < 0) ||
2226            ((int)fl->count() > fl->coal_desired());
2227   } else {
2228     return dictionary()->coal_dict_over_populated(size);
2229   }
2230 }
2231 
2232 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2233   assert(size < SmallForDictionary, "Size too large for indexed list");
2234   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2235   fl->increment_coal_births();
2236   fl->increment_surplus();
2237 }
2238 
2239 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2240   assert(size < SmallForDictionary, "Size too large for indexed list");
2241   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2242   fl->increment_coal_deaths();
2243   fl->decrement_surplus();
2244 }
2245 
2246 void CompactibleFreeListSpace::coalBirth(size_t size) {
2247   if (size  < SmallForDictionary) {
2248     smallCoalBirth(size);
2249   } else {
2250     dictionary()->dict_census_update(size,
2251                                    false /* split */,
2252                                    true /* birth */);
2253   }
2254 }
2255 
2256 void CompactibleFreeListSpace::coalDeath(size_t size) {
2257   if (size < SmallForDictionary) {
2258     smallCoalDeath(size);
2259   } else {
2260     dictionary()->dict_census_update(size,
2261                                    false /* split */,
2262                                    false /* birth */);
2263   }
2264 }
2265 
2266 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2267   assert(size < SmallForDictionary, "Size too large for indexed list");
2268   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2269   fl->increment_split_births();
2270   fl->increment_surplus();
2271 }
2272 
2273 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2274   assert(size < SmallForDictionary, "Size too large for indexed list");
2275   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2276   fl->increment_split_deaths();
2277   fl->decrement_surplus();
2278 }
2279 
2280 void CompactibleFreeListSpace::split_birth(size_t size) {
2281   if (size  < SmallForDictionary) {
2282     smallSplitBirth(size);
2283   } else {
2284     dictionary()->dict_census_update(size,
2285                                    true /* split */,
2286                                    true /* birth */);
2287   }
2288 }
2289 
2290 void CompactibleFreeListSpace::splitDeath(size_t size) {
2291   if (size  < SmallForDictionary) {
2292     smallSplitDeath(size);
2293   } else {
2294     dictionary()->dict_census_update(size,
2295                                    true /* split */,
2296                                    false /* birth */);
2297   }
2298 }
2299 
2300 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2301   size_t to2 = from - to1;
2302   splitDeath(from);
2303   split_birth(to1);
2304   split_birth(to2);
2305 }
2306 
2307 void CompactibleFreeListSpace::print() const {
2308   print_on(tty);
2309 }
2310 
2311 void CompactibleFreeListSpace::prepare_for_verify() {
2312   assert_locked();
2313   repairLinearAllocationBlocks();
2314   // Verify that the SpoolBlocks look like free blocks of
2315   // appropriate sizes... To be done ...
2316 }
2317 
2318 class VerifyAllBlksClosure: public BlkClosure {
2319  private:
2320   const CompactibleFreeListSpace* _sp;
2321   const MemRegion                 _span;
2322   HeapWord*                       _last_addr;
2323   size_t                          _last_size;
2324   bool                            _last_was_obj;
2325   bool                            _last_was_live;
2326 
2327  public:
2328   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2329     MemRegion span) :  _sp(sp), _span(span),
2330                        _last_addr(NULL), _last_size(0),
2331                        _last_was_obj(false), _last_was_live(false) { }
2332 
2333   virtual size_t do_blk(HeapWord* addr) {
2334     size_t res;
2335     bool   was_obj  = false;
2336     bool   was_live = false;
2337     if (_sp->block_is_obj(addr)) {
2338       was_obj = true;
2339       oop p = oop(addr);
2340       guarantee(p->is_oop(), "Should be an oop");
2341       res = _sp->adjustObjectSize(p->size());
2342       if (_sp->obj_is_alive(addr)) {
2343         was_live = true;
2344         p->verify();
2345       }
2346     } else {
2347       FreeChunk* fc = (FreeChunk*)addr;
2348       res = fc->size();
2349       if (FLSVerifyLists && !fc->cantCoalesce()) {
2350         guarantee(_sp->verify_chunk_in_free_list(fc),
2351                   "Chunk should be on a free list");
2352       }
2353     }
2354     if (res == 0) {
2355       gclog_or_tty->print_cr("Livelock: no rank reduction!");
2356       gclog_or_tty->print_cr(
2357         " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2358         " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2359         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2360         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2361       _sp->print_on(gclog_or_tty);
2362       guarantee(false, "Seppuku!");
2363     }
2364     _last_addr = addr;
2365     _last_size = res;
2366     _last_was_obj  = was_obj;
2367     _last_was_live = was_live;
2368     return res;
2369   }
2370 };
2371 
2372 class VerifyAllOopsClosure: public OopClosure {
2373  private:
2374   const CMSCollector*             _collector;
2375   const CompactibleFreeListSpace* _sp;
2376   const MemRegion                 _span;
2377   const bool                      _past_remark;
2378   const CMSBitMap*                _bit_map;
2379 
2380  protected:
2381   void do_oop(void* p, oop obj) {
2382     if (_span.contains(obj)) { // the interior oop points into CMS heap
2383       if (!_span.contains(p)) { // reference from outside CMS heap
2384         // Should be a valid object; the first disjunct below allows
2385         // us to sidestep an assertion in block_is_obj() that insists
2386         // that p be in _sp. Note that several generations (and spaces)
2387         // are spanned by _span (CMS heap) above.
2388         guarantee(!_sp->is_in_reserved(obj) ||
2389                   _sp->block_is_obj((HeapWord*)obj),
2390                   "Should be an object");
2391         guarantee(obj->is_oop(), "Should be an oop");
2392         obj->verify();
2393         if (_past_remark) {
2394           // Remark has been completed, the object should be marked
2395           _bit_map->isMarked((HeapWord*)obj);
2396         }
2397       } else { // reference within CMS heap
2398         if (_past_remark) {
2399           // Remark has been completed -- so the referent should have
2400           // been marked, if referring object is.
2401           if (_bit_map->isMarked(_collector->block_start(p))) {
2402             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2403           }
2404         }
2405       }
2406     } else if (_sp->is_in_reserved(p)) {
2407       // the reference is from FLS, and points out of FLS
2408       guarantee(obj->is_oop(), "Should be an oop");
2409       obj->verify();
2410     }
2411   }
2412 
2413   template <class T> void do_oop_work(T* p) {
2414     T heap_oop = oopDesc::load_heap_oop(p);
2415     if (!oopDesc::is_null(heap_oop)) {
2416       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2417       do_oop(p, obj);
2418     }
2419   }
2420 
2421  public:
2422   VerifyAllOopsClosure(const CMSCollector* collector,
2423     const CompactibleFreeListSpace* sp, MemRegion span,
2424     bool past_remark, CMSBitMap* bit_map) :
2425     _collector(collector), _sp(sp), _span(span),
2426     _past_remark(past_remark), _bit_map(bit_map) { }
2427 
2428   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2429   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2430 };
2431 
2432 void CompactibleFreeListSpace::verify() const {
2433   assert_lock_strong(&_freelistLock);
2434   verify_objects_initialized();
2435   MemRegion span = _collector->_span;
2436   bool past_remark = (_collector->abstract_state() ==
2437                       CMSCollector::Sweeping);
2438 
2439   ResourceMark rm;
2440   HandleMark  hm;
2441 
2442   // Check integrity of CFL data structures
2443   _promoInfo.verify();
2444   _dictionary->verify();
2445   if (FLSVerifyIndexTable) {
2446     verifyIndexedFreeLists();
2447   }
2448   // Check integrity of all objects and free blocks in space
2449   {
2450     VerifyAllBlksClosure cl(this, span);
2451     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2452   }
2453   // Check that all references in the heap to FLS
2454   // are to valid objects in FLS or that references in
2455   // FLS are to valid objects elsewhere in the heap
2456   if (FLSVerifyAllHeapReferences)
2457   {
2458     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2459       _collector->markBitMap());
2460     CollectedHeap* ch = Universe::heap();
2461 
2462     // Iterate over all oops in the heap. Uses the _no_header version
2463     // since we are not interested in following the klass pointers.
2464     ch->oop_iterate_no_header(&cl);
2465   }
2466 
2467   if (VerifyObjectStartArray) {
2468     // Verify the block offset table
2469     _bt.verify();
2470   }
2471 }
2472 
2473 #ifndef PRODUCT
2474 void CompactibleFreeListSpace::verifyFreeLists() const {
2475   if (FLSVerifyLists) {
2476     _dictionary->verify();
2477     verifyIndexedFreeLists();
2478   } else {
2479     if (FLSVerifyDictionary) {
2480       _dictionary->verify();
2481     }
2482     if (FLSVerifyIndexTable) {
2483       verifyIndexedFreeLists();
2484     }
2485   }
2486 }
2487 #endif
2488 
2489 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2490   size_t i = 0;
2491   for (; i < IndexSetStart; i++) {
2492     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2493   }
2494   for (; i < IndexSetSize; i++) {
2495     verifyIndexedFreeList(i);
2496   }
2497 }
2498 
2499 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2500   FreeChunk* fc   =  _indexedFreeList[size].head();
2501   FreeChunk* tail =  _indexedFreeList[size].tail();
2502   size_t    num = _indexedFreeList[size].count();
2503   size_t      n = 0;
2504   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2505             "Slot should have been empty");
2506   for (; fc != NULL; fc = fc->next(), n++) {
2507     guarantee(fc->size() == size, "Size inconsistency");
2508     guarantee(fc->is_free(), "!free?");
2509     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2510     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2511   }
2512   guarantee(n == num, "Incorrect count");
2513 }
2514 
2515 #ifndef PRODUCT
2516 void CompactibleFreeListSpace::check_free_list_consistency() const {
2517   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
2518     "Some sizes can't be allocated without recourse to"
2519     " linear allocation buffers");
2520   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
2521     "else MIN_TREE_CHUNK_SIZE is wrong");
2522   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2523   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2524 }
2525 #endif
2526 
2527 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2528   assert_lock_strong(&_freelistLock);
2529   AdaptiveFreeList<FreeChunk> total;
2530   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
2531   AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2532   size_t total_free = 0;
2533   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2534     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2535     total_free += fl->count() * fl->size();
2536     if (i % (40*IndexSetStride) == 0) {
2537       AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2538     }
2539     fl->print_on(gclog_or_tty);
2540     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2541     total.set_surplus(    total.surplus()     + fl->surplus()    );
2542     total.set_desired(    total.desired()     + fl->desired()    );
2543     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2544     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2545     total.set_count(      total.count()       + fl->count()      );
2546     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2547     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2548     total.set_split_births(total.split_births() + fl->split_births());
2549     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2550   }
2551   total.print_on(gclog_or_tty, "TOTAL");
2552   gclog_or_tty->print_cr("Total free in indexed lists "
2553                          SIZE_FORMAT " words", total_free);
2554   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
2555     (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2556             (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2557     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2558   _dictionary->print_dict_census();
2559 }
2560 
2561 ///////////////////////////////////////////////////////////////////////////
2562 // CFLS_LAB
2563 ///////////////////////////////////////////////////////////////////////////
2564 
2565 #define VECTOR_257(x)                                                                                  \
2566   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2567   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2568      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2569      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2570      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2571      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2572      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2573      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2574      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2575      x }
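
// 257 must match CompactibleFreeListSpace::IndexSetSize (checked by the assert
// in the CFLS_LAB constructor below): one slot per indexed block size, with
// requests of IndexSetSize words or more served from the dictionary instead.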
2576 
2577 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
2578 // OldPLABSize, whose static default is different; if overridden at the
2579 // command-line, this will get reinitialized via a call to
2580 // modify_initialization() below.
2581 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
2582   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
2583 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
2584 uint   CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
2585 
2586 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
2587   _cfls(cfls)
2588 {
2589   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2590   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2591        i < CompactibleFreeListSpace::IndexSetSize;
2592        i += CompactibleFreeListSpace::IndexSetStride) {
2593     _indexedFreeList[i].set_size(i);
2594     _num_blocks[i] = 0;
2595   }
2596 }
2597 
2598 static bool _CFLS_LAB_modified = false;
2599 
2600 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
2601   assert(!_CFLS_LAB_modified, "Call only once");
2602   _CFLS_LAB_modified = true;
2603   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2604        i < CompactibleFreeListSpace::IndexSetSize;
2605        i += CompactibleFreeListSpace::IndexSetStride) {
2606     _blocks_to_claim[i].modify(n, wt, true /* force */);
2607   }
2608 }
2609 
2610 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
2611   FreeChunk* res;
2612   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2613   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2614     // This locking manages sync with other large object allocations.
2615     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2616                     Mutex::_no_safepoint_check_flag);
2617     res = _cfls->getChunkFromDictionaryExact(word_sz);
2618     if (res == NULL) return NULL;
2619   } else {
2620     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2621     if (fl->count() == 0) {
2622       // Attempt to refill this local free list.
2623       get_from_global_pool(word_sz, fl);
2624       // If it didn't work, give up.
2625       if (fl->count() == 0) return NULL;
2626     }
2627     res = fl->get_chunk_at_head();
2628     assert(res != NULL, "Why was count non-zero?");
2629   }
2630   res->markNotFree();
2631   assert(!res->is_free(), "shouldn't be marked free");
2632   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
  // Mangle the just-allocated object with a distinct pattern.
2634   debug_only(res->mangleAllocated(word_sz));
2635   return (HeapWord*)res;
2636 }
2637 
2638 // Get a chunk of blocks of the right size and update related
2639 // book-keeping stats
2640 void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2641   // Get the #blocks we want to claim
2642   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2643   assert(n_blks > 0, "Error");
  assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2645   // In some cases, when the application has a phase change,
2646   // there may be a sudden and sharp shift in the object survival
2647   // profile, and updating the counts at the end of a scavenge
2648   // may not be quick enough, giving rise to large scavenge pauses
2649   // during these phase changes. It is beneficial to detect such
2650   // changes on-the-fly during a scavenge and avoid such a phase-change
2651   // pothole. The following code is a heuristic attempt to do that.
2652   // It is protected by a product flag until we have gained
2653   // enough experience with this heuristic and fine-tuned its behavior.
  // WARNING: This might increase fragmentation if we overreact to
  // small spikes, so some kind of historical smoothing, informed by
  // experience with this increased reactivity, might be useful.
2657   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2658   // default.
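  // Illustrative arithmetic with hypothetical values: if the smoothed average
  // yields n_blks = 16 and this scavenge has already consumed
  // _num_blocks[word_sz] = 1024 blocks of this size, then with
  // CMSOldPLABToleranceFactor = 4 and CMSOldPLABNumRefills = 4 the divisor is
  // 4 * 4 * 16 = 256, so multiple = 4; with CMSOldPLABReactivityFactor = 2 the
  // refill grows by 2 * 4 * 16 = 128 blocks, subject to the CMSOldPLABMax cap.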
2659   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2660     size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
2661     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2662     n_blks = MIN2(n_blks, CMSOldPLABMax);
2663   }
2664   assert(n_blks > 0, "Error");
2665   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2666   // Update stats table entry for this block size
2667   _num_blocks[word_sz] += fl->count();
2668 }
2669 
2670 void CFLS_LAB::compute_desired_plab_size() {
2671   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2672        i < CompactibleFreeListSpace::IndexSetSize;
2673        i += CompactibleFreeListSpace::IndexSetStride) {
2674     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2675            "Counter inconsistency");
2676     if (_global_num_workers[i] > 0) {
2677       // Need to smooth wrt historical average
2678       if (ResizeOldPLAB) {
2679         _blocks_to_claim[i].sample(
2680           MAX2((size_t)CMSOldPLABMin,
2681           MIN2((size_t)CMSOldPLABMax,
2682                _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
2683       }
2684       // Reset counters for next round
2685       _global_num_workers[i] = 0;
2686       _global_num_blocks[i] = 0;
2687       if (PrintOldPLAB) {
2688         gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT,
2689                                i, (size_t)_blocks_to_claim[i].average());
2690       }
2691     }
2692   }
2693 }
2694 
2695 // If this is changed in the future to allow parallel
2696 // access, one would need to take the FL locks and,
2697 // depending on how it is used, stagger access from
2698 // parallel threads to reduce contention.
2699 void CFLS_LAB::retire(int tid) {
2700   // We run this single threaded with the world stopped;
2701   // so no need for locks and such.
2702   NOT_PRODUCT(Thread* t = Thread::current();)
2703   assert(Thread::current()->is_VM_thread(), "Error");
2704   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2705        i < CompactibleFreeListSpace::IndexSetSize;
2706        i += CompactibleFreeListSpace::IndexSetStride) {
2707     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2708            "Can't retire more than what we obtained");
2709     if (_num_blocks[i] > 0) {
2710       size_t num_retire =  _indexedFreeList[i].count();
2711       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2712       {
2713         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2714         //                Mutex::_no_safepoint_check_flag);
2715 
        // Update global stats for the number of blocks used
2717         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2718         _global_num_workers[i]++;
2719         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2720         if (num_retire > 0) {
2721           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2722           // Reset this list.
2723           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2724           _indexedFreeList[i].set_size(i);
2725         }
2726       }
2727       if (PrintOldPLAB) {
2728         gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2729                                tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2730       }
2731       // Reset stats for next round
2732       _num_blocks[i]         = 0;
2733     }
2734   }
2735 }
2736 
void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2738   assert(fl->count() == 0, "Precondition.");
2739   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2740          "Precondition");
2741 
  // We'll try all multiples of word_sz in the indexed set, starting with
  // word_sz itself; if CMSSplitIndexedFreeListBlocks is set, we also try
  // larger multiples, splitting each chunk found. Failing that, we get a
  // big chunk from the dictionary and split it up.
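  // For example, with a hypothetical request of n = 100 chunks of word_sz = 8:
  // k = 1 takes up to 100 chunks straight off the 8-word list; with
  // CMSSplitIndexedFreeListBlocks enabled, k = 2 takes up to n/k = 50 chunks
  // off the 16-word list and splits each of them 2 ways, and so on while
  // k * word_sz stays below IndexSetSize.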
2745   {
2746     bool found;
2747     int  k;
2748     size_t cur_sz;
2749     for (k = 1, cur_sz = k * word_sz, found = false;
2750          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2751          (CMSSplitIndexedFreeListBlocks || k <= 1);
2752          k++, cur_sz = k * word_sz) {
2753       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2754       fl_for_cur_sz.set_size(cur_sz);
2755       {
2756         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2757                         Mutex::_no_safepoint_check_flag);
2758         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2759         if (gfl->count() != 0) {
2760           // nn is the number of chunks of size cur_sz that
2761           // we'd need to split k-ways each, in order to create
2762           // "n" chunks of size word_sz each.
2763           const size_t nn = MAX2(n/k, (size_t)1);
2764           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2765           found = true;
2766           if (k > 1) {
2767             // Update split death stats for the cur_sz-size blocks list:
2768             // we increment the split death count by the number of blocks
2769             // we just took from the cur_sz-size blocks list and which
2770             // we will be splitting below.
2771             ssize_t deaths = gfl->split_deaths() +
2772                              fl_for_cur_sz.count();
2773             gfl->set_split_deaths(deaths);
2774           }
2775         }
2776       }
2777       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2778       if (found) {
2779         if (k == 1) {
2780           fl->prepend(&fl_for_cur_sz);
2781         } else {
2782           // Divide each block on fl_for_cur_sz up k ways.
2783           FreeChunk* fc;
2784           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2785             // Must do this in reverse order, so that anybody attempting to
2786             // access the main chunk sees it as a single free block until we
2787             // change it.
2788             size_t fc_size = fc->size();
2789             assert(fc->is_free(), "Error");
2790             for (int i = k-1; i >= 0; i--) {
2791               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2792               assert((i != 0) ||
2793                         ((fc == ffc) && ffc->is_free() &&
2794                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2795                         "Counting error");
2796               ffc->set_size(word_sz);
2797               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2798               ffc->link_next(NULL);
2799               // Above must occur before BOT is updated below.
2800               OrderAccess::storestore();
2801               // splitting from the right, fc_size == i * word_sz
2802               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2803               fc_size -= word_sz;
2804               assert(fc_size == i*word_sz, "Error");
2805               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2806               _bt.verify_single_block((HeapWord*)fc, fc_size);
2807               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2808               // Push this on "fl".
2809               fl->return_chunk_at_head(ffc);
2810             }
2811             // TRAP
2812             assert(fl->tail()->next() == NULL, "List invariant.");
2813           }
2814         }
2815         // Update birth stats for this block size.
2816         size_t num = fl->count();
2817         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2818                         Mutex::_no_safepoint_check_flag);
2819         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2820         _indexedFreeList[word_sz].set_split_births(births);
2821         return;
2822       }
2823     }
2824   }
2825   // Otherwise, we'll split a block from the dictionary.
2826   FreeChunk* fc = NULL;
2827   FreeChunk* rem_fc = NULL;
2828   size_t rem;
2829   {
2830     MutexLockerEx x(parDictionaryAllocLock(),
2831                     Mutex::_no_safepoint_check_flag);
2832     while (n > 0) {
2833       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
2834                                   FreeBlockDictionary<FreeChunk>::atLeast);
2835       if (fc != NULL) {
2836         _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2837         dictionary()->dict_census_update(fc->size(),
2838                                        true /*split*/,
2839                                        false /*birth*/);
2840         break;
2841       } else {
2842         n--;
2843       }
2844     }
2845     if (fc == NULL) return;
2846     // Otherwise, split up that block.
2847     assert((ssize_t)n >= 1, "Control point invariant");
2848     assert(fc->is_free(), "Error: should be a free block");
2849     _bt.verify_single_block((HeapWord*)fc, fc->size());
2850     const size_t nn = fc->size() / word_sz;
2851     n = MIN2(nn, n);
2852     assert((ssize_t)n >= 1, "Control point invariant");
2853     rem = fc->size() - n * word_sz;
2854     // If there is a remainder, and it's too small, allocate one fewer.
2855     if (rem > 0 && rem < MinChunkSize) {
2856       n--; rem += word_sz;
2857     }
2858     // Note that at this point we may have n == 0.
2859     assert((ssize_t)n >= 0, "Control point invariant");
2860 
2861     // If n is 0, the chunk fc that was found is not large
2862     // enough to leave a viable remainder.  We are unable to
2863     // allocate even one block.  Return fc to the
2864     // dictionary and return, leaving "fl" empty.
2865     if (n == 0) {
2866       returnChunkToDictionary(fc);
2867       assert(fl->count() == 0, "We never allocated any blocks");
2868       return;
2869     }
2870 
2871     // First return the remainder, if any.
2872     // Note that we hold the lock until we decide if we're going to give
2873     // back the remainder to the dictionary, since a concurrent allocation
2874     // may otherwise see the heap as empty.  (We're willing to take that
2875     // hit if the block is a small block.)
2876     if (rem > 0) {
2877       size_t prefix_size = n * word_sz;
2878       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2879       rem_fc->set_size(rem);
2880       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2881       rem_fc->link_next(NULL);
2882       // Above must occur before BOT is updated below.
2883       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2884       OrderAccess::storestore();
2885       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2886       assert(fc->is_free(), "Error");
2887       fc->set_size(prefix_size);
2888       if (rem >= IndexSetSize) {
2889         returnChunkToDictionary(rem_fc);
2890         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2891         rem_fc = NULL;
2892       }
2893       // Otherwise, return it to the small list below.
2894     }
2895   }
2896   if (rem_fc != NULL) {
2897     MutexLockerEx x(_indexedFreeListParLocks[rem],
2898                     Mutex::_no_safepoint_check_flag);
2899     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2900     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2901     smallSplitBirth(rem);
2902   }
2903   assert((ssize_t)n > 0 && fc != NULL, "Consistency");
2904   // Now do the splitting up.
2905   // Must do this in reverse order, so that anybody attempting to
2906   // access the main chunk sees it as a single free block until we
2907   // change it.
2908   size_t fc_size = n * word_sz;
2909   // All but first chunk in this loop
2910   for (ssize_t i = n-1; i > 0; i--) {
2911     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2912     ffc->set_size(word_sz);
2913     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2914     ffc->link_next(NULL);
2915     // Above must occur before BOT is updated below.
2916     OrderAccess::storestore();
    // Splitting from the right; after the decrement below, fc_size == i * word_sz.
2918     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2919     fc_size -= word_sz;
2920     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2921     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2922     _bt.verify_single_block((HeapWord*)fc, fc_size);
2923     // Push this on "fl".
2924     fl->return_chunk_at_head(ffc);
2925   }
2926   // First chunk
2927   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
2928   // The blocks above should show their new sizes before the first block below
2929   fc->set_size(word_sz);
2930   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
2931   fc->link_next(NULL);
2932   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2933   _bt.verify_single_block((HeapWord*)fc, fc->size());
2934   fl->return_chunk_at_head(fc);
2935 
2936   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2937   {
2938     // Update the stats for this block size.
2939     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2940                     Mutex::_no_safepoint_check_flag);
2941     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
2942     _indexedFreeList[word_sz].set_split_births(births);
2943     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2944     // _indexedFreeList[word_sz].set_surplus(new_surplus);
2945   }
2946 
2947   // TRAP
2948   assert(fl->tail()->next() == NULL, "List invariant.");
2949 }
2950 
2951 // Set up the space's par_seq_tasks structure for work claiming
2952 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2953 // XXX Need to suitably abstract and generalize this and the next
2954 // method into one.
2955 void
2956 CompactibleFreeListSpace::
2957 initialize_sequential_subtasks_for_rescan(int n_threads) {
2958   // The "size" of each task is fixed according to rescan_task_size.
2959   assert(n_threads > 0, "Unexpected n_threads argument");
2960   const size_t task_size = rescan_task_size();
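  // Round up so that every word of the used region is covered by some task.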
2961   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
2962   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2963   assert(n_tasks == 0 ||
2964          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2965           (used_region().start() + n_tasks*task_size >= used_region().end())),
2966          "n_tasks calculation incorrect");
2967   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2968   assert(!pst->valid(), "Clobbering existing data?");
2969   // Sets the condition for completion of the subtask (how many threads
2970   // need to finish in order to be done).
2971   pst->set_n_threads(n_threads);
2972   pst->set_n_tasks((int)n_tasks);
2973 }
2974 
2975 // Set up the space's par_seq_tasks structure for work claiming
2976 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2977 void
2978 CompactibleFreeListSpace::
2979 initialize_sequential_subtasks_for_marking(int n_threads,
2980                                            HeapWord* low) {
2981   // The "size" of each task is fixed according to rescan_task_size.
2982   assert(n_threads > 0, "Unexpected n_threads argument");
2983   const size_t task_size = marking_task_size();
2984   assert(task_size > CardTableModRefBS::card_size_in_words &&
2985          (task_size %  CardTableModRefBS::card_size_in_words == 0),
2986          "Otherwise arithmetic below would be incorrect");
2987   MemRegion span = _gen->reserved();
2988   if (low != NULL) {
2989     if (span.contains(low)) {
      // Align low down to a card boundary so that
2991       // we can use block_offset_careful() on span boundaries.
2992       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
2993                                  CardTableModRefBS::card_size);
2994       // Clip span prefix at aligned_low
2995       span = span.intersection(MemRegion(aligned_low, span.end()));
2996     } else if (low > span.end()) {
2997       span = MemRegion(low, low);  // Null region
2998     } // else use entire span
2999   }
3000   assert(span.is_empty() ||
3001          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
3002         "span should start at a card boundary");
3003   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
3004   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
3005   assert(n_tasks == 0 ||
3006          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
3007           (span.start() + n_tasks*task_size >= span.end())),
3008          "n_tasks calculation incorrect");
3009   SequentialSubTasksDone* pst = conc_par_seq_tasks();
3010   assert(!pst->valid(), "Clobbering existing data?");
3011   // Sets the condition for completion of the subtask (how many threads
3012   // need to finish in order to be done).
3013   pst->set_n_threads(n_threads);
3014   pst->set_n_tasks((int)n_tasks);
3015 }