1 /*
   2  * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classLoaderData.hpp"
  27 #include "classfile/stringTable.hpp"
  28 #include "classfile/systemDictionary.hpp"
  29 #include "code/codeCache.hpp"
  30 #include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
  31 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
  32 #include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
  33 #include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
  34 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
  35 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
  36 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
  37 #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
  38 #include "gc_implementation/parNew/parNewGeneration.hpp"
  39 #include "gc_implementation/shared/collectorCounters.hpp"
  40 #include "gc_implementation/shared/gcTimer.hpp"
  41 #include "gc_implementation/shared/gcTrace.hpp"
  42 #include "gc_implementation/shared/gcTraceTime.hpp"
  43 #include "gc_implementation/shared/isGCActiveMark.hpp"
  44 #include "gc_interface/collectedHeap.inline.hpp"
  45 #include "memory/allocation.hpp"
  46 #include "memory/cardTableRS.hpp"
  47 #include "memory/collectorPolicy.hpp"
  48 #include "memory/gcLocker.inline.hpp"
  49 #include "memory/genCollectedHeap.hpp"
  50 #include "memory/genMarkSweep.hpp"
  51 #include "memory/genOopClosures.inline.hpp"
  52 #include "memory/iterator.hpp"
  53 #include "memory/padded.hpp"
  54 #include "memory/referencePolicy.hpp"
  55 #include "memory/resourceArea.hpp"
  56 #include "memory/tenuredGeneration.hpp"
  57 #include "oops/oop.inline.hpp"
  58 #include "prims/jvmtiExport.hpp"
  59 #include "runtime/globals_extension.hpp"
  60 #include "runtime/handles.inline.hpp"
  61 #include "runtime/java.hpp"
  62 #include "runtime/orderAccess.inline.hpp"
  63 #include "runtime/vmThread.hpp"
  64 #include "services/memoryService.hpp"
  65 #include "services/runtimeService.hpp"
  66 
  67 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  68 
  69 // statics
  70 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
  71 bool CMSCollector::_full_gc_requested = false;
  72 GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;
  73 
  74 //////////////////////////////////////////////////////////////////
  75 // In support of CMS/VM thread synchronization
  76 //////////////////////////////////////////////////////////////////
  77 // We split use of the CGC_lock into 2 "levels".
  78 // The low-level locking is of the usual CGC_lock monitor. We introduce
  79 // a higher level "token" (hereafter "CMS token") built on top of the
  80 // low level monitor (hereafter "CGC lock").
  81 // The token-passing protocol gives priority to the VM thread. The
  82 // CMS-lock doesn't provide any fairness guarantees, but clients
  83 // should ensure that it is only held for very short, bounded
  84 // durations.
  85 //
  86 // When either of the CMS thread or the VM thread is involved in
  87 // collection operations during which it does not want the other
  88 // thread to interfere, it obtains the CMS token.
  89 //
  90 // If either thread tries to get the token while the other has
  91 // it, that thread waits. However, if the VM thread and CMS thread
  92 // both want the token, then the VM thread gets priority while the
  93 // CMS thread waits. This ensures, for instance, that the "concurrent"
  94 // phases of the CMS thread's work do not block out the VM thread
  95 // for long periods of time as the CMS thread continues to hog
  96 // the token. (See bug 4616232).
  97 //
  98 // The baton-passing functions are, however, controlled by the
  99 // flags _foregroundGCShouldWait and _foregroundGCIsActive,
 100 // and here the low-level CMS lock, not the high level token,
 101 // ensures mutual exclusion.
 102 //
 103 // Two important conditions that we have to satisfy:
 104 // 1. if a thread does a low-level wait on the CMS lock, then it
 105 //    relinquishes the CMS token if it was holding that token
 106 //    when it acquired the low-level CMS lock.
 107 // 2. any low-level notifications on the low-level lock
 108 //    should only be sent when a thread has relinquished the token.
 109 //
 110 // In the absence of either property, we'd have potential deadlock.
 111 //
 112 // We protect each of the CMS (concurrent and sequential) phases
 113 // with the CMS _token_, not the CMS _lock_.
 114 //
 115 // The only code protected by CMS lock is the token acquisition code
 116 // itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
 117 // baton-passing code.
 118 //
 119 // Unfortunately, I couldn't come up with a good abstraction to factor and
 120 // hide the naked CGC_lock manipulation in the baton-passing code
 121 // further below. That's something we should try to do. Also, the proof
 122 // of correctness of this 2-level locking scheme is far from obvious,
 123 // and potentially quite slippery. We have an uneasy suspicion, for instance,
 124 // that there may be a theoretical possibility of delay/starvation in the
 125 // low-level lock/wait/notify scheme used for the baton-passing because of
 126 // potential interference with the priority scheme embodied in the
 127 // CMS-token-passing protocol. See related comments at a CGC_lock->wait()
 128 // invocation further below and marked with "XXX 20011219YSR".
 129 // Indeed, as we note elsewhere, this may become yet more slippery
 130 // in the presence of multiple CMS and/or multiple VM threads. XXX
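//
// A minimal sketch (illustrative only; the authoritative code is in
// ConcurrentMarkSweepThread and in the baton-passing code further below)
// of the shape that conditions 1 and 2 above impose on a low-level wait:
//
//   // relinquish the CMS token before any low-level wait (condition 1)
//   ConcurrentMarkSweepThread::desynchronize(is_cms_thread);
//   {
//     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
//     CGC_lock->wait(Mutex::_no_safepoint_check_flag);
//     // similarly, CGC_lock->notify() is issued only by a thread that
//     // has already given up the token (condition 2)
//   }
//   ConcurrentMarkSweepThread::synchronize(is_cms_thread);  // re-acquire token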
 131 
 132 class CMSTokenSync: public StackObj {
 133  private:
 134   bool _is_cms_thread;
 135  public:
 136   CMSTokenSync(bool is_cms_thread):
 137     _is_cms_thread(is_cms_thread) {
 138     assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
 139            "Incorrect argument to constructor");
 140     ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
 141   }
 142 
 143   ~CMSTokenSync() {
 144     assert(_is_cms_thread ?
 145              ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
 146              ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
 147           "Incorrect state");
 148     ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
 149   }
 150 };
 151 
 152 // Convenience class that does a CMSTokenSync and then acquires
 153 // up to three locks.
 154 class CMSTokenSyncWithLocks: public CMSTokenSync {
 155  private:
 156   // Note: locks are acquired in textual declaration order
 157   // and released in the opposite order
 158   MutexLockerEx _locker1, _locker2, _locker3;
 159  public:
 160   CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
 161                         Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
 162     CMSTokenSync(is_cms_thread),
 163     _locker1(mutex1, Mutex::_no_safepoint_check_flag),
 164     _locker2(mutex2, Mutex::_no_safepoint_check_flag),
 165     _locker3(mutex3, Mutex::_no_safepoint_check_flag)
 166   { }
 167 };
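
// Typical usage of the helpers above (a sketch only; the real call sites are
// in the collector phases later in this file): a CMS-thread phase that needs
// the CMS token plus, say, the free list lock and the bit map lock for its
// duration would simply do
//
//   {
//     CMSTokenSyncWithLocks ts(true /* is_cms_thread */,
//                              _cmsGen->freelistLock(), bitMapLock());
//     ... do the phase's work ...
//   }  // locks released in reverse order, then the token is passed back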
 168 
 169 
 170 // Wrapper class to temporarily disable icms during a foreground cms collection.
 171 class ICMSDisabler: public StackObj {
 172  public:
 173   // The ctor disables icms and wakes up the thread so it notices the change;
 174   // the dtor re-enables icms.  Note that the CMSCollector methods will check
 175   // CMSIncrementalMode.
 176   ICMSDisabler()  { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
 177   ~ICMSDisabler() { CMSCollector::enable_icms(); }
 178 };
 179 
 180 //////////////////////////////////////////////////////////////////
 181 //  Concurrent Mark-Sweep Generation /////////////////////////////
 182 //////////////////////////////////////////////////////////////////
 183 
 184 NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
 185 
 186 // This class holds the per-thread state necessary to support parallel
 187 // young-gen collection.
 188 class CMSParGCThreadState: public CHeapObj<mtGC> {
 189  public:
 190   CFLS_LAB lab;
 191   PromotionInfo promo;
 192 
 193   // Constructor.
 194   CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
 195     promo.setSpace(cfls);
 196   }
 197 };
 198 
 199 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
 200      ReservedSpace rs, size_t initial_byte_size, int level,
 201      CardTableRS* ct, bool use_adaptive_freelists,
 202      FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
 203   CardGeneration(rs, initial_byte_size, level, ct),
 204   _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
 205   _debug_collection_type(Concurrent_collection_type),
 206   _did_compact(false)
 207 {
 208   HeapWord* bottom = (HeapWord*) _virtual_space.low();
 209   HeapWord* end    = (HeapWord*) _virtual_space.high();
 210 
 211   _direct_allocated_words = 0;
 212   NOT_PRODUCT(
 213     _numObjectsPromoted = 0;
 214     _numWordsPromoted = 0;
 215     _numObjectsAllocated = 0;
 216     _numWordsAllocated = 0;
 217   )
 218 
 219   _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
 220                                            use_adaptive_freelists,
 221                                            dictionaryChoice);
 222   NOT_PRODUCT(debug_cms_space = _cmsSpace;)
 223   if (_cmsSpace == NULL) {
 224     vm_exit_during_initialization(
 225       "CompactibleFreeListSpace allocation failure");
 226   }
 227   _cmsSpace->_gen = this;
 228 
 229   _gc_stats = new CMSGCStats();
 230 
 231   // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
 232   // offsets match. The ability to tell free chunks from objects
 233   // depends on this property.
 234   debug_only(
 235     FreeChunk* junk = NULL;
 236     assert(UseCompressedClassPointers ||
 237            junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
 238            "Offset of FreeChunk::_prev within FreeChunk must match"
 239            "  that of OopDesc::_klass within OopDesc");
 240   )
 241   if (CollectedHeap::use_parallel_gc_threads()) {
 242     typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
 243     _par_gc_thread_states =
 244       NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads, mtGC);
 245     if (_par_gc_thread_states == NULL) {
 246       vm_exit_during_initialization("Could not allocate par gc structs");
 247     }
 248     for (uint i = 0; i < ParallelGCThreads; i++) {
 249       _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
 250       if (_par_gc_thread_states[i] == NULL) {
 251         vm_exit_during_initialization("Could not allocate par gc structs");
 252       }
 253     }
 254   } else {
 255     _par_gc_thread_states = NULL;
 256   }
 257   _incremental_collection_failed = false;
 258   // The "dilatation_factor" is the expansion that can occur on
 259   // account of the fact that the minimum object size in the CMS
 260   // generation may be larger than that in, say, a contiguous young
 261   //  generation.
 262   // Ideally, in the calculation below, we'd compute the dilatation
 263   // factor as: MinChunkSize/(promoting_gen's min object size)
 264   // Since we do not have such a general query interface for the
 265   // promoting generation, we'll instead just use the minimum
 266   // object size (which today is a header's worth of space);
 267   // note that all arithmetic is in units of HeapWords.
 268   assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
 269   assert(_dilatation_factor >= 1.0, "from previous assert");
 270 }
 271 
 272 
 273 // The field "_initiating_occupancy" represents the occupancy percentage
 274 // at which we trigger a new collection cycle.  Unless explicitly specified
 275 // via CMSInitiatingOccupancyFraction (argument "io" below), it
 276 // is calculated by:
 277 //
 278 //   Let "f" be MinHeapFreeRatio; then, expressed as a percentage,
 279 //
 280 //    _initiating_occupancy = 100-f +
 281 //                           f * (CMSTriggerRatio/100)
 282 //   where CMSTriggerRatio is the argument "tr" below.
 283 //
 284 // That is, if we assume the heap is at its desired maximum occupancy at the
 285 // end of a collection, we let CMSTriggerRatio percent of the (purported)
 286 // free space be allocated before initiating a new collection cycle.
 287 //
 288 void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
 289   assert(io <= 100 && tr <= 100, "Check the arguments");
 290   if (io >= 0) {
 291     _initiating_occupancy = (double)io / 100.0;
 292   } else {
 293     _initiating_occupancy = ((100 - MinHeapFreeRatio) +
 294                              (double)(tr * MinHeapFreeRatio) / 100.0)
 295                             / 100.0;
 296   }
 297 }
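
// Worked example (assuming the typical defaults MinHeapFreeRatio = 40 and
// CMSTriggerRatio = 80, with CMSInitiatingOccupancyFraction left unset):
//
//   _initiating_occupancy = ((100 - 40) + 80 * 40 / 100) / 100
//                         = (60 + 32) / 100
//                         = 0.92
//
// i.e. a new collection cycle is initiated once the generation is about
// 92% occupied.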
 298 
 299 void ConcurrentMarkSweepGeneration::ref_processor_init() {
 300   assert(collector() != NULL, "no collector");
 301   collector()->ref_processor_init();
 302 }
 303 
 304 void CMSCollector::ref_processor_init() {
 305   if (_ref_processor == NULL) {
 306     // Allocate and initialize a reference processor
 307     _ref_processor =
 308       new ReferenceProcessor(_span,                               // span
 309                              (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
 310                              (int) ParallelGCThreads,             // mt processing degree
 311                              _cmsGen->refs_discovery_is_mt(),     // mt discovery
 312                              (int) MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
 313                              _cmsGen->refs_discovery_is_atomic(), // atomic discovery? (false for CMS: discovery is concurrent)
 314                              &_is_alive_closure);                 // closure for liveness info
 315     // Initialize the _ref_processor field of CMSGen
 316     _cmsGen->set_ref_processor(_ref_processor);
 317 
 318   }
 319 }
 320 
 321 CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
 322   GenCollectedHeap* gch = GenCollectedHeap::heap();
 323   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
 324     "Wrong type of heap");
 325   CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
 326     gch->gen_policy()->size_policy();
 327   assert(sp->is_gc_cms_adaptive_size_policy(),
 328     "Wrong type of size policy");
 329   return sp;
 330 }
 331 
 332 CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
 333   CMSGCAdaptivePolicyCounters* results =
 334     (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
 335   assert(
 336     results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
 337     "Wrong gc policy counter kind");
 338   return results;
 339 }
 340 
 341 
 342 void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
 343 
 344   const char* gen_name = "old";
 345 
 346   // Generation Counters - generation 1, 1 subspace
 347   _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);
 348 
 349   _space_counters = new GSpaceCounters(gen_name, 0,
 350                                        _virtual_space.reserved_size(),
 351                                        this, _gen_counters);
 352 }
 353 
 354 CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
 355   _cms_gen(cms_gen)
 356 {
 357   assert(alpha <= 100, "bad value");
 358   _saved_alpha = alpha;
 359 
 360   // Initialize the alphas to the bootstrap value of 100.
 361   _gc0_alpha = _cms_alpha = 100;
 362 
 363   _cms_begin_time.update();
 364   _cms_end_time.update();
 365 
 366   _gc0_duration = 0.0;
 367   _gc0_period = 0.0;
 368   _gc0_promoted = 0;
 369 
 370   _cms_duration = 0.0;
 371   _cms_period = 0.0;
 372   _cms_allocated = 0;
 373 
 374   _cms_used_at_gc0_begin = 0;
 375   _cms_used_at_gc0_end = 0;
 376   _allow_duty_cycle_reduction = false;
 377   _valid_bits = 0;
 378   _icms_duty_cycle = CMSIncrementalDutyCycle;
 379 }
 380 
 381 double CMSStats::cms_free_adjustment_factor(size_t free) const {
 382   // TBD: CR 6909490
 383   return 1.0;
 384 }
 385 
 386 void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
 387 }
 388 
 389 // If promotion failure handling is on, use
 390 // the padded average size of the promotion for each
 391 // young generation collection.
 392 double CMSStats::time_until_cms_gen_full() const {
 393   size_t cms_free = _cms_gen->cmsSpace()->free();
 394   GenCollectedHeap* gch = GenCollectedHeap::heap();
 395   size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(),
 396                                    (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
 397   if (cms_free > expected_promotion) {
 398     // Start a cms collection if there isn't enough space to promote
 399     // for the next minor collection.  Use the padded average as
 400     // a safety factor.
 401     cms_free -= expected_promotion;
 402 
 403     // Adjust by the safety factor.
 404     double cms_free_dbl = (double)cms_free;
 405     double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
 406     // Apply a further correction factor which tries to adjust
 407     // for recent occurrences of concurrent mode failures.
 408     cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
 409     cms_free_dbl = cms_free_dbl * cms_adjustment;
 410 
 411     if (PrintGCDetails && Verbose) {
 412       gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
 413         SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
 414         cms_free, expected_promotion);
 415       gclog_or_tty->print_cr("  cms_free_dbl %f cms_consumption_rate %f",
 416         cms_free_dbl, cms_consumption_rate() + 1.0);
 417     }
 418     // Add 1 in case the consumption rate goes to zero.
 419     return cms_free_dbl / (cms_consumption_rate() + 1.0);
 420   }
 421   return 0.0;
 422 }
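
// Worked example (illustrative numbers only, assuming CMSIncrementalSafetyFactor
// is at its default of 10 and the adjustment factor above is 1.0): with 200M of
// cms free space, an expected (padded average) promotion of 50M per scavenge,
// and a measured cms consumption rate of 5M/s,
//
//   cms_free     = 200M - 50M        = 150M
//   cms_free_dbl = 150M * (90/100.0) = 135M
//   result       = 135M / (5M/s + 1) ~ 27 seconds until the cms gen is full.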
 423 
 424 // Compare the duration of the cms collection to the
 425 // time remaining before the cms generation is full.
 426 // Note that the time from the start of the cms collection
 427 // to the start of the cms sweep (less than the total
 428 // duration of the cms collection) could be used instead.  That
 429 // was tried, and some applications experienced
 430 // promotion failures early in execution.  This was
 431 // possibly because the averages were not accurate
 432 // enough at the beginning.
 433 double CMSStats::time_until_cms_start() const {
 434   // We add "gc0_period" to the "work" calculation
 435   // below because this query is done (mostly) at the
 436   // end of a scavenge, so we need to conservatively
 437   // account for that much possible delay
 438   // in the query so as to avoid concurrent mode failures
 439   // due to starting the collection just a wee bit too
 440   // late.
 441   double work = cms_duration() + gc0_period();
 442   double deadline = time_until_cms_gen_full();
 443   // If a concurrent mode failure occurred recently, we want to be
 444   // more conservative and halve our expected time_until_cms_gen_full()
 445   if (work > deadline) {
 446     if (Verbose && PrintGCDetails) {
 447       gclog_or_tty->print(
 448         " CMSCollector: collect because of anticipated promotion "
 449         "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
 450         gc0_period(), time_until_cms_gen_full());
 451     }
 452     return 0.0;
 453   }
 454   return work - deadline;
 455 }
 456 
 457 // Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
 458 // amount of change to prevent wild oscillation.
 459 unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
 460                                               unsigned int new_duty_cycle) {
 461   assert(old_duty_cycle <= 100, "bad input value");
 462   assert(new_duty_cycle <= 100, "bad input value");
 463 
 464   // Note:  use subtraction with caution since it may underflow (values are
 465   // unsigned).  Addition is safe since we're in the range 0-100.
 466   unsigned int damped_duty_cycle = new_duty_cycle;
 467   if (new_duty_cycle < old_duty_cycle) {
 468     const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
 469     if (new_duty_cycle + largest_delta < old_duty_cycle) {
 470       damped_duty_cycle = old_duty_cycle - largest_delta;
 471     }
 472   } else if (new_duty_cycle > old_duty_cycle) {
 473     const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
 474     if (new_duty_cycle > old_duty_cycle + largest_delta) {
 475       damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
 476     }
 477   }
 478   assert(damped_duty_cycle <= 100, "invalid duty cycle computed");
 479 
 480   if (CMSTraceIncrementalPacing) {
 481     gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
 482                            old_duty_cycle, new_duty_cycle, damped_duty_cycle);
 483   }
 484   return damped_duty_cycle;
 485 }
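
// Worked examples of the damping above (purely illustrative):
//
//   icms_damped_duty_cycle(60, 10): largest_delta = MAX2(60/4, 5) = 15 and
//     10 + 15 < 60, so the reduction is clamped to 60 - 15 = 45.
//   icms_damped_duty_cycle(20, 90): largest_delta = MAX2(20/4, 15) = 15 and
//     90 > 20 + 15, so the increase is clamped to MIN2(35, 100) = 35.
//   icms_damped_duty_cycle(50, 55): the change is within the allowed delta,
//     so 55 is returned unchanged.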
 486 
 487 unsigned int CMSStats::icms_update_duty_cycle_impl() {
 488   assert(CMSIncrementalPacing && valid(),
 489          "should be handled in icms_update_duty_cycle()");
 490 
 491   double cms_time_so_far = cms_timer().seconds();
 492   double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
 493   double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);
 494 
 495   // Avoid division by 0.
 496   double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
 497   double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;
 498 
 499   unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
 500   if (new_duty_cycle > _icms_duty_cycle) {
 501     // Avoid very small duty cycles (1 or 2); 0 is allowed.
 502     if (new_duty_cycle > 2) {
 503       _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
 504                                                 new_duty_cycle);
 505     }
 506   } else if (_allow_duty_cycle_reduction) {
 507     // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
 508     new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
 509     // Respect the minimum duty cycle.
 510     unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
 511     _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
 512   }
 513 
 514   if (PrintGCDetails || CMSTraceIncrementalPacing) {
 515     gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
 516   }
 517 
 518   _allow_duty_cycle_reduction = false;
 519   return _icms_duty_cycle;
 520 }
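
// For instance (illustrative numbers only): if the scaled estimate of the
// remaining concurrent work is 2 seconds and time_until_cms_gen_full() is
// 8 seconds, then duty_cycle_dbl = 100 * 2 / 8 = 25, i.e. icms asks for
// roughly a 25% duty cycle, subject to the damping and minimum-duty-cycle
// rules applied above.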
 521 
 522 #ifndef PRODUCT
 523 void CMSStats::print_on(outputStream *st) const {
 524   st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
 525   st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
 526                gc0_duration(), gc0_period(), gc0_promoted());
 527   st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
 528             cms_duration(), cms_duration_per_mb(),
 529             cms_period(), cms_allocated());
 530   st->print(",cms_since_beg=%g,cms_since_end=%g",
 531             cms_time_since_begin(), cms_time_since_end());
 532   st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
 533             _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
 534   if (CMSIncrementalMode) {
 535     st->print(",dc=%d", icms_duty_cycle());
 536   }
 537 
 538   if (valid()) {
 539     st->print(",promo_rate=%g,cms_alloc_rate=%g",
 540               promotion_rate(), cms_allocation_rate());
 541     st->print(",cms_consumption_rate=%g,time_until_full=%g",
 542               cms_consumption_rate(), time_until_cms_gen_full());
 543   }
 544   st->print(" ");
 545 }
 546 #endif // #ifndef PRODUCT
 547 
 548 CMSCollector::CollectorState CMSCollector::_collectorState =
 549                              CMSCollector::Idling;
 550 bool CMSCollector::_foregroundGCIsActive = false;
 551 bool CMSCollector::_foregroundGCShouldWait = false;
 552 
 553 CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
 554                            CardTableRS*                   ct,
 555                            ConcurrentMarkSweepPolicy*     cp):
 556   _cmsGen(cmsGen),
 557   _ct(ct),
 558   _ref_processor(NULL),    // will be set later
 559   _conc_workers(NULL),     // may be set later
 560   _abort_preclean(false),
 561   _start_sampling(false),
 562   _between_prologue_and_epilogue(false),
 563   _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
 564   _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
 565                  -1 /* lock-free */, "No_lock" /* dummy */),
 566   _modUnionClosure(&_modUnionTable),
 567   _modUnionClosurePar(&_modUnionTable),
 568   // Adjust my span to cover old (cms) gen
 569   _span(cmsGen->reserved()),
 570   // Construct the is_alive_closure with _span & markBitMap
 571   _is_alive_closure(_span, &_markBitMap),
 572   _restart_addr(NULL),
 573   _overflow_list(NULL),
 574   _stats(cmsGen),
 575   _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true)),
 576   _eden_chunk_array(NULL),     // may be set in ctor body
 577   _eden_chunk_capacity(0),     // -- ditto --
 578   _eden_chunk_index(0),        // -- ditto --
 579   _survivor_plab_array(NULL),  // -- ditto --
 580   _survivor_chunk_array(NULL), // -- ditto --
 581   _survivor_chunk_capacity(0), // -- ditto --
 582   _survivor_chunk_index(0),    // -- ditto --
 583   _ser_pmc_preclean_ovflw(0),
 584   _ser_kac_preclean_ovflw(0),
 585   _ser_pmc_remark_ovflw(0),
 586   _par_pmc_remark_ovflw(0),
 587   _ser_kac_ovflw(0),
 588   _par_kac_ovflw(0),
 589 #ifndef PRODUCT
 590   _num_par_pushes(0),
 591 #endif
 592   _collection_count_start(0),
 593   _verifying(false),
 594   _icms_start_limit(NULL),
 595   _icms_stop_limit(NULL),
 596   _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
 597   _completed_initialization(false),
 598   _collector_policy(cp),
 599   _should_unload_classes(CMSClassUnloadingEnabled),
 600   _concurrent_cycles_since_last_unload(0),
 601   _roots_scanning_options(SharedHeap::SO_None),
 602   _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
 603   _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
 604   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
 605   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 606   _cms_start_registered(false)
 607 {
 608   if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
 609     ExplicitGCInvokesConcurrent = true;
 610   }
 611   // Now expand the span and allocate the collection support structures
 612   // (MUT, marking bit map etc.) to cover both generations subject to
 613   // collection.
 614 
 615   // For use by dirty card to oop closures.
 616   _cmsGen->cmsSpace()->set_collector(this);
 617 
 618   // Allocate MUT and marking bit map
 619   {
 620     MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
 621     if (!_markBitMap.allocate(_span)) {
 622       warning("Failed to allocate CMS Bit Map");
 623       return;
 624     }
 625     assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
 626   }
 627   {
 628     _modUnionTable.allocate(_span);
 629     assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
 630   }
 631 
 632   if (!_markStack.allocate(MarkStackSize)) {
 633     warning("Failed to allocate CMS Marking Stack");
 634     return;
 635   }
 636 
 637   // Support for multi-threaded concurrent phases
 638   if (CMSConcurrentMTEnabled) {
 639     if (FLAG_IS_DEFAULT(ConcGCThreads)) {
 640       // just for now
 641       FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
 642     }
 643     if (ConcGCThreads > 1) {
 644       _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
 645                                  ConcGCThreads, true);
 646       if (_conc_workers == NULL) {
 647         warning("GC/CMS: _conc_workers allocation failure: "
 648               "forcing -CMSConcurrentMTEnabled");
 649         CMSConcurrentMTEnabled = false;
 650       } else {
 651         _conc_workers->initialize_workers();
 652       }
 653     } else {
 654       CMSConcurrentMTEnabled = false;
 655     }
 656   }
 657   if (!CMSConcurrentMTEnabled) {
 658     ConcGCThreads = 0;
 659   } else {
 660     // Turn off CMSCleanOnEnter optimization temporarily for
 661     // the MT case where it's not fixed yet; see 6178663.
 662     CMSCleanOnEnter = false;
 663   }
 664   assert((_conc_workers != NULL) == (ConcGCThreads > 1),
 665          "Inconsistency");
 666 
 667   // Parallel task queues; these are shared for the
 668   // concurrent and stop-world phases of CMS, but
 669   // are not shared with parallel scavenge (ParNew).
 670   {
 671     uint i;
 672     uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads);
 673 
 674     if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
 675          || ParallelRefProcEnabled)
 676         && num_queues > 0) {
 677       _task_queues = new OopTaskQueueSet(num_queues);
 678       if (_task_queues == NULL) {
 679         warning("task_queues allocation failure.");
 680         return;
 681       }
 682       _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
 683       if (_hash_seed == NULL) {
 684         warning("_hash_seed array allocation failure");
 685         return;
 686       }
 687 
 688       typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
 689       for (i = 0; i < num_queues; i++) {
 690         PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
 691         if (q == NULL) {
 692           warning("work_queue allocation failure.");
 693           return;
 694         }
 695         _task_queues->register_queue(i, q);
 696       }
 697       for (i = 0; i < num_queues; i++) {
 698         _task_queues->queue(i)->initialize();
 699         _hash_seed[i] = 17;  // copied from ParNew
 700       }
 701     }
 702   }
 703 
 704   _cmsGen->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
 705 
 706   // CMSBootstrapOccupancy is a percentage (0-100); convert it to a fraction.
 707   _bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;
 708 
 709   _full_gcs_since_conc_gc = 0;
 710 
 711   // Now tell CMS generations the identity of their collector
 712   ConcurrentMarkSweepGeneration::set_collector(this);
 713 
 714   // Create & start a CMS thread for this CMS collector
 715   _cmsThread = ConcurrentMarkSweepThread::start(this);
 716   assert(cmsThread() != NULL, "CMS Thread should have been created");
 717   assert(cmsThread()->collector() == this,
 718          "CMS Thread should refer to this gen");
 719   assert(CGC_lock != NULL, "Where's the CGC_lock?");
 720 
 721   // Support for parallelizing young gen rescan
 722   GenCollectedHeap* gch = GenCollectedHeap::heap();
 723   _young_gen = gch->prev_gen(_cmsGen);
 724   if (gch->supports_inline_contig_alloc()) {
 725     _top_addr = gch->top_addr();
 726     _end_addr = gch->end_addr();
 727     assert(_young_gen != NULL, "no _young_gen");
 728     _eden_chunk_index = 0;
 729     _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
 730     _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
 731     if (_eden_chunk_array == NULL) {
 732       _eden_chunk_capacity = 0;
 733       warning("GC/CMS: _eden_chunk_array allocation failure");
 734     }
 735   }
 736   assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");
 737 
 738   // Support for parallelizing survivor space rescan
 739   if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
 740     const size_t max_plab_samples =
 741       ((DefNewGeneration*)_young_gen)->max_survivor_size()/MinTLABSize;
 742 
 743     _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
 744     _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
 745     _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
 746     if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
 747         || _cursor == NULL) {
 748       warning("Failed to allocate survivor plab/chunk array");
 749       if (_survivor_plab_array  != NULL) {
 750         FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
 751         _survivor_plab_array = NULL;
 752       }
 753       if (_survivor_chunk_array != NULL) {
 754         FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
 755         _survivor_chunk_array = NULL;
 756       }
 757       if (_cursor != NULL) {
 758         FREE_C_HEAP_ARRAY(size_t, _cursor, mtGC);
 759         _cursor = NULL;
 760       }
 761     } else {
 762       _survivor_chunk_capacity = 2*max_plab_samples;
 763       for (uint i = 0; i < ParallelGCThreads; i++) {
 764         HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
 765         if (vec == NULL) {
 766           warning("Failed to allocate survivor plab array");
 767           for (int j = i; j > 0; j--) {
 768             FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array(), mtGC);
 769           }
 770           FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
 771           FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
 772           _survivor_plab_array = NULL;
 773           _survivor_chunk_array = NULL;
 774           _survivor_chunk_capacity = 0;
 775           break;
 776         } else {
 777           ChunkArray* cur =
 778             ::new (&_survivor_plab_array[i]) ChunkArray(vec,
 779                                                         max_plab_samples);
 780           assert(cur->end() == 0, "Should be 0");
 781           assert(cur->array() == vec, "Should be vec");
 782           assert(cur->capacity() == max_plab_samples, "Error");
 783         }
 784       }
 785     }
 786   }
 787   assert(   (   _survivor_plab_array  != NULL
 788              && _survivor_chunk_array != NULL)
 789          || (   _survivor_chunk_capacity == 0
 790              && _survivor_chunk_index == 0),
 791          "Error");
 792 
 793   NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
 794   _gc_counters = new CollectorCounters("CMS", 1);
 795   _completed_initialization = true;
 796   _inter_sweep_timer.start();  // start of time
 797 }
 798 
 799 const char* ConcurrentMarkSweepGeneration::name() const {
 800   return "concurrent mark-sweep generation";
 801 }
 802 void ConcurrentMarkSweepGeneration::update_counters() {
 803   if (UsePerfData) {
 804     _space_counters->update_all();
 805     _gen_counters->update_all();
 806   }
 807 }
 808 
 809 // This is an optimized version of update_counters(); it takes the
 810 // used value as a parameter rather than computing it.
 811 //
 812 void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
 813   if (UsePerfData) {
 814     _space_counters->update_used(used);
 815     _space_counters->update_capacity();
 816     _gen_counters->update_all();
 817   }
 818 }
 819 
 820 void ConcurrentMarkSweepGeneration::print() const {
 821   Generation::print();
 822   cmsSpace()->print();
 823 }
 824 
 825 #ifndef PRODUCT
 826 void ConcurrentMarkSweepGeneration::print_statistics() {
 827   cmsSpace()->printFLCensus(0);
 828 }
 829 #endif
 830 
 831 void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
 832   GenCollectedHeap* gch = GenCollectedHeap::heap();
 833   if (PrintGCDetails) {
 834     if (Verbose) {
 835       gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
 836         level(), short_name(), s, used(), capacity());
 837     } else {
 838       gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
 839         level(), short_name(), s, used() / K, capacity() / K);
 840     }
 841   }
 842   if (Verbose) {
 843     gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
 844               gch->used(), gch->capacity());
 845   } else {
 846     gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
 847               gch->used() / K, gch->capacity() / K);
 848   }
 849 }
 850 
 851 size_t
 852 ConcurrentMarkSweepGeneration::contiguous_available() const {
 853   // dld proposes an improvement in precision here. If the committed
 854   // part of the space ends in a free block we should add that to
 855   // uncommitted size in the calculation below. Will make this
 856   // change later, staying with the approximation below for the
 857   // time being. -- ysr.
 858   return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
 859 }
 860 
 861 size_t
 862 ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
 863   return _cmsSpace->max_alloc_in_words() * HeapWordSize;
 864 }
 865 
 866 size_t ConcurrentMarkSweepGeneration::max_available() const {
 867   return free() + _virtual_space.uncommitted_size();
 868 }
 869 
 870 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
 871   size_t available = max_available();
 872   size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
 873   bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
 874   if (Verbose && PrintGCDetails) {
 875     gclog_or_tty->print_cr(
 876       "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
 877       "max_promo("SIZE_FORMAT")",
 878       res? "":" not", available, res? ">=":"<",
 879       av_promo, max_promotion_in_bytes);
 880   }
 881   return res;
 882 }
 883 
 884 // At a promotion failure, dump information on block layout in the heap
 885 // (cms old generation).
 886 void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
 887   if (CMSDumpAtPromotionFailure) {
 888     cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
 889   }
 890 }
 891 
 892 CompactibleSpace*
 893 ConcurrentMarkSweepGeneration::first_compaction_space() const {
 894   return _cmsSpace;
 895 }
 896 
 897 void ConcurrentMarkSweepGeneration::reset_after_compaction() {
 898   // Clear the promotion information.  These pointers can be adjusted
 899   // along with all the other pointers into the heap, but
 900   // compaction is expected to be a rare event with
 901   // a heap using cms, so don't do it without seeing the need.
 902   if (CollectedHeap::use_parallel_gc_threads()) {
 903     for (uint i = 0; i < ParallelGCThreads; i++) {
 904       _par_gc_thread_states[i]->promo.reset();
 905     }
 906   }
 907 }
 908 
 909 void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
 910   blk->do_space(_cmsSpace);
 911 }
 912 
 913 void ConcurrentMarkSweepGeneration::compute_new_size() {
 914   assert_locked_or_safepoint(Heap_lock);
 915 
 916   // If incremental collection failed, we just want to expand
 917   // to the limit.
 918   if (incremental_collection_failed()) {
 919     clear_incremental_collection_failed();
 920     grow_to_reserved();
 921     return;
 922   }
 923 
 924   // The heap has been compacted but not reset yet.
 925   // Any metric such as free() or used() will be incorrect.
 926 
 927   CardGeneration::compute_new_size();
 928 
 929   // Reset again after a possible resizing
 930   if (did_compact()) {
 931     cmsSpace()->reset_after_compaction();
 932   }
 933 }
 934 
 935 void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
 936   assert_locked_or_safepoint(Heap_lock);
 937 
 938   // If incremental collection failed, we just want to expand
 939   // to the limit.
 940   if (incremental_collection_failed()) {
 941     clear_incremental_collection_failed();
 942     grow_to_reserved();
 943     return;
 944   }
 945 
 946   double free_percentage = ((double) free()) / capacity();
 947   double desired_free_percentage = (double) MinHeapFreeRatio / 100;
 948   double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
 949 
 950   // compute expansion delta needed for reaching desired free percentage
 951   if (free_percentage < desired_free_percentage) {
 952     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 953     assert(desired_capacity >= capacity(), "invalid expansion size");
 954     size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
 955     if (PrintGCDetails && Verbose) {
 956       size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 957       gclog_or_tty->print_cr("\nFrom compute_new_size: ");
 958       gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
 959       gclog_or_tty->print_cr("  Desired free fraction %f",
 960         desired_free_percentage);
 961       gclog_or_tty->print_cr("  Maximum free fraction %f",
 962         maximum_free_percentage);
 963       gclog_or_tty->print_cr("  Capacity "SIZE_FORMAT, capacity()/1000);
 964       gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT,
 965         desired_capacity/1000);
 966       int prev_level = level() - 1;
 967       if (prev_level >= 0) {
 968         size_t prev_size = 0;
 969         GenCollectedHeap* gch = GenCollectedHeap::heap();
 970         Generation* prev_gen = gch->_gens[prev_level];
 971         prev_size = prev_gen->capacity();
 972         gclog_or_tty->print_cr("  Younger gen size "SIZE_FORMAT,
 973                                prev_size/1000);
 974       }
 975       gclog_or_tty->print_cr("  unsafe_max_alloc_nogc "SIZE_FORMAT,
 976         unsafe_max_alloc_nogc()/1000);
 977       gclog_or_tty->print_cr("  contiguous available "SIZE_FORMAT,
 978         contiguous_available()/1000);
 979       gclog_or_tty->print_cr("  Expand by "SIZE_FORMAT" (bytes)",
 980         expand_bytes);
 981     }
 982     // safe if expansion fails
 983     expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
 984     if (PrintGCDetails && Verbose) {
 985       gclog_or_tty->print_cr("  Expanded free fraction %f",
 986         ((double) free()) / capacity());
 987     }
 988   } else {
 989     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 990     assert(desired_capacity <= capacity(), "invalid expansion size");
 991     size_t shrink_bytes = capacity() - desired_capacity;
 992     // Don't shrink unless the delta is greater than the minimum shrink we want
 993     if (shrink_bytes >= MinHeapDeltaBytes) {
 994       shrink_free_list_by(shrink_bytes);
 995     }
 996   }
 997 }
 998 
 999 Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
1000   return cmsSpace()->freelistLock();
1001 }
1002 
1003 HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
1004                                                   bool   tlab) {
1005   CMSSynchronousYieldRequest yr;
1006   MutexLockerEx x(freelistLock(),
1007                   Mutex::_no_safepoint_check_flag);
1008   return have_lock_and_allocate(size, tlab);
1009 }
1010 
1011 HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
1012                                                   bool   tlab /* ignored */) {
1013   assert_lock_strong(freelistLock());
1014   size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
1015   HeapWord* res = cmsSpace()->allocate(adjustedSize);
1016   // Allocate the object live (grey) if the background collector has
1017   // started marking. This is necessary because the marker may
1018   // have passed this address and consequently this object will
1019   // not otherwise be greyed and would be incorrectly swept up.
1020   // Note that if this object contains references, the writing
1021   // of those references will dirty the card containing this object
1022   // allowing the object to be blackened (and its references scanned)
1023   // either during a preclean phase or at the final checkpoint.
1024   if (res != NULL) {
1025     // We may block here with an uninitialized object with
1026     // its mark-bit or P-bits not yet set. Such objects need
1027     // to be safely navigable by block_start().
1028     assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
1029     assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
1030     collector()->direct_allocated(res, adjustedSize);
1031     _direct_allocated_words += adjustedSize;
1032     // allocation counters
1033     NOT_PRODUCT(
1034       _numObjectsAllocated++;
1035       _numWordsAllocated += (int)adjustedSize;
1036     )
1037   }
1038   return res;
1039 }
1040 
1041 // In the case of direct allocation by mutators in a generation that
1042 // is being concurrently collected, the object must be allocated
1043 // live (grey) if the background collector has started marking.
1044 // This is necessary because the marker may
1045 // have passed this address and consequently this object will
1046 // not otherwise be greyed and would be incorrectly swept up.
1047 // Note that if this object contains references, the writing
1048 // of those references will dirty the card containing this object
1049 // allowing the object to be blackened (and its references scanned)
1050 // either during a preclean phase or at the final checkpoint.
1051 void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
1052   assert(_markBitMap.covers(start, size), "Out of bounds");
1053   if (_collectorState >= Marking) {
1054     MutexLockerEx y(_markBitMap.lock(),
1055                     Mutex::_no_safepoint_check_flag);
1056     // [see comments preceding SweepClosure::do_blk() below for details]
1057     //
1058     // Can the P-bits be deleted now?  JJJ
1059     //
1060     // 1. need to mark the object as live so it isn't collected
1061     // 2. need to mark the 2nd bit to indicate the object may be uninitialized
1062     // 3. need to mark the end of the object so marking, precleaning or sweeping
1063     //    can skip over uninitialized or unparsable objects. An allocated
1064     //    object is considered uninitialized for our purposes as long as
1065     //    its klass word is NULL.  All old gen objects are parsable
1066     //    as soon as they are initialized.
1067     _markBitMap.mark(start);          // object is live
1068     _markBitMap.mark(start + 1);      // object is potentially uninitialized?
1069     _markBitMap.mark(start + size - 1);
1070                                       // mark end of object
1071   }
1072   // check that oop looks uninitialized
1073   assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
1074 }
1075 
1076 void CMSCollector::promoted(bool par, HeapWord* start,
1077                             bool is_obj_array, size_t obj_size) {
1078   assert(_markBitMap.covers(start), "Out of bounds");
1079   // See comment in direct_allocated() about when objects should
1080   // be allocated live.
1081   if (_collectorState >= Marking) {
1082     // we already hold the marking bit map lock, taken in
1083     // the prologue
1084     if (par) {
1085       _markBitMap.par_mark(start);
1086     } else {
1087       _markBitMap.mark(start);
1088     }
1089     // We don't need to mark the object as uninitialized (as
1090     // in direct_allocated above) because this is being done with the
1091     // world stopped and the object will be initialized by the
1092     // time the marking, precleaning or sweeping get to look at it.
1093     // But see the code for copying objects into the CMS generation,
1094     // where we need to ensure that concurrent readers of the
1095     // block offset table are able to safely navigate a block that
1096     // is in flux from being free to being allocated (and in
1097     // transition while being copied into) and subsequently
1098     // becoming a bona-fide object when the copy/promotion is complete.
1099     assert(SafepointSynchronize::is_at_safepoint(),
1100            "expect promotion only at safepoints");
1101 
1102     if (_collectorState < Sweeping) {
1103       // Mark the appropriate cards in the modUnionTable, so that
1104       // this object gets scanned before the sweep. If this is
1105       // not done, CMS generation references in the object might
1106       // not get marked.
1107       // For the case of arrays, which are otherwise precisely
1108       // marked, we need to dirty the entire array, not just its head.
1109       if (is_obj_array) {
1110         // The [par_]mark_range() method expects mr.end() below to
1111         // be aligned to the granularity of a bit's representation
1112         // in the heap. In the case of the MUT below, that's a
1113         // card size.
1114         MemRegion mr(start,
1115                      (HeapWord*)round_to((intptr_t)(start + obj_size),
1116                         CardTableModRefBS::card_size /* bytes */));
1117         if (par) {
1118           _modUnionTable.par_mark_range(mr);
1119         } else {
1120           _modUnionTable.mark_range(mr);
1121         }
1122       } else {  // not an obj array; we can just mark the head
1123         if (par) {
1124           _modUnionTable.par_mark(start);
1125         } else {
1126           _modUnionTable.mark(start);
1127         }
1128       }
1129     }
1130   }
1131 }
1132 
1133 static inline size_t percent_of_space(Space* space, HeapWord* addr)
1134 {
1135   size_t delta = pointer_delta(addr, space->bottom());
1136   return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
1137 }
1138 
1139 void CMSCollector::icms_update_allocation_limits()
1140 {
1141   Generation* young = GenCollectedHeap::heap()->get_gen(0);
1142   EdenSpace* eden = young->as_DefNewGeneration()->eden();
1143 
1144   const unsigned int duty_cycle = stats().icms_update_duty_cycle();
1145   if (CMSTraceIncrementalPacing) {
1146     stats().print();
1147   }
1148 
1149   assert(duty_cycle <= 100, "invalid duty cycle");
1150   if (duty_cycle != 0) {
1151     // The duty_cycle is a percentage between 0 and 100; convert to words and
1152     // then compute the offset from the endpoints of the space.
1153     size_t free_words = eden->free() / HeapWordSize;
1154     double free_words_dbl = (double)free_words;
1155     size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
1156     size_t offset_words = (free_words - duty_cycle_words) / 2;
1157 
1158     _icms_start_limit = eden->top() + offset_words;
1159     _icms_stop_limit = eden->end() - offset_words;
1160 
1161     // The limits may be adjusted (shifted to the right) by
1162     // CMSIncrementalOffset, to allow the application more mutator time after a
1163     // young gen gc (when all mutators were stopped) and before CMS starts and
1164     // takes away one or more cpus.
1165     if (CMSIncrementalOffset != 0) {
1166       double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
1167       size_t adjustment = (size_t)adjustment_dbl;
1168       HeapWord* tmp_stop = _icms_stop_limit + adjustment;
1169       if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
1170         _icms_start_limit += adjustment;
1171         _icms_stop_limit = tmp_stop;
1172       }
1173     }
1174   }
1175   if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
1176     _icms_start_limit = _icms_stop_limit = eden->end();
1177   }
1178 
1179   // Install the new start limit.
1180   eden->set_soft_end(_icms_start_limit);
1181 
1182   if (CMSTraceIncrementalMode) {
1183     gclog_or_tty->print(" icms alloc limits:  "
1184                            PTR_FORMAT "," PTR_FORMAT
1185                            " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
1186                            p2i(_icms_start_limit), p2i(_icms_stop_limit),
1187                            percent_of_space(eden, _icms_start_limit),
1188                            percent_of_space(eden, _icms_stop_limit));
1189     if (Verbose) {
1190       gclog_or_tty->print("eden:  ");
1191       eden->print_on(gclog_or_tty);
1192     }
1193   }
1194 }
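
// For example (illustrative numbers only): with 100M words free in eden and a
// duty cycle of 20%, duty_cycle_words = 20M and offset_words = (100M - 20M)/2
// = 40M, so the limits bracket the middle 20% of the free region:
//
//   top |<-- 40M -->| [_icms_start_limit ... _icms_stop_limit] |<-- 40M -->| end
//
// A nonzero CMSIncrementalOffset shifts that window toward end(), as above.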
1195 
1196 // Any changes here should try to maintain the invariant
1197 // that if this method is called with _icms_start_limit
1198 // and _icms_stop_limit both NULL, then it should return NULL
1199 // and not notify the icms thread.
1200 HeapWord*
1201 CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
1202                                        size_t word_size)
1203 {
1204   // A start_limit equal to end() means the duty cycle is 0, so treat that as a
1205   // nop.
1206   if (CMSIncrementalMode && _icms_start_limit != space->end()) {
1207     if (top <= _icms_start_limit) {
1208       if (CMSTraceIncrementalMode) {
1209         space->print_on(gclog_or_tty);
1210         gclog_or_tty->stamp();
1211         gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
1212                                ", new limit=" PTR_FORMAT
1213                                " (" SIZE_FORMAT "%%)",
1214                                p2i(top), p2i(_icms_stop_limit),
1215                                percent_of_space(space, _icms_stop_limit));
1216       }
1217       ConcurrentMarkSweepThread::start_icms();
1218       assert(top < _icms_stop_limit, "Tautology");
1219       if (word_size < pointer_delta(_icms_stop_limit, top)) {
1220         return _icms_stop_limit;
1221       }
1222 
1223       // The allocation will cross both the _start and _stop limits, so do the
1224       // stop notification also and return end().
1225       if (CMSTraceIncrementalMode) {
1226         space->print_on(gclog_or_tty);
1227         gclog_or_tty->stamp();
1228         gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
1229                                ", new limit=" PTR_FORMAT
1230                                " (" SIZE_FORMAT "%%)",
1231                                p2i(top), p2i(space->end()),
1232                                percent_of_space(space, space->end()));
1233       }
1234       ConcurrentMarkSweepThread::stop_icms();
1235       return space->end();
1236     }
1237 
1238     if (top <= _icms_stop_limit) {
1239       if (CMSTraceIncrementalMode) {
1240         space->print_on(gclog_or_tty);
1241         gclog_or_tty->stamp();
1242         gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
1243                                ", new limit=" PTR_FORMAT
1244                                " (" SIZE_FORMAT "%%)",
1245                                p2i(top), p2i(space->end()),
1246                                percent_of_space(space, space->end()));
1247       }
1248       ConcurrentMarkSweepThread::stop_icms();
1249       return space->end();
1250     }
1251 
1252     if (CMSTraceIncrementalMode) {
1253       space->print_on(gclog_or_tty);
1254       gclog_or_tty->stamp();
1255       gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
1256                              ", new limit=" PTR_FORMAT,
1257                              p2i(top), p2i(NULL));
1258     }
1259   }
1260 
1261   return NULL;
1262 }
1263 
1264 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
1265   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1266   // allocate, copy and if necessary update promoinfo --
1267   // delegate to underlying space.
1268   assert_lock_strong(freelistLock());
1269 
1270 #ifndef PRODUCT
1271   if (Universe::heap()->promotion_should_fail()) {
1272     return NULL;
1273   }
1274 #endif  // #ifndef PRODUCT
1275 
1276   oop res = _cmsSpace->promote(obj, obj_size);
1277   if (res == NULL) {
1278     // expand and retry
1279     size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
1280     expand(s*HeapWordSize, MinHeapDeltaBytes,
1281       CMSExpansionCause::_satisfy_promotion);
1282     // Since there's currently no next generation, we don't try to promote
1283     // into a more senior generation.
1284     assert(next_gen() == NULL, "assumption, based upon which no attempt "
1285                                "is made to pass on a possibly failing "
1286                                "promotion to next generation");
1287     res = _cmsSpace->promote(obj, obj_size);
1288   }
1289   if (res != NULL) {
1290     // See comment in allocate() about when objects should
1291     // be allocated live.
1292     assert(obj->is_oop(), "Will dereference klass pointer below");
1293     collector()->promoted(false,           // Not parallel
1294                           (HeapWord*)res, obj->is_objArray(), obj_size);
1295     // promotion counters
1296     NOT_PRODUCT(
1297       _numObjectsPromoted++;
1298       _numWordsPromoted +=
1299         (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
1300     )
1301   }
1302   return res;
1303 }
1304 
1305 
1306 HeapWord*
1307 ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
1308                                              HeapWord* top,
1309                                              size_t word_sz)
1310 {
1311   return collector()->allocation_limit_reached(space, top, word_sz);
1312 }
1313 
1314 // IMPORTANT: Notes on object size recognition in CMS.
1315 // ---------------------------------------------------
1316 // A block of storage in the CMS generation is always in
1317 // one of three states. A free block (FREE), an allocated
1318 // object (OBJECT) whose size() method reports the correct size,
1319 // and an intermediate state (TRANSIENT) in which its size cannot
1320 // be accurately determined.
1321 // STATE IDENTIFICATION:   (32 bit and 64 bit w/o COOPS)
1322 // -----------------------------------------------------
1323 // FREE:      klass_word & 1 == 1; mark_word holds block size
1324 //
1325 // OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
1326 //            obj->size() computes correct size
1327 //
1328 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
1329 //
1330 // STATE IDENTIFICATION: (64 bit+COOPS)
1331 // ------------------------------------
1332 // FREE:      mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
1333 //
1334 // OBJECT:    klass_word installed; klass_word != 0;
1335 //            obj->size() computes correct size
1336 //
1337 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
1338 //
1339 //
1340 // STATE TRANSITION DIAGRAM
1341 //
1342 //        mut / parnew                     mut  /  parnew
1343 // FREE --------------------> TRANSIENT ---------------------> OBJECT --|
1344 //  ^                                                                   |
1345 //  |------------------------ DEAD <------------------------------------|
1346 //         sweep                            mut
1347 //
1348 // While a block is in TRANSIENT state its size cannot be determined
1349 // so readers will either need to come back later or stall until
1350 // the size can be determined. Note that for the case of direct
1351 // allocation, P-bits, when available, may be used to determine the
1352 // size of an object that may not yet have been initialized.
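//
// For illustration only (a minimal sketch kept in comment form; BlockState
// and classify_block_sketch are hypothetical names, while is_free() and
// klass_or_null() are the predicates actually consulted by the code below),
// a parsing reader on 32 bit, or 64 bit without COOPS, could classify a
// block roughly as follows:
//
//   BlockState classify_block_sketch(HeapWord* addr) {
//     if (((FreeChunk*)addr)->is_free())       return FREE;      // free bit set
//     if (oop(addr)->klass_or_null() == NULL)  return TRANSIENT; // size indeterminate
//     return OBJECT;                                             // oop(addr)->size() is valid
//   }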
1353 
1354 // Things to support parallel young-gen collection.
1355 oop
1356 ConcurrentMarkSweepGeneration::par_promote(int thread_num,
1357                                            oop old, markOop m,
1358                                            size_t word_sz) {
1359 #ifndef PRODUCT
1360   if (Universe::heap()->promotion_should_fail()) {
1361     return NULL;
1362   }
1363 #endif  // #ifndef PRODUCT
1364 
1365   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1366   PromotionInfo* promoInfo = &ps->promo;
1367   // if we are tracking promotions, then first ensure space for
1368   // promotion (including spooling space for saving header if necessary).
1369   // then allocate and copy, then track promoted info if needed.
1370   // When tracking (see PromotionInfo::track()), the mark word may
1371   // be displaced and in this case restoration of the mark word
1372   // occurs in the (oop_since_save_marks_)iterate phase.
1373   if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
1374     // Out of space for allocating spooling buffers;
1375     // try expanding and allocating spooling buffers.
1376     if (!expand_and_ensure_spooling_space(promoInfo)) {
1377       return NULL;
1378     }
1379   }
1380   assert(promoInfo->has_spooling_space(), "Control point invariant");
1381   const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
1382   HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
1383   if (obj_ptr == NULL) {
1384      obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
1385      if (obj_ptr == NULL) {
1386        return NULL;
1387      }
1388   }
1389   oop obj = oop(obj_ptr);
1390   OrderAccess::storestore();
1391   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1392   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1393   // IMPORTANT: See note on object initialization for CMS above.
1394   // Otherwise, copy the object.  Here we must be careful to insert the
1395   // klass pointer last, since this marks the block as an allocated object.
  // (With compressed oops it is the mark word, rather than the klass word,
  // that carries the CMS free bit; see the state identification note above.)
1397   HeapWord* old_ptr = (HeapWord*)old;
1398   // Restore the mark word copied above.
1399   obj->set_mark(m);
1400   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1401   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1402   OrderAccess::storestore();
1403 
1404   if (UseCompressedClassPointers) {
1405     // Copy gap missed by (aligned) header size calculation below
1406     obj->set_klass_gap(old->klass_gap());
1407   }
1408   if (word_sz > (size_t)oopDesc::header_size()) {
1409     Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
1410                                  obj_ptr + oopDesc::header_size(),
1411                                  word_sz - oopDesc::header_size());
1412   }
1413 
1414   // Now we can track the promoted object, if necessary.  We take care
1415   // to delay the transition from uninitialized to full object
1416   // (i.e., insertion of klass pointer) until after, so that it
1417   // atomically becomes a promoted object.
1418   if (promoInfo->tracking()) {
1419     promoInfo->track((PromotedObject*)obj, old->klass());
1420   }
1421   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1422   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1423   assert(old->is_oop(), "Will use and dereference old klass ptr below");
1424 
1425   // Finally, install the klass pointer (this should be volatile).
1426   OrderAccess::storestore();
1427   obj->set_klass(old->klass());
1428   // We should now be able to calculate the right size for this object
1429   assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");
1430 
1431   collector()->promoted(true,          // parallel
1432                         obj_ptr, old->is_objArray(), word_sz);
1433 
1434   NOT_PRODUCT(
1435     Atomic::inc_ptr(&_numObjectsPromoted);
1436     Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
1437   )
1438 
1439   return obj;
1440 }
1441 
1442 void
1443 ConcurrentMarkSweepGeneration::
1444 par_promote_alloc_undo(int thread_num,
1445                        HeapWord* obj, size_t word_sz) {
1446   // CMS does not support promotion undo.
1447   ShouldNotReachHere();
1448 }
1449 
1450 void
1451 ConcurrentMarkSweepGeneration::
1452 par_promote_alloc_done(int thread_num) {
1453   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1454   ps->lab.retire(thread_num);
1455 }
1456 
1457 void
1458 ConcurrentMarkSweepGeneration::
1459 par_oop_since_save_marks_iterate_done(int thread_num) {
1460   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1461   ParScanWithoutBarrierClosure* dummy_cl = NULL;
1462   ps->promo.promoted_oops_iterate_nv(dummy_cl);
1463 }
1464 
1465 bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
1466                                                    size_t size,
1467                                                    bool   tlab)
1468 {
1469   // We allow a STW collection only if a full
1470   // collection was requested.
1471   return full || should_allocate(size, tlab); // FIX ME !!!
1472   // This and promotion failure handling are connected at the
1473   // hip and should be fixed by untying them.
1474 }
1475 
1476 bool CMSCollector::shouldConcurrentCollect() {
1477   if (_full_gc_requested) {
1478     if (Verbose && PrintGCDetails) {
1479       gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
1480                              " gc request (or gc_locker)");
1481     }
1482     return true;
1483   }
1484 
1485   // For debugging purposes, change the type of collection.
1486   // If the rotation is not on the concurrent collection
1487   // type, don't start a concurrent collection.
1488   NOT_PRODUCT(
1489     if (RotateCMSCollectionTypes &&
1490         (_cmsGen->debug_collection_type() !=
1491           ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
1492       assert(_cmsGen->debug_collection_type() !=
1493         ConcurrentMarkSweepGeneration::Unknown_collection_type,
1494         "Bad cms collection type");
1495       return false;
1496     }
1497   )
1498 
1499   FreelistLocker x(this);
1500   // ------------------------------------------------------------------
1501   // Print out lots of information which affects the initiation of
1502   // a collection.
1503   if (PrintCMSInitiationStatistics && stats().valid()) {
1504     gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
1505     gclog_or_tty->stamp();
1506     gclog_or_tty->cr();
1507     stats().print_on(gclog_or_tty);
1508     gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
1509       stats().time_until_cms_gen_full());
1510     gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
1511     gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
1512                            _cmsGen->contiguous_available());
1513     gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1514     gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1515     gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1516     gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1517     gclog_or_tty->print_cr("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
1518     gclog_or_tty->print_cr("cms_time_since_end=%3.7f", stats().cms_time_since_end());
1519     gclog_or_tty->print_cr("metadata initialized %d",
1520       MetaspaceGC::should_concurrent_collect());
1521   }
1522   // ------------------------------------------------------------------
1523 
1524   // If the estimated time to complete a cms collection (cms_duration())
1525   // is less than the estimated time remaining until the cms generation
1526   // is full, start a collection.
1527   if (!UseCMSInitiatingOccupancyOnly) {
1528     if (stats().valid()) {
1529       if (stats().time_until_cms_start() == 0.0) {
1530         return true;
1531       }
1532     } else {
1533       // We want to conservatively collect somewhat early in order
1534       // to try and "bootstrap" our CMS/promotion statistics;
1535       // this branch will not fire after the first successful CMS
1536       // collection because the stats should then be valid.
1537       if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
1538         if (Verbose && PrintGCDetails) {
1539           gclog_or_tty->print_cr(
1540             " CMSCollector: collect for bootstrapping statistics:"
1541             " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
1542             _bootstrap_occupancy);
1543         }
1544         return true;
1545       }
1546     }
1547   }
1548 
  // Otherwise, we start a collection cycle if the
  // old gen wants a collection cycle started. It may use
  // an appropriate criterion for making this decision.
1552   // XXX We need to make sure that the gen expansion
1553   // criterion dovetails well with this. XXX NEED TO FIX THIS
1554   if (_cmsGen->should_concurrent_collect()) {
1555     if (Verbose && PrintGCDetails) {
1556       gclog_or_tty->print_cr("CMS old gen initiated");
1557     }
1558     return true;
1559   }
1560 
1561   // We start a collection if we believe an incremental collection may fail;
1562   // this is not likely to be productive in practice because it's probably too
1563   // late anyway.
1564   GenCollectedHeap* gch = GenCollectedHeap::heap();
1565   assert(gch->collector_policy()->is_generation_policy(),
1566          "You may want to check the correctness of the following");
1567   if (gch->incremental_collection_will_fail(true /* consult_young */)) {
1568     if (Verbose && PrintGCDetails) {
1569       gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
1570     }
1571     return true;
1572   }
1573 
  if (MetaspaceGC::should_concurrent_collect()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print("CMSCollector: collect for metadata allocation ");
    }
    return true;
  }
1580 
1581   // CMSTriggerInterval starts a CMS cycle if enough time has passed.
1582   if (CMSTriggerInterval >= 0) {
1583     if (CMSTriggerInterval == 0) {
1584       // Trigger always
1585       return true;
1586     }
1587 
1588     // Check the CMS time since begin (we do not check the stats validity
1589     // as we want to be able to trigger the first CMS cycle as well)
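    // For example (illustrative numbers only): with -XX:CMSTriggerInterval=600000,
    // i.e. 600000 ms, the check below fires once at least 600.0 seconds have
    // elapsed since the last cycle began.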
1590     if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {
1591       if (Verbose && PrintGCDetails) {
1592         if (stats().valid()) {
1593           gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
1594                                  stats().cms_time_since_begin());
1595         } else {
1596           gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (first collection)");
1597         }
1598       }
1599       return true;
1600     }
1601   }
1602 
1603   return false;
1604 }
1605 
1606 void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }
1607 
1608 // Clear _expansion_cause fields of constituent generations
1609 void CMSCollector::clear_expansion_cause() {
1610   _cmsGen->clear_expansion_cause();
1611 }
1612 
// We should be conservative in starting a collection cycle.  Starting
// too eagerly runs the risk of collecting far too often in the
// extreme.  Collecting too rarely falls back on full collections,
// which works, even if it is not optimal in terms of concurrent work.
// As a workaround for collecting too eagerly, use the flag
// UseCMSInitiatingOccupancyOnly.  This also has the advantage of
// giving the user an easily understandable way of controlling the
// collections.
1621 // We want to start a new collection cycle if any of the following
1622 // conditions hold:
1623 // . our current occupancy exceeds the configured initiating occupancy
1624 //   for this generation, or
1625 // . we recently needed to expand this space and have not, since that
1626 //   expansion, done a collection of this generation, or
1627 // . the underlying space believes that it may be a good idea to initiate
1628 //   a concurrent collection (this may be based on criteria such as the
1629 //   following: the space uses linear allocation and linear allocation is
1630 //   going to fail, or there is believed to be excessive fragmentation in
1631 //   the generation, etc... or ...
1632 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1633 //   the case of the old generation; see CR 6543076):
1634 //   we may be approaching a point at which allocation requests may fail because
1635 //   we will be out of sufficient free space given allocation rate estimates.]
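//
// As an illustrative example (assuming an explicitly set
// CMSInitiatingOccupancyFraction maps directly to initiating_occupancy()):
// with -XX:CMSInitiatingOccupancyFraction=75 -XX:+UseCMSInitiatingOccupancyOnly,
// only the first criterion above applies, and the checks below reduce to
// "start a cycle once occupancy() exceeds 0.75".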
1636 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1637 
1638   assert_lock_strong(freelistLock());
1639   if (occupancy() > initiating_occupancy()) {
1640     if (PrintGCDetails && Verbose) {
1641       gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
1642         short_name(), occupancy(), initiating_occupancy());
1643     }
1644     return true;
1645   }
1646   if (UseCMSInitiatingOccupancyOnly) {
1647     return false;
1648   }
1649   if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1650     if (PrintGCDetails && Verbose) {
1651       gclog_or_tty->print(" %s: collect because expanded for allocation ",
1652         short_name());
1653     }
1654     return true;
1655   }
1656   if (_cmsSpace->should_concurrent_collect()) {
1657     if (PrintGCDetails && Verbose) {
1658       gclog_or_tty->print(" %s: collect because cmsSpace says so ",
1659         short_name());
1660     }
1661     return true;
1662   }
1663   return false;
1664 }
1665 
1666 void ConcurrentMarkSweepGeneration::collect(bool   full,
1667                                             bool   clear_all_soft_refs,
1668                                             size_t size,
1669                                             bool   tlab)
1670 {
1671   collector()->collect(full, clear_all_soft_refs, size, tlab);
1672 }
1673 
1674 void CMSCollector::collect(bool   full,
1675                            bool   clear_all_soft_refs,
1676                            size_t size,
1677                            bool   tlab)
1678 {
1679   if (!UseCMSCollectionPassing && _collectorState > Idling) {
1680     // For debugging purposes skip the collection if the state
1681     // is not currently idle
1682     if (TraceCMSState) {
1683       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d",
1684         Thread::current(), full, _collectorState);
1685     }
1686     return;
1687   }
1688 
1689   // The following "if" branch is present for defensive reasons.
1690   // In the current uses of this interface, it can be replaced with:
  // assert(!GC_locker::is_active(), "Can't be called otherwise");
1692   // But I am not placing that assert here to allow future
1693   // generality in invoking this interface.
1694   if (GC_locker::is_active()) {
1695     // A consistency test for GC_locker
1696     assert(GC_locker::needs_gc(), "Should have been set already");
1697     // Skip this foreground collection, instead
1698     // expanding the heap if necessary.
1699     // Need the free list locks for the call to free() in compute_new_size()
1700     compute_new_size();
1701     return;
1702   }
1703   acquire_control_and_collect(full, clear_all_soft_refs);
1704   _full_gcs_since_conc_gc++;
1705 }
1706 
1707 void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
1708   GenCollectedHeap* gch = GenCollectedHeap::heap();
1709   unsigned int gc_count = gch->total_full_collections();
1710   if (gc_count == full_gc_count) {
1711     MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1712     _full_gc_requested = true;
1713     _full_gc_cause = cause;
1714     CGC_lock->notify();   // nudge CMS thread
1715   } else {
1716     assert(gc_count > full_gc_count, "Error: causal loop");
1717   }
1718 }
1719 
1720 bool CMSCollector::is_external_interruption() {
1721   GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1722   return GCCause::is_user_requested_gc(cause) ||
1723          GCCause::is_serviceability_requested_gc(cause);
1724 }
1725 
1726 void CMSCollector::report_concurrent_mode_interruption() {
1727   if (is_external_interruption()) {
1728     if (PrintGCDetails) {
1729       gclog_or_tty->print(" (concurrent mode interrupted)");
1730     }
1731   } else {
1732     if (PrintGCDetails) {
1733       gclog_or_tty->print(" (concurrent mode failure)");
1734     }
1735     _gc_tracer_cm->report_concurrent_mode_failure();
1736   }
1737 }
1738 
1739 
1740 // The foreground and background collectors need to coordinate in order
1741 // to make sure that they do not mutually interfere with CMS collections.
1742 // When a background collection is active,
1743 // the foreground collector may need to take over (preempt) and
1744 // synchronously complete an ongoing collection. Depending on the
1745 // frequency of the background collections and the heap usage
1746 // of the application, this preemption can be seldom or frequent.
1747 // There are only certain
1748 // points in the background collection that the "collection-baton"
1749 // can be passed to the foreground collector.
1750 //
1751 // The foreground collector will wait for the baton before
1752 // starting any part of the collection.  The foreground collector
1753 // will only wait at one location.
1754 //
1755 // The background collector will yield the baton before starting a new
1756 // phase of the collection (e.g., before initial marking, marking from roots,
1757 // precleaning, final re-mark, sweep etc.)  This is normally done at the head
1758 // of the loop which switches the phases. The background collector does some
1759 // of the phases (initial mark, final re-mark) with the world stopped.
1760 // Because of locking involved in stopping the world,
1761 // the foreground collector should not block waiting for the background
1762 // collector when it is doing a stop-the-world phase.  The background
1763 // collector will yield the baton at an additional point just before
1764 // it enters a stop-the-world phase.  Once the world is stopped, the
1765 // background collector checks the phase of the collection.  If the
1766 // phase has not changed, it proceeds with the collection.  If the
1767 // phase has changed, it skips that phase of the collection.  See
1768 // the comments on the use of the Heap_lock in collect_in_background().
1769 //
// Variables used in baton passing.
//   _foregroundGCIsActive - Set to true by the foreground collector when
//      it wants the baton.  The foreground clears it when it has finished
//      the collection.
//   _foregroundGCShouldWait - Set to true by the background collector
//      when it is running.  The foreground collector waits while
//      _foregroundGCShouldWait is true.
//   CGC_lock - monitor used to protect access to the above variables
//      and to notify the foreground and background collectors.
//   _collectorState - current state of the CMS collection.
1780 //
1781 // The foreground collector
1782 //   acquires the CGC_lock
1783 //   sets _foregroundGCIsActive
1784 //   waits on the CGC_lock for _foregroundGCShouldWait to be false
1785 //     various locks acquired in preparation for the collection
1786 //     are released so as not to block the background collector
1787 //     that is in the midst of a collection
1788 //   proceeds with the collection
1789 //   clears _foregroundGCIsActive
1790 //   returns
1791 //
1792 // The background collector in a loop iterating on the phases of the
1793 //      collection
1794 //   acquires the CGC_lock
1795 //   sets _foregroundGCShouldWait
1796 //   if _foregroundGCIsActive is set
1797 //     clears _foregroundGCShouldWait, notifies _CGC_lock
1798 //     waits on _CGC_lock for _foregroundGCIsActive to become false
1799 //     and exits the loop.
1800 //   otherwise
1801 //     proceed with that phase of the collection
1802 //     if the phase is a stop-the-world phase,
1803 //       yield the baton once more just before enqueueing
1804 //       the stop-world CMS operation (executed by the VM thread).
1805 //   returns after all phases of the collection are done
1806 //
1807 
1808 void CMSCollector::acquire_control_and_collect(bool full,
1809         bool clear_all_soft_refs) {
1810   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1811   assert(!Thread::current()->is_ConcurrentGC_thread(),
1812          "shouldn't try to acquire control from self!");
1813 
1814   // Start the protocol for acquiring control of the
1815   // collection from the background collector (aka CMS thread).
1816   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1817          "VM thread should have CMS token");
1818   // Remember the possibly interrupted state of an ongoing
1819   // concurrent collection
1820   CollectorState first_state = _collectorState;
1821 
1822   // Signal to a possibly ongoing concurrent collection that
1823   // we want to do a foreground collection.
1824   _foregroundGCIsActive = true;
1825 
1826   // Disable incremental mode during a foreground collection.
1827   ICMSDisabler icms_disabler;
1828 
  // Release locks and wait for a notify from the background collector.
  // Releasing the locks is only necessary for phases which
  // yield, to improve the granularity of the collection.
1832   assert_lock_strong(bitMapLock());
1833   // We need to lock the Free list lock for the space that we are
1834   // currently collecting.
1835   assert(haveFreelistLocks(), "Must be holding free list locks");
1836   bitMapLock()->unlock();
1837   releaseFreelistLocks();
1838   {
1839     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1840     if (_foregroundGCShouldWait) {
1841       // We are going to be waiting for action for the CMS thread;
1842       // it had better not be gone (for instance at shutdown)!
1843       assert(ConcurrentMarkSweepThread::cmst() != NULL,
1844              "CMS thread must be running");
1845       // Wait here until the background collector gives us the go-ahead
1846       ConcurrentMarkSweepThread::clear_CMS_flag(
1847         ConcurrentMarkSweepThread::CMS_vm_has_token);  // release token
1848       // Get a possibly blocked CMS thread going:
1849       //   Note that we set _foregroundGCIsActive true above,
1850       //   without protection of the CGC_lock.
1851       CGC_lock->notify();
1852       assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1853              "Possible deadlock");
1854       while (_foregroundGCShouldWait) {
1855         // wait for notification
1856         CGC_lock->wait(Mutex::_no_safepoint_check_flag);
        // Possibility of delay/starvation here, since the CMS token does
        // not know to give priority to the VM thread? Actually, I think
        // there wouldn't be any delay/starvation, but the proof of
        // that "fact" (?) appears non-trivial. XXX 20011219YSR
1861       }
1862       ConcurrentMarkSweepThread::set_CMS_flag(
1863         ConcurrentMarkSweepThread::CMS_vm_has_token);
1864     }
1865   }
1866   // The CMS_token is already held.  Get back the other locks.
1867   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1868          "VM thread should have CMS token");
1869   getFreelistLocks();
1870   bitMapLock()->lock_without_safepoint_check();
1871   if (TraceCMSState) {
1872     gclog_or_tty->print_cr("CMS foreground collector has asked for control "
1873       INTPTR_FORMAT " with first state %d", Thread::current(), first_state);
1874     gclog_or_tty->print_cr("    gets control with state %d", _collectorState);
1875   }
1876 
1877   // Check if we need to do a compaction, or if not, whether
1878   // we need to start the mark-sweep from scratch.
1879   bool should_compact    = false;
1880   bool should_start_over = false;
1881   decide_foreground_collection_type(clear_all_soft_refs,
1882     &should_compact, &should_start_over);
1883 
1884 NOT_PRODUCT(
1885   if (RotateCMSCollectionTypes) {
1886     if (_cmsGen->debug_collection_type() ==
1887         ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
1888       should_compact = true;
1889     } else if (_cmsGen->debug_collection_type() ==
1890                ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
1891       should_compact = false;
1892     }
1893   }
1894 )
1895 
1896   if (first_state > Idling) {
1897     report_concurrent_mode_interruption();
1898   }
1899 
1900   set_did_compact(should_compact);
1901   if (should_compact) {
1902     // If the collection is being acquired from the background
1903     // collector, there may be references on the discovered
1904     // references lists that have NULL referents (being those
1905     // that were concurrently cleared by a mutator) or
1906     // that are no longer active (having been enqueued concurrently
1907     // by the mutator).
1908     // Scrub the list of those references because Mark-Sweep-Compact
1909     // code assumes referents are not NULL and that all discovered
1910     // Reference objects are active.
1911     ref_processor()->clean_up_discovered_references();
1912 
1913     if (first_state > Idling) {
1914       save_heap_summary();
1915     }
1916 
1917     do_compaction_work(clear_all_soft_refs);
1918 
1919     // Has the GC time limit been exceeded?
1920     DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
1921     size_t max_eden_size = young_gen->max_capacity() -
1922                            young_gen->to()->capacity() -
1923                            young_gen->from()->capacity();
1924     GenCollectedHeap* gch = GenCollectedHeap::heap();
1925     GCCause::Cause gc_cause = gch->gc_cause();
1926     size_policy()->check_gc_overhead_limit(_young_gen->used(),
1927                                            young_gen->eden()->used(),
1928                                            _cmsGen->max_capacity(),
1929                                            max_eden_size,
1930                                            full,
1931                                            gc_cause,
1932                                            gch->collector_policy());
1933   } else {
1934     do_mark_sweep_work(clear_all_soft_refs, first_state,
1935       should_start_over);
1936   }
1937   // Reset the expansion cause, now that we just completed
1938   // a collection cycle.
1939   clear_expansion_cause();
1940   _foregroundGCIsActive = false;
1941   return;
1942 }
1943 
1944 // Resize the tenured generation
1945 // after obtaining the free list locks for the
1946 // two generations.
1947 void CMSCollector::compute_new_size() {
1948   assert_locked_or_safepoint(Heap_lock);
1949   FreelistLocker z(this);
1950   MetaspaceGC::compute_new_size();
1951   _cmsGen->compute_new_size_free_list();
1952 }
1953 
1954 // A work method used by foreground collection to determine
1955 // what type of collection (compacting or not, continuing or fresh)
1956 // it should do.
1957 // NOTE: the intent is to make UseCMSCompactAtFullCollection
1958 // and CMSCompactWhenClearAllSoftRefs the default in the future
1959 // and do away with the flags after a suitable period.
1960 void CMSCollector::decide_foreground_collection_type(
1961   bool clear_all_soft_refs, bool* should_compact,
1962   bool* should_start_over) {
1963   // Normally, we'll compact only if the UseCMSCompactAtFullCollection
1964   // flag is set, and we have either requested a System.gc() or
1965   // the number of full gc's since the last concurrent cycle
1966   // has exceeded the threshold set by CMSFullGCsBeforeCompaction,
1967   // or if an incremental collection has failed
1968   GenCollectedHeap* gch = GenCollectedHeap::heap();
1969   assert(gch->collector_policy()->is_generation_policy(),
1970          "You may want to check the correctness of the following");
1971   // Inform cms gen if this was due to partial collection failing.
1972   // The CMS gen may use this fact to determine its expansion policy.
1973   if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
1974     assert(!_cmsGen->incremental_collection_failed(),
1975            "Should have been noticed, reacted to and cleared");
1976     _cmsGen->set_incremental_collection_failed();
1977   }
1978   *should_compact =
1979     UseCMSCompactAtFullCollection &&
1980     ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
1981      GCCause::is_user_requested_gc(gch->gc_cause()) ||
1982      gch->incremental_collection_will_fail(true /* consult_young */));
1983   *should_start_over = false;
1984   if (clear_all_soft_refs && !*should_compact) {
1985     // We are about to do a last ditch collection attempt
1986     // so it would normally make sense to do a compaction
1987     // to reclaim as much space as possible.
1988     if (CMSCompactWhenClearAllSoftRefs) {
1989       // Default: The rationale is that in this case either
1990       // we are past the final marking phase, in which case
1991       // we'd have to start over, or so little has been done
1992       // that there's little point in saving that work. Compaction
1993       // appears to be the sensible choice in either case.
1994       *should_compact = true;
1995     } else {
1996       // We have been asked to clear all soft refs, but not to
1997       // compact. Make sure that we aren't past the final checkpoint
1998       // phase, for that is where we process soft refs. If we are already
1999       // past that phase, we'll need to redo the refs discovery phase and
2000       // if necessary clear soft refs that weren't previously
2001       // cleared. We do so by remembering the phase in which
2002       // we came in, and if we are past the refs processing
2003       // phase, we'll choose to just redo the mark-sweep
2004       // collection from scratch.
2005       if (_collectorState > FinalMarking) {
2006         // We are past the refs processing phase;
2007         // start over and do a fresh synchronous CMS cycle
2008         _collectorState = Resetting; // skip to reset to start new cycle
2009         reset(false /* == !asynch */);
2010         *should_start_over = true;
2011       } // else we can continue a possibly ongoing current cycle
2012     }
2013   }
2014 }
2015 
2016 // A work method used by the foreground collector to do
2017 // a mark-sweep-compact.
2018 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
2019   GenCollectedHeap* gch = GenCollectedHeap::heap();
2020 
2021   STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
2022   gc_timer->register_gc_start();
2023 
2024   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
2025   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
2026 
2027   GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL);
2028   if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
2029     gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
2030       "collections passed to foreground collector", _full_gcs_since_conc_gc);
2031   }
2032 
2033   // Sample collection interval time and reset for collection pause.
2034   if (UseAdaptiveSizePolicy) {
2035     size_policy()->msc_collection_begin();
2036   }
2037 
2038   // Temporarily widen the span of the weak reference processing to
2039   // the entire heap.
2040   MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
2041   ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
2042   // Temporarily, clear the "is_alive_non_header" field of the
2043   // reference processor.
2044   ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
2045   // Temporarily make reference _processing_ single threaded (non-MT).
2046   ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
2047   // Temporarily make refs discovery atomic
2048   ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
2049   // Temporarily make reference _discovery_ single threaded (non-MT)
2050   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
2051 
2052   ref_processor()->set_enqueuing_is_done(false);
2053   ref_processor()->enable_discovery(false /*verify_disabled*/, false /*check_no_refs*/);
2054   ref_processor()->setup_policy(clear_all_soft_refs);
2055   // If an asynchronous collection finishes, the _modUnionTable is
2056   // all clear.  If we are assuming the collection from an asynchronous
2057   // collection, clear the _modUnionTable.
2058   assert(_collectorState != Idling || _modUnionTable.isAllClear(),
2059     "_modUnionTable should be clear if the baton was not passed");
2060   _modUnionTable.clear_all();
2061   assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
2062     "mod union for klasses should be clear if the baton was passed");
2063   _ct->klass_rem_set()->clear_mod_union();
2064 
2065   // We must adjust the allocation statistics being maintained
2066   // in the free list space. We do so by reading and clearing
2067   // the sweep timer and updating the block flux rate estimates below.
2068   assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
2069   if (_inter_sweep_timer.is_active()) {
2070     _inter_sweep_timer.stop();
2071     // Note that we do not use this sample to update the _inter_sweep_estimate.
2072     _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
2073                                             _inter_sweep_estimate.padded_average(),
2074                                             _intra_sweep_estimate.padded_average());
2075   }
2076 
2077   GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
2078     ref_processor(), clear_all_soft_refs);
2079   #ifdef ASSERT
2080     CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
2081     size_t free_size = cms_space->free();
2082     assert(free_size ==
2083            pointer_delta(cms_space->end(), cms_space->compaction_top())
2084            * HeapWordSize,
2085       "All the free space should be compacted into one chunk at top");
2086     assert(cms_space->dictionary()->total_chunk_size(
2087                                       debug_only(cms_space->freelistLock())) == 0 ||
2088            cms_space->totalSizeInIndexedFreeLists() == 0,
2089       "All the free space should be in a single chunk");
2090     size_t num = cms_space->totalCount();
2091     assert((free_size == 0 && num == 0) ||
2092            (free_size > 0  && (num == 1 || num == 2)),
2093          "There should be at most 2 free chunks after compaction");
2094   #endif // ASSERT
2095   _collectorState = Resetting;
2096   assert(_restart_addr == NULL,
2097          "Should have been NULL'd before baton was passed");
2098   reset(false /* == !asynch */);
2099   _cmsGen->reset_after_compaction();
2100   _concurrent_cycles_since_last_unload = 0;
2101 
2102   // Clear any data recorded in the PLAB chunk arrays.
2103   if (_survivor_plab_array != NULL) {
2104     reset_survivor_plab_arrays();
2105   }
2106 
2107   // Adjust the per-size allocation stats for the next epoch.
2108   _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
2109   // Restart the "inter sweep timer" for the next epoch.
2110   _inter_sweep_timer.reset();
2111   _inter_sweep_timer.start();
2112 
2113   // Sample collection pause time and reset for collection interval.
2114   if (UseAdaptiveSizePolicy) {
2115     size_policy()->msc_collection_end(gch->gc_cause());
2116   }
2117 
2118   gc_timer->register_gc_end();
2119 
2120   gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
2121 
2122   // For a mark-sweep-compact, compute_new_size() will be called
2123   // in the heap's do_collection() method.
2124 }
2125 
2126 // A work method used by the foreground collector to do
2127 // a mark-sweep, after taking over from a possibly on-going
2128 // concurrent mark-sweep collection.
2129 void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
2130   CollectorState first_state, bool should_start_over) {
2131   if (PrintGC && Verbose) {
2132     gclog_or_tty->print_cr("Pass concurrent collection to foreground "
2133       "collector with count %d",
2134       _full_gcs_since_conc_gc);
2135   }
2136   switch (_collectorState) {
2137     case Idling:
2138       if (first_state == Idling || should_start_over) {
        // The background GC was not active, or should be
        // restarted from scratch; start the cycle.
2141         _collectorState = InitialMarking;
2142       }
2143       // If first_state was not Idling, then a background GC
2144       // was in progress and has now finished.  No need to do it
2145       // again.  Leave the state as Idling.
2146       break;
2147     case Precleaning:
2148       // In the foreground case don't do the precleaning since
2149       // it is not done concurrently and there is extra work
2150       // required.
2151       _collectorState = FinalMarking;
2152   }
2153   collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause());
2154 
2155   // For a mark-sweep, compute_new_size() will be called
2156   // in the heap's do_collection() method.
2157 }
2158 
2159 
2160 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
2161   DefNewGeneration* dng = _young_gen->as_DefNewGeneration();
2162   EdenSpace* eden_space = dng->eden();
2163   ContiguousSpace* from_space = dng->from();
2164   ContiguousSpace* to_space   = dng->to();
2165   // Eden
2166   if (_eden_chunk_array != NULL) {
2167     gclog_or_tty->print_cr("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
2168                            eden_space->bottom(), eden_space->top(),
2169                            eden_space->end(), eden_space->capacity());
2170     gclog_or_tty->print_cr("_eden_chunk_index=" SIZE_FORMAT ", "
2171                            "_eden_chunk_capacity=" SIZE_FORMAT,
2172                            _eden_chunk_index, _eden_chunk_capacity);
2173     for (size_t i = 0; i < _eden_chunk_index; i++) {
2174       gclog_or_tty->print_cr("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
2175                              i, _eden_chunk_array[i]);
2176     }
2177   }
2178   // Survivor
2179   if (_survivor_chunk_array != NULL) {
2180     gclog_or_tty->print_cr("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
2181                            from_space->bottom(), from_space->top(),
2182                            from_space->end(), from_space->capacity());
2183     gclog_or_tty->print_cr("_survivor_chunk_index=" SIZE_FORMAT ", "
2184                            "_survivor_chunk_capacity=" SIZE_FORMAT,
2185                            _survivor_chunk_index, _survivor_chunk_capacity);
2186     for (size_t i = 0; i < _survivor_chunk_index; i++) {
2187       gclog_or_tty->print_cr("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
2188                              i, _survivor_chunk_array[i]);
2189     }
2190   }
2191 }
2192 
2193 void CMSCollector::getFreelistLocks() const {
2194   // Get locks for all free lists in all generations that this
2195   // collector is responsible for
2196   _cmsGen->freelistLock()->lock_without_safepoint_check();
2197 }
2198 
2199 void CMSCollector::releaseFreelistLocks() const {
2200   // Release locks for all free lists in all generations that this
2201   // collector is responsible for
2202   _cmsGen->freelistLock()->unlock();
2203 }
2204 
2205 bool CMSCollector::haveFreelistLocks() const {
2206   // Check locks for all free lists in all generations that this
2207   // collector is responsible for
2208   assert_lock_strong(_cmsGen->freelistLock());
2209   PRODUCT_ONLY(ShouldNotReachHere());
2210   return true;
2211 }
2212 
2213 // A utility class that is used by the CMS collector to
2214 // temporarily "release" the foreground collector from its
2215 // usual obligation to wait for the background collector to
2216 // complete an ongoing phase before proceeding.
2217 class ReleaseForegroundGC: public StackObj {
2218  private:
2219   CMSCollector* _c;
2220  public:
2221   ReleaseForegroundGC(CMSCollector* c) : _c(c) {
2222     assert(_c->_foregroundGCShouldWait, "Else should not need to call");
2223     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2224     // allow a potentially blocked foreground collector to proceed
2225     _c->_foregroundGCShouldWait = false;
2226     if (_c->_foregroundGCIsActive) {
2227       CGC_lock->notify();
2228     }
2229     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2230            "Possible deadlock");
2231   }
2232 
2233   ~ReleaseForegroundGC() {
2234     assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
2235     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2236     _c->_foregroundGCShouldWait = true;
2237   }
2238 };
2239 
2240 // There are separate collect_in_background and collect_in_foreground because of
2241 // the different locking requirements of the background collector and the
2242 // foreground collector.  There was originally an attempt to share
2243 // one "collect" method between the background collector and the foreground
// collector, but the amount of if-then-else logic required made it cleaner
// to have two separate methods.
2246 void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause) {
2247   assert(Thread::current()->is_ConcurrentGC_thread(),
2248     "A CMS asynchronous collection is only allowed on a CMS thread.");
2249 
2250   GenCollectedHeap* gch = GenCollectedHeap::heap();
2251   {
2252     bool safepoint_check = Mutex::_no_safepoint_check_flag;
2253     MutexLockerEx hl(Heap_lock, safepoint_check);
2254     FreelistLocker fll(this);
2255     MutexLockerEx x(CGC_lock, safepoint_check);
2256     if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2257       // The foreground collector is active or we're
2258       // not using asynchronous collections.  Skip this
2259       // background collection.
2260       assert(!_foregroundGCShouldWait, "Should be clear");
2261       return;
2262     } else {
2263       assert(_collectorState == Idling, "Should be idling before start.");
2264       _collectorState = InitialMarking;
2265       register_gc_start(cause);
2266       // Reset the expansion cause, now that we are about to begin
2267       // a new cycle.
2268       clear_expansion_cause();
2269 
2270       // Clear the MetaspaceGC flag since a concurrent collection
2271       // is starting but also clear it after the collection.
2272       MetaspaceGC::set_should_concurrent_collect(false);
2273     }
2274     // Decide if we want to enable class unloading as part of the
2275     // ensuing concurrent GC cycle.
2276     update_should_unload_classes();
2277     _full_gc_requested = false;           // acks all outstanding full gc requests
2278     _full_gc_cause = GCCause::_no_gc;
2279     // Signal that we are about to start a collection
2280     gch->increment_total_full_collections();  // ... starting a collection cycle
2281     _collection_count_start = gch->total_full_collections();
2282   }
2283 
2284   // Used for PrintGC
2285   size_t prev_used;
2286   if (PrintGC && Verbose) {
2287     prev_used = _cmsGen->used(); // XXXPERM
2288   }
2289 
2290   // The change of the collection state is normally done at this level;
2291   // the exceptions are phases that are executed while the world is
2292   // stopped.  For those phases the change of state is done while the
2293   // world is stopped.  For baton passing purposes this allows the
2294   // background collector to finish the phase and change state atomically.
2295   // The foreground collector cannot wait on a phase that is done
2296   // while the world is stopped because the foreground collector already
2297   // has the world stopped and would deadlock.
2298   while (_collectorState != Idling) {
2299     if (TraceCMSState) {
2300       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2301         Thread::current(), _collectorState);
2302     }
2303     // The foreground collector
2304     //   holds the Heap_lock throughout its collection.
2305     //   holds the CMS token (but not the lock)
2306     //     except while it is waiting for the background collector to yield.
2307     //
2308     // The foreground collector should be blocked (not for long)
2309     //   if the background collector is about to start a phase
2310     //   executed with world stopped.  If the background
2311     //   collector has already started such a phase, the
2312     //   foreground collector is blocked waiting for the
2313     //   Heap_lock.  The stop-world phases (InitialMarking and FinalMarking)
2314     //   are executed in the VM thread.
2315     //
2316     // The locking order is
2317     //   PendingListLock (PLL)  -- if applicable (FinalMarking)
2318     //   Heap_lock  (both this & PLL locked in VM_CMS_Operation::prologue())
2319     //   CMS token  (claimed in
2320     //                stop_world_and_do() -->
2321     //                  safepoint_synchronize() -->
2322     //                    CMSThread::synchronize())
2323 
2324     {
2325       // Check if the FG collector wants us to yield.
2326       CMSTokenSync x(true); // is cms thread
2327       if (waitForForegroundGC()) {
2328         // We yielded to a foreground GC, nothing more to be
2329         // done this round.
2330         assert(_foregroundGCShouldWait == false, "We set it to false in "
2331                "waitForForegroundGC()");
2332         if (TraceCMSState) {
2333           gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2334             " exiting collection CMS state %d",
2335             Thread::current(), _collectorState);
2336         }
2337         return;
2338       } else {
2339         // The background collector can run but check to see if the
2340         // foreground collector has done a collection while the
2341         // background collector was waiting to get the CGC_lock
2342         // above.  If yes, break so that _foregroundGCShouldWait
2343         // is cleared before returning.
2344         if (_collectorState == Idling) {
2345           break;
2346         }
2347       }
2348     }
2349 
2350     assert(_foregroundGCShouldWait, "Foreground collector, if active, "
2351       "should be waiting");
2352 
2353     switch (_collectorState) {
2354       case InitialMarking:
2355         {
2356           ReleaseForegroundGC x(this);
2357           stats().record_cms_begin();
2358           VM_CMS_Initial_Mark initial_mark_op(this);
2359           VMThread::execute(&initial_mark_op);
2360         }
2361         // The collector state may be any legal state at this point
2362         // since the background collector may have yielded to the
2363         // foreground collector.
2364         break;
2365       case Marking:
2366         // initial marking in checkpointRootsInitialWork has been completed
2367         if (markFromRoots(true)) { // we were successful
2368           assert(_collectorState == Precleaning, "Collector state should "
2369             "have changed");
2370         } else {
2371           assert(_foregroundGCIsActive, "Internal state inconsistency");
2372         }
2373         break;
2374       case Precleaning:
2375         if (UseAdaptiveSizePolicy) {
2376           size_policy()->concurrent_precleaning_begin();
2377         }
2378         // marking from roots in markFromRoots has been completed
2379         preclean();
2380         if (UseAdaptiveSizePolicy) {
2381           size_policy()->concurrent_precleaning_end();
2382         }
2383         assert(_collectorState == AbortablePreclean ||
2384                _collectorState == FinalMarking,
2385                "Collector state should have changed");
2386         break;
2387       case AbortablePreclean:
        if (UseAdaptiveSizePolicy) {
          size_policy()->concurrent_phases_resume();
        }
2391         abortable_preclean();
2392         if (UseAdaptiveSizePolicy) {
2393           size_policy()->concurrent_precleaning_end();
2394         }
2395         assert(_collectorState == FinalMarking, "Collector state should "
2396           "have changed");
2397         break;
2398       case FinalMarking:
2399         {
2400           ReleaseForegroundGC x(this);
2401 
2402           VM_CMS_Final_Remark final_remark_op(this);
2403           VMThread::execute(&final_remark_op);
2404         }
2405         assert(_foregroundGCShouldWait, "block post-condition");
2406         break;
2407       case Sweeping:
2408         if (UseAdaptiveSizePolicy) {
2409           size_policy()->concurrent_sweeping_begin();
2410         }
2411         // final marking in checkpointRootsFinal has been completed
2412         sweep(true);
2413         assert(_collectorState == Resizing, "Collector state change "
2414           "to Resizing must be done under the free_list_lock");
2415         _full_gcs_since_conc_gc = 0;
2416 
2417         // Stop the timers for adaptive size policy for the concurrent phases
2418         if (UseAdaptiveSizePolicy) {
2419           size_policy()->concurrent_sweeping_end();
2420           size_policy()->concurrent_phases_end(gch->gc_cause(),
2421                                              gch->prev_gen(_cmsGen)->capacity(),
2422                                              _cmsGen->free());
2423         }
2424 
2425       case Resizing: {
2426         // Sweeping has been completed...
2427         // At this point the background collection has completed.
2428         // Don't move the call to compute_new_size() down
2429         // into code that might be executed if the background
2430         // collection was preempted.
2431         {
2432           ReleaseForegroundGC x(this);   // unblock FG collection
2433           MutexLockerEx       y(Heap_lock, Mutex::_no_safepoint_check_flag);
2434           CMSTokenSync        z(true);   // not strictly needed.
2435           if (_collectorState == Resizing) {
2436             compute_new_size();
2437             save_heap_summary();
2438             _collectorState = Resetting;
2439           } else {
2440             assert(_collectorState == Idling, "The state should only change"
2441                    " because the foreground collector has finished the collection");
2442           }
2443         }
2444         break;
2445       }
2446       case Resetting:
2447         // CMS heap resizing has been completed
2448         reset(true);
2449         assert(_collectorState == Idling, "Collector state should "
2450           "have changed");
2451 
2452         MetaspaceGC::set_should_concurrent_collect(false);
2453 
2454         stats().record_cms_end();
2455         // Don't move the concurrent_phases_end() and compute_new_size()
2456         // calls to here because a preempted background collection
        // has its state set to "Resetting".
2458         break;
2459       case Idling:
2460       default:
2461         ShouldNotReachHere();
2462         break;
2463     }
2464     if (TraceCMSState) {
2465       gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
2466         Thread::current(), _collectorState);
2467     }
2468     assert(_foregroundGCShouldWait, "block post-condition");
2469   }
2470 
2471   // Should this be in gc_epilogue?
2472   collector_policy()->counters()->update_counters();
2473 
2474   {
2475     // Clear _foregroundGCShouldWait and, in the event that the
2476     // foreground collector is waiting, notify it, before
2477     // returning.
2478     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2479     _foregroundGCShouldWait = false;
2480     if (_foregroundGCIsActive) {
2481       CGC_lock->notify();
2482     }
2483     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2484            "Possible deadlock");
2485   }
2486   if (TraceCMSState) {
2487     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2488       " exiting collection CMS state %d",
2489       Thread::current(), _collectorState);
2490   }
2491   if (PrintGC && Verbose) {
2492     _cmsGen->print_heap_change(prev_used);
2493   }
2494 }
2495 
2496 void CMSCollector::register_foreground_gc_start(GCCause::Cause cause) {
2497   if (!_cms_start_registered) {
2498     register_gc_start(cause);
2499   }
2500 }
2501 
2502 void CMSCollector::register_gc_start(GCCause::Cause cause) {
2503   _cms_start_registered = true;
2504   _gc_timer_cm->register_gc_start();
2505   _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
2506 }
2507 
2508 void CMSCollector::register_gc_end() {
2509   if (_cms_start_registered) {
2510     report_heap_summary(GCWhen::AfterGC);
2511 
2512     _gc_timer_cm->register_gc_end();
2513     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2514     _cms_start_registered = false;
2515   }
2516 }
2517 
2518 void CMSCollector::save_heap_summary() {
2519   GenCollectedHeap* gch = GenCollectedHeap::heap();
2520   _last_heap_summary = gch->create_heap_summary();
2521   _last_metaspace_summary = gch->create_metaspace_summary();
2522 }
2523 
2524 void CMSCollector::report_heap_summary(GCWhen::Type when) {
2525   _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary);
2526   _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
2527 }
2528 
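     // Perform the remainder of the CMS collection synchronously in the VM
     // thread, with the world stopped: the state machine below walks the
     // collector through the remaining phases (eliding the preclean phases)
     // until the collector returns to Idling.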
2529 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause) {
2530   assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
2531          "Foreground collector should be waiting, not executing");
2532   assert(Thread::current()->is_VM_thread(), "A foreground collection "
2533     "may only be done by the VM Thread with the world stopped");
2534   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
2535          "VM thread should have CMS token");
2536 
2537   NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
2538     true, NULL);)
2539   if (UseAdaptiveSizePolicy) {
2540     size_policy()->ms_collection_begin();
2541   }
2542   COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
2543 
2544   HandleMark hm;  // Discard invalid handles created during verification
2545 
2546   if (VerifyBeforeGC &&
2547       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2548     Universe::verify();
2549   }
2550 
2551   // Snapshot the soft reference policy to be used in this collection cycle.
2552   ref_processor()->setup_policy(clear_all_soft_refs);
2553 
2554   // Decide if class unloading should be done
2555   update_should_unload_classes();
2556 
2557   bool init_mark_was_synchronous = false; // until proven otherwise
2558   while (_collectorState != Idling) {
2559     if (TraceCMSState) {
2560       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2561         Thread::current(), _collectorState);
2562     }
2563     switch (_collectorState) {
2564       case InitialMarking:
2565         register_foreground_gc_start(cause);
2566         init_mark_was_synchronous = true;  // fact to be exploited in re-mark
2567         checkpointRootsInitial(false);
2568         assert(_collectorState == Marking, "Collector state should have changed"
2569           " within checkpointRootsInitial()");
2570         break;
2571       case Marking:
2572         // initial marking in checkpointRootsInitialWork has been completed
2573         if (VerifyDuringGC &&
2574             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2575           Universe::verify("Verify before initial mark: ");
2576         }
2577         {
2578           bool res = markFromRoots(false);
2579           assert(res && _collectorState == FinalMarking, "Collector state should "
2580             "have changed");
2581           break;
2582         }
2583       case FinalMarking:
2584         if (VerifyDuringGC &&
2585             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2586           Universe::verify("Verify before re-mark: ");
2587         }
2588         checkpointRootsFinal(false, clear_all_soft_refs,
2589                              init_mark_was_synchronous);
2590         assert(_collectorState == Sweeping, "Collector state should "
2591           "have changed to Sweeping within checkpointRootsFinal()");
2592         break;
2593       case Sweeping:
2594         // final marking in checkpointRootsFinal has been completed
2595         if (VerifyDuringGC &&
2596             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2597           Universe::verify("Verify before sweep: ");
2598         }
2599         sweep(false);
2600         assert(_collectorState == Resizing, "Incorrect state");
2601         break;
2602       case Resizing: {
2603         // Sweeping has been completed; the actual resize in this case
2604         // is done separately; nothing to be done in this state.
2605         _collectorState = Resetting;
2606         break;
2607       }
2608       case Resetting:
2609         // The heap has been resized.
2610         if (VerifyDuringGC &&
2611             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2612           Universe::verify("Verify before reset: ");
2613         }
2614         save_heap_summary();
2615         reset(false);
2616         assert(_collectorState == Idling, "Collector state should "
2617           "have changed");
2618         break;
2619       case Precleaning:
2620       case AbortablePreclean:
2621         // Elide the preclean phase
2622         _collectorState = FinalMarking;
2623         break;
2624       default:
2625         ShouldNotReachHere();
2626     }
2627     if (TraceCMSState) {
2628       gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
2629         Thread::current(), _collectorState);
2630     }
2631   }
2632 
2633   if (UseAdaptiveSizePolicy) {
2634     GenCollectedHeap* gch = GenCollectedHeap::heap();
2635     size_policy()->ms_collection_end(gch->gc_cause());
2636   }
2637 
2638   if (VerifyAfterGC &&
2639       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2640     Universe::verify();
2641   }
2642   if (TraceCMSState) {
2643     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2644       " exiting collection CMS state %d",
2645       Thread::current(), _collectorState);
2646   }
2647 }
2648 
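     // Called by the CMS (background) thread while it holds the CMS token.
     // If a foreground collection is active, give up the token, wake the
     // foreground collector and wait until it finishes; returns true if we
     // yielded to a foreground collection and false otherwise.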
2649 bool CMSCollector::waitForForegroundGC() {
2650   bool res = false;
2651   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2652          "CMS thread should have CMS token");
2653   // Block the foreground collector until the
2654   // background collector decides whether to
2655   // yield.
2656   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2657   _foregroundGCShouldWait = true;
2658   if (_foregroundGCIsActive) {
2659     // The background collector yields to the
2660     // foreground collector and returns a value
2661     // indicating that it has yielded.  The foreground
2662     // collector can proceed.
2663     res = true;
2664     _foregroundGCShouldWait = false;
2665     ConcurrentMarkSweepThread::clear_CMS_flag(
2666       ConcurrentMarkSweepThread::CMS_cms_has_token);
2667     ConcurrentMarkSweepThread::set_CMS_flag(
2668       ConcurrentMarkSweepThread::CMS_cms_wants_token);
2669     // Get a possibly blocked foreground thread going
2670     CGC_lock->notify();
2671     if (TraceCMSState) {
2672       gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
2673         Thread::current(), _collectorState);
2674     }
2675     while (_foregroundGCIsActive) {
2676       CGC_lock->wait(Mutex::_no_safepoint_check_flag);
2677     }
2678     ConcurrentMarkSweepThread::set_CMS_flag(
2679       ConcurrentMarkSweepThread::CMS_cms_has_token);
2680     ConcurrentMarkSweepThread::clear_CMS_flag(
2681       ConcurrentMarkSweepThread::CMS_cms_wants_token);
2682   }
2683   if (TraceCMSState) {
2684     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
2685       Thread::current(), _collectorState);
2686   }
2687   return res;
2688 }
2689 
2690 // Because of the need to lock the free lists and other structures in
2691 // the collector, common to all the generations that the collector is
2692 // collecting, we need the gc_prologues of individual CMS generations
2693 // to delegate to their collector. It may have been simpler had the
2694 // current infrastructure allowed one to call a prologue on a
2695 // collector. In the absence of that, we have the generation's
2696 // prologue delegate to the collector, which delegates back
2697 // some "local" work to a worker method in the individual generations
2698 // that it's responsible for collecting, while itself doing any
2699 // work common to all generations it's responsible for. A similar
2700 // comment applies to the gc_epilogue()s.
2701 // The role of the variable _between_prologue_and_epilogue is to
2702 // enforce the invocation protocol.
2703 void CMSCollector::gc_prologue(bool full) {
2704   // Call gc_prologue_work() for the CMSGen
2705   // we are responsible for.
2706 
2707   // The following locking discipline assumes that we are only called
2708   // when the world is stopped.
2709   assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
2710 
2711   // The CMSCollector prologue must call the gc_prologues for the
2712   // "generations" that it's responsible
2713   // for.
2714 
2715   assert(   Thread::current()->is_VM_thread()
2716          || (   CMSScavengeBeforeRemark
2717              && Thread::current()->is_ConcurrentGC_thread()),
2718          "Incorrect thread type for prologue execution");
2719 
2720   if (_between_prologue_and_epilogue) {
2721     // We have already been invoked; this is a gc_prologue delegation
2722     // from yet another CMS generation that we are responsible for, just
2723     // ignore it since all relevant work has already been done.
2724     return;
2725   }
2726 
2727   // set a bit saying prologue has been called; cleared in epilogue
2728   _between_prologue_and_epilogue = true;
2729   // Claim locks for common data structures, then call gc_prologue_work()
2730   // for each CMSGen.
2731 
2732   getFreelistLocks();   // gets free list locks on constituent spaces
2733   bitMapLock()->lock_without_safepoint_check();
2734 
2735   // Should call gc_prologue_work() for all cms gens we are responsible for
2736   bool duringMarking =    _collectorState >= Marking
2737                          && _collectorState < Sweeping;
2738 
2739   // The young collections clear the modified oops state, which tells if
2740   // there are any modified oops in the class. The remark phase also needs
2741   // that information. Tell the young collection to save the union of all
2742   // modified klasses.
2743   if (duringMarking) {
2744     _ct->klass_rem_set()->set_accumulate_modified_oops(true);
2745   }
2746 
2747   bool registerClosure = duringMarking;
2748 
2749   ModUnionClosure* muc = CollectedHeap::use_parallel_gc_threads() ?
2750                                                &_modUnionClosurePar
2751                                                : &_modUnionClosure;
2752   _cmsGen->gc_prologue_work(full, registerClosure, muc);
2753 
2754   if (!full) {
2755     stats().record_gc0_begin();
2756   }
2757 }
2758 
2759 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2760 
2761   _capacity_at_prologue = capacity();
2762   _used_at_prologue = used();
2763 
2764   // Delegate to CMScollector which knows how to coordinate between
2765   // this and any other CMS generations that it is responsible for
2766   // collecting.
2767   collector()->gc_prologue(full);
2768 }
2769 
2770 // This is a "private" interface for use by this generation's CMSCollector.
2771 // Not to be called directly by any other entity (for instance,
2772 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2773 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2774   bool registerClosure, ModUnionClosure* modUnionClosure) {
2775   assert(!incremental_collection_failed(), "Shouldn't be set yet");
2776   assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2777     "Should be NULL");
2778   if (registerClosure) {
2779     cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2780   }
2781   cmsSpace()->gc_prologue();
2782   // Clear stat counters
2783   NOT_PRODUCT(
2784     assert(_numObjectsPromoted == 0, "check");
2785     assert(_numWordsPromoted   == 0, "check");
2786     if (Verbose && PrintGC) {
2787       gclog_or_tty->print("Allocated "SIZE_FORMAT" objects, "
2788                           SIZE_FORMAT" bytes concurrently",
2789       _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2790     }
2791     _numObjectsAllocated = 0;
2792     _numWordsAllocated   = 0;
2793   )
2794 }
2795 
2796 void CMSCollector::gc_epilogue(bool full) {
2797   // The following locking discipline assumes that we are only called
2798   // when the world is stopped.
2799   assert(SafepointSynchronize::is_at_safepoint(),
2800          "world is stopped assumption");
2801 
2802   // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2803   // if linear allocation blocks need to be appropriately marked to allow
2804   // the blocks to be parsable. We also check here whether we need to nudge the
2805   // CMS collector thread to start a new cycle (if it's not already active).
2806   assert(   Thread::current()->is_VM_thread()
2807          || (   CMSScavengeBeforeRemark
2808              && Thread::current()->is_ConcurrentGC_thread()),
2809          "Incorrect thread type for epilogue execution");
2810 
2811   if (!_between_prologue_and_epilogue) {
2812     // We have already been invoked; this is a gc_epilogue delegation
2813     // from yet another CMS generation that we are responsible for, just
2814     // ignore it since all relevant work has already been done.
2815     return;
2816   }
2817   assert(haveFreelistLocks(), "must have freelist locks");
2818   assert_lock_strong(bitMapLock());
2819 
2820   _ct->klass_rem_set()->set_accumulate_modified_oops(false);
2821 
2822   _cmsGen->gc_epilogue_work(full);
2823 
2824   if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2825     // in case sampling was not already enabled, enable it
2826     _start_sampling = true;
2827   }
2828   // reset _eden_chunk_array so sampling starts afresh
2829   _eden_chunk_index = 0;
2830 
2831   size_t cms_used   = _cmsGen->cmsSpace()->used();
2832 
2833   // update performance counters - this uses a special version of
2834   // update_counters() that allows the utilization to be passed as a
2835   // parameter, avoiding multiple calls to used().
2836   //
2837   _cmsGen->update_counters(cms_used);
2838 
2839   if (CMSIncrementalMode) {
2840     icms_update_allocation_limits();
2841   }
2842 
2843   bitMapLock()->unlock();
2844   releaseFreelistLocks();
2845 
2846   if (!CleanChunkPoolAsync) {
2847     Chunk::clean_chunk_pool();
2848   }
2849 
2850   set_did_compact(false);
2851   _between_prologue_and_epilogue = false;  // ready for next cycle
2852 }
2853 
2854 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2855   collector()->gc_epilogue(full);
2856 
2857   // Also reset promotion tracking in par gc thread states.
2858   if (CollectedHeap::use_parallel_gc_threads()) {
2859     for (uint i = 0; i < ParallelGCThreads; i++) {
2860       _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
2861     }
2862   }
2863 }
2864 
2865 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2866   assert(!incremental_collection_failed(), "Should have been cleared");
2867   cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2868   cmsSpace()->gc_epilogue();
2869   // Print stat counters
2870   NOT_PRODUCT(
2871     assert(_numObjectsAllocated == 0, "check");
2872     assert(_numWordsAllocated == 0, "check");
2873     if (Verbose && PrintGC) {
2874       gclog_or_tty->print("Promoted "SIZE_FORMAT" objects, "
2875                           SIZE_FORMAT" bytes",
2876                  _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2877     }
2878     _numObjectsPromoted = 0;
2879     _numWordsPromoted   = 0;
2880   )
2881 
2882   if (PrintGC && Verbose) {
2883     // The call down the chain through contiguous_available() needs the
2884     // freelistLock, so print this out before releasing the freelistLock.
2885     gclog_or_tty->print(" Contiguous available "SIZE_FORMAT" bytes ",
2886                         contiguous_available());
2887   }
2888 }
2889 
2890 #ifndef PRODUCT
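     // Debug-only helper: returns true if the calling thread can be considered
     // to hold the CMS token -- either directly (VM thread or CMS thread), or,
     // for a GC worker thread, indirectly via the VM thread holding the token
     // together with the ParGCRareEvent_lock.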
2891 bool CMSCollector::have_cms_token() {
2892   Thread* thr = Thread::current();
2893   if (thr->is_VM_thread()) {
2894     return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2895   } else if (thr->is_ConcurrentGC_thread()) {
2896     return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2897   } else if (thr->is_GC_task_thread()) {
2898     return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2899            ParGCRareEvent_lock->owned_by_self();
2900   }
2901   return false;
2902 }
2903 #endif
2904 
2905 // Check reachability of the given heap address in CMS generation,
2906 // treating all other generations as roots.
2907 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2908   // We could "guarantee" below, rather than assert, but I'll
2909   // leave these as "asserts" so that an adventurous debugger
2910   // could try this in the product build provided some subset of
2911   // the conditions were met, they were interested in the
2912   // results, and they knew that the computation below wouldn't interfere
2913   // with other concurrent computations mutating the structures
2914   // being read or written.
2915   assert(SafepointSynchronize::is_at_safepoint(),
2916          "Else mutations in object graph will make answer suspect");
2917   assert(have_cms_token(), "Should hold cms token");
2918   assert(haveFreelistLocks(), "must hold free list locks");
2919   assert_lock_strong(bitMapLock());
2920 
2921   // Clear the marking bit map array before starting, but, just
2922   // for kicks, first report if the given address is already marked
2923   gclog_or_tty->print_cr("Start: Address " PTR_FORMAT " is%s marked", addr,
2924                 _markBitMap.isMarked(addr) ? "" : " not");
2925 
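       // Reuse the after-remark verification machinery: verify_after_remark()
       // re-marks the heap from the roots into verification_mark_bm(), which
       // we then query for the given address.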
2926   if (verify_after_remark()) {
2927     MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2928     bool result = verification_mark_bm()->isMarked(addr);
2929     gclog_or_tty->print_cr("TransitiveMark: Address " PTR_FORMAT " %s marked", addr,
2930                            result ? "IS" : "is NOT");
2931     return result;
2932   } else {
2933     gclog_or_tty->print_cr("Could not compute result");
2934     return false;
2935   }
2936 }
2937 
2938 
2939 void
2940 CMSCollector::print_on_error(outputStream* st) {
2941   CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
2942   if (collector != NULL) {
2943     CMSBitMap* bitmap = &collector->_markBitMap;
2944     st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, bitmap);
2945     bitmap->print_on_error(st, " Bits: ");
2946 
2947     st->cr();
2948 
2949     CMSBitMap* mut_bitmap = &collector->_modUnionTable;
2950     st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, mut_bitmap);
2951     mut_bitmap->print_on_error(st, " Bits: ");
2952   }
2953 }
2954 
2955 ////////////////////////////////////////////////////////
2956 // CMS Verification Support
2957 ////////////////////////////////////////////////////////
2958 // Following the remark phase, the following invariant
2959 // should hold -- each object in the CMS heap which is
2960 // marked in the verification_mark_bm() should also be marked in markBitMap().
2961 
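     // Closure applied to each bit set in the bitmap being iterated over; it
     // checks that the corresponding address is also marked in the bitmap
     // supplied to the constructor, and records a failure otherwise.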
2962 class VerifyMarkedClosure: public BitMapClosure {
2963   CMSBitMap* _marks;
2964   bool       _failed;
2965 
2966  public:
2967   VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2968 
2969   bool do_bit(size_t offset) {
2970     HeapWord* addr = _marks->offsetToHeapWord(offset);
2971     if (!_marks->isMarked(addr)) {
2972       oop(addr)->print_on(gclog_or_tty);
2973       gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
2974       _failed = true;
2975     }
2976     return true;
2977   }
2978 
2979   bool failed() { return _failed; }
2980 };
2981 
2982 bool CMSCollector::verify_after_remark(bool silent) {
2983   if (!silent) gclog_or_tty->print(" [Verifying CMS Marking... ");
2984   MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2985   static bool init = false;
2986 
2987   assert(SafepointSynchronize::is_at_safepoint(),
2988          "Else mutations in object graph will make answer suspect");
2989   assert(have_cms_token(),
2990          "Else there may be mutual interference in use of "
2991          " verification data structures");
2992   assert(_collectorState > Marking && _collectorState <= Sweeping,
2993          "Else marking info checked here may be obsolete");
2994   assert(haveFreelistLocks(), "must hold free list locks");
2995   assert_lock_strong(bitMapLock());
2996 
2997 
2998   // Allocate marking bit map if not already allocated
2999   if (!init) { // first time
3000     if (!verification_mark_bm()->allocate(_span)) {
3001       return false;
3002     }
3003     init = true;
3004   }
3005 
3006   assert(verification_mark_stack()->isEmpty(), "Should be empty");
3007 
3008   // Turn off refs discovery -- so we will be tracing through refs.
3009   // This is as intended, because by this time
3010   // GC must already have cleared any refs that need to be cleared,
3011   // and traced those that need to be marked; moreover,
3012   // the marking done here is not going to interfere in any
3013   // way with the marking information used by GC.
3014   NoRefDiscovery no_discovery(ref_processor());
3015 
3016   COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3017 
3018   // Clear any marks from a previous round
3019   verification_mark_bm()->clear_all();
3020   assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
3021   verify_work_stacks_empty();
3022 
3023   GenCollectedHeap* gch = GenCollectedHeap::heap();
3024   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
3025   // Update the saved marks which may affect the root scans.
3026   gch->save_marks();
3027 
3028   if (CMSRemarkVerifyVariant == 1) {
3029     // In this first variant of verification, we complete
3030     // all marking, then check if the new marks-vector is
3031     // a subset of the CMS marks-vector.
3032     verify_after_remark_work_1();
3033   } else if (CMSRemarkVerifyVariant == 2) {
3034     // In this second variant of verification, we flag an error
3035     // (i.e. an object reachable in the new marks-vector not reachable
3036     // in the CMS marks-vector) immediately, also indicating the
3037     // identity of an object (A) that references the unmarked object (B) --
3038     // presumably, a mutation to A failed to be picked up by preclean/remark?
3039     verify_after_remark_work_2();
3040   } else {
3041     warning("Unrecognized value %d for CMSRemarkVerifyVariant",
3042             CMSRemarkVerifyVariant);
3043   }
3044   if (!silent) gclog_or_tty->print(" done] ");
3045   return true;
3046 }
3047 
3048 void CMSCollector::verify_after_remark_work_1() {
3049   ResourceMark rm;
3050   HandleMark  hm;
3051   GenCollectedHeap* gch = GenCollectedHeap::heap();
3052 
3053   // Get a clear set of claim bits for the strong roots processing to work with.
3054   ClassLoaderDataGraph::clear_claimed_marks();
3055 
3056   // Mark from roots one level into CMS
3057   MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
3058   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3059 
3060   gch->gen_process_strong_roots(_cmsGen->level(),
3061                                 true,   // younger gens are roots
3062                                 true,   // activate StrongRootsScope
3063                                 SharedHeap::ScanningOption(roots_scanning_options()),
3064                                 &notOlder,
3065                                 NULL,
3066                                 NULL); // SSS: Provide correct closure
3067 
3068   // Now mark from the roots
3069   MarkFromRootsClosure markFromRootsClosure(this, _span,
3070     verification_mark_bm(), verification_mark_stack(),
3071     false /* don't yield */, true /* verifying */);
3072   assert(_restart_addr == NULL, "Expected pre-condition");
3073   verification_mark_bm()->iterate(&markFromRootsClosure);
3074   while (_restart_addr != NULL) {
3075     // Deal with stack overflow: by restarting at the indicated
3076     // address.
3077     HeapWord* ra = _restart_addr;
3078     markFromRootsClosure.reset(ra);
3079     _restart_addr = NULL;
3080     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
3081   }
3082   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
3083   verify_work_stacks_empty();
3084 
3085   // Marking completed -- now verify that each bit marked in
3086   // verification_mark_bm() is also marked in markBitMap(); flag all
3087   // errors by printing corresponding objects.
3088   VerifyMarkedClosure vcl(markBitMap());
3089   verification_mark_bm()->iterate(&vcl);
3090   if (vcl.failed()) {
3091     gclog_or_tty->print("Verification failed");
3092     Universe::heap()->print_on(gclog_or_tty);
3093     fatal("CMS: failed marking verification after remark");
3094   }
3095 }
3096 
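     // Walks all klasses and checks that every oop they reference is marked
     // in the supplied (verification) bitmap.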
3097 class VerifyKlassOopsKlassClosure : public KlassClosure {
3098   class VerifyKlassOopsClosure : public OopClosure {
3099     CMSBitMap* _bitmap;
3100    public:
3101     VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
3102     void do_oop(oop* p)       { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
3103     void do_oop(narrowOop* p) { ShouldNotReachHere(); }
3104   } _oop_closure;
3105  public:
3106   VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
3107   void do_klass(Klass* k) {
3108     k->oops_do(&_oop_closure);
3109   }
3110 };
3111 
3112 void CMSCollector::verify_after_remark_work_2() {
3113   ResourceMark rm;
3114   HandleMark  hm;
3115   GenCollectedHeap* gch = GenCollectedHeap::heap();
3116 
3117   // Get a clear set of claim bits for the strong roots processing to work with.
3118   ClassLoaderDataGraph::clear_claimed_marks();
3119 
3120   // Mark from roots one level into CMS
3121   MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
3122                                      markBitMap());
3123   CMKlassClosure klass_closure(&notOlder);
3124 
3125   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3126   gch->gen_process_strong_roots(_cmsGen->level(),
3127                                 true,   // younger gens are roots
3128                                 true,   // activate StrongRootsScope
3129                                 SharedHeap::ScanningOption(roots_scanning_options()),
3130                                 &notOlder,
3131                                 NULL,
3132                                 &klass_closure);
3133 
3134   // Now mark from the roots
3135   MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
3136     verification_mark_bm(), markBitMap(), verification_mark_stack());
3137   assert(_restart_addr == NULL, "Expected pre-condition");
3138   verification_mark_bm()->iterate(&markFromRootsClosure);
3139   while (_restart_addr != NULL) {
3140     // Deal with stack overflow: by restarting at the indicated
3141     // address.
3142     HeapWord* ra = _restart_addr;
3143     markFromRootsClosure.reset(ra);
3144     _restart_addr = NULL;
3145     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
3146   }
3147   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
3148   verify_work_stacks_empty();
3149 
3150   VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm());
3151   ClassLoaderDataGraph::classes_do(&verify_klass_oops);
3152 
3153   // Marking completed -- now verify that each bit marked in
3154   // verification_mark_bm() is also marked in markBitMap(); flag all
3155   // errors by printing corresponding objects.
3156   VerifyMarkedClosure vcl(markBitMap());
3157   verification_mark_bm()->iterate(&vcl);
3158   assert(!vcl.failed(), "Else verification above should not have succeeded");
3159 }
3160 
3161 void ConcurrentMarkSweepGeneration::save_marks() {
3162   // delegate to CMS space
3163   cmsSpace()->save_marks();
3164   for (uint i = 0; i < ParallelGCThreads; i++) {
3165     _par_gc_thread_states[i]->promo.startTrackingPromotions();
3166   }
3167 }
3168 
3169 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
3170   return cmsSpace()->no_allocs_since_save_marks();
3171 }
3172 
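     // For each (closure type, suffix) pair, this macro defines a method that
     // iterates over objects allocated in this generation since the last call
     // to save_marks(), applying the closure and then saving marks again.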
3173 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)    \
3174                                                                 \
3175 void ConcurrentMarkSweepGeneration::                            \
3176 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
3177   cl->set_generation(this);                                     \
3178   cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl);      \
3179   cl->reset_generation();                                       \
3180   save_marks();                                                 \
3181 }
3182 
3183 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
3184 
3185 void
3186 ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
3187   cl->set_generation(this);
3188   younger_refs_in_space_iterate(_cmsSpace, cl);
3189   cl->reset_generation();
3190 }
3191 
3192 void
3193 ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
3194   if (freelistLock()->owned_by_self()) {
3195     Generation::oop_iterate(cl);
3196   } else {
3197     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3198     Generation::oop_iterate(cl);
3199   }
3200 }
3201 
3202 void
3203 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
3204   if (freelistLock()->owned_by_self()) {
3205     Generation::object_iterate(cl);
3206   } else {
3207     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3208     Generation::object_iterate(cl);
3209   }
3210 }
3211 
3212 void
3213 ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
3214   if (freelistLock()->owned_by_self()) {
3215     Generation::safe_object_iterate(cl);
3216   } else {
3217     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3218     Generation::safe_object_iterate(cl);
3219   }
3220 }
3221 
3222 void
3223 ConcurrentMarkSweepGeneration::post_compact() {
3224 }
3225 
3226 void
3227 ConcurrentMarkSweepGeneration::prepare_for_verify() {
3228   // Fix the linear allocation blocks to look like free blocks.
3229 
3230   // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3231   // are not called when the heap is verified during universe initialization and
3232   // at vm shutdown.
3233   if (freelistLock()->owned_by_self()) {
3234     cmsSpace()->prepare_for_verify();
3235   } else {
3236     MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3237     cmsSpace()->prepare_for_verify();
3238   }
3239 }
3240 
3241 void
3242 ConcurrentMarkSweepGeneration::verify() {
3243   // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3244   // are not called when the heap is verified during universe initialization and
3245   // at vm shutdown.
3246   if (freelistLock()->owned_by_self()) {
3247     cmsSpace()->verify();
3248   } else {
3249     MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3250     cmsSpace()->verify();
3251   }
3252 }
3253 
3254 void CMSCollector::verify() {
3255   _cmsGen->verify();
3256 }
3257 
3258 #ifndef PRODUCT
3259 bool CMSCollector::overflow_list_is_empty() const {
3260   assert(_num_par_pushes >= 0, "Inconsistency");
3261   if (_overflow_list == NULL) {
3262     assert(_num_par_pushes == 0, "Inconsistency");
3263   }
3264   return _overflow_list == NULL;
3265 }
3266 
3267 // The methods verify_work_stacks_empty() and verify_overflow_empty()
3268 // merely consolidate assertion checks that appear to occur together frequently.
3269 void CMSCollector::verify_work_stacks_empty() const {
3270   assert(_markStack.isEmpty(), "Marking stack should be empty");
3271   assert(overflow_list_is_empty(), "Overflow list should be empty");
3272 }
3273 
3274 void CMSCollector::verify_overflow_empty() const {
3275   assert(overflow_list_is_empty(), "Overflow list should be empty");
3276   assert(no_preserved_marks(), "No preserved marks");
3277 }
3278 #endif // PRODUCT
3279 
3280 // Decide if we want to enable class unloading as part of the
3281 // ensuing concurrent GC cycle. We will collect and
3282 // unload classes if it's the case that:
3283 // (1) an explicit gc request has been made and the flag
3284 //     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
3285 // (2) (a) class unloading is enabled at the command line, and
3286 //     (b) (i) enough concurrent cycles have elapsed since classes were last
3286 //         unloaded, or (ii) the old gen is getting really full
3287 // NOTE: Provided there is no change in the state of the heap between
3288 // calls to this method, it should have idempotent results. Moreover,
3289 // its results should be monotonically increasing (i.e. going from 0 to 1,
3290 // but not 1 to 0) between successive calls between which the heap was
3291 // not collected. For the implementation below, it must thus rely on
3292 // the property that concurrent_cycles_since_last_unload()
3293 // will not decrease unless a collection cycle happened and that
3294 // _cmsGen->is_too_full() is
3295 // itself also monotonic in that sense. See check_monotonicity()
3296 // below.
3297 void CMSCollector::update_should_unload_classes() {
3298   _should_unload_classes = false;
3299   // Condition 1 above
3300   if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
3301     _should_unload_classes = true;
3302   } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
3303     // Disjuncts 2.b.(i,ii) above
3304     _should_unload_classes = (concurrent_cycles_since_last_unload() >=
3305                               CMSClassUnloadingMaxInterval)
3306                            || _cmsGen->is_too_full();
3307   }
3308 }
3309 
3310 bool ConcurrentMarkSweepGeneration::is_too_full() const {
3311   bool res = should_concurrent_collect();
3312   res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
3313   return res;
3314 }
3315 
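     // Set up the root scanning options and verification state for the
     // upcoming CMS cycle: when classes are to be unloaded we scan only the
     // system classes as strong roots; otherwise all classes (and, when
     // verifying, also strings and the code cache) are scanned as roots.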
3316 void CMSCollector::setup_cms_unloading_and_verification_state() {
3317   const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3318                              || VerifyBeforeExit;
3319   const  int  rso           =   SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache;
3320 
3321   // We set the proper root for this CMS cycle here.
3322   if (should_unload_classes()) {   // Should unload classes this cycle
3323     remove_root_scanning_option(SharedHeap::SO_AllClasses);
3324     add_root_scanning_option(SharedHeap::SO_SystemClasses);
3325     remove_root_scanning_option(rso);  // Shrink the root set appropriately
3326     set_verifying(should_verify);    // Set verification state for this cycle
3327     return;                            // Nothing else needs to be done at this time
3328   }
3329 
3330   // Not unloading classes this cycle
3331   assert(!should_unload_classes(), "Inconsistency!");
3332   remove_root_scanning_option(SharedHeap::SO_SystemClasses);
3333   add_root_scanning_option(SharedHeap::SO_AllClasses);
3334 
3335   if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
3336     // Include symbols, strings and code cache elements to prevent their resurrection.
3337     add_root_scanning_option(rso);
3338     set_verifying(true);
3339   } else if (verifying() && !should_verify) {
3340     // We were verifying, but some verification flags got disabled.
3341     set_verifying(false);
3342     // Exclude symbols, strings and code cache elements from root scanning to
3343     // reduce IM and RM pauses.
3344     remove_root_scanning_option(rso);
3345   }
3346 }
3347 
3348 
3349 #ifndef PRODUCT
3350 HeapWord* CMSCollector::block_start(const void* p) const {
3351   const HeapWord* addr = (HeapWord*)p;
3352   if (_span.contains(p)) {
3353     if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
3354       return _cmsGen->cmsSpace()->block_start(p);
3355     }
3356   }
3357   return NULL;
3358 }
3359 #endif
3360 
3361 HeapWord*
3362 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
3363                                                    bool   tlab,
3364                                                    bool   parallel) {
3365   CMSSynchronousYieldRequest yr;
3366   assert(!tlab, "Can't deal with TLAB allocation");
3367   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3368   expand(word_size*HeapWordSize, MinHeapDeltaBytes,
3369     CMSExpansionCause::_satisfy_allocation);
3370   if (GCExpandToAllocateDelayMillis > 0) {
3371     os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3372   }
3373   return have_lock_and_allocate(word_size, tlab);
3374 }
3375 
3376 // YSR: All of this generation expansion/shrinking stuff is an exact copy of
3377 // OneContigSpaceCardGeneration, which makes me wonder if we should move this
3378 // to CardGeneration and share it...
3379 bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) {
3380   return CardGeneration::expand(bytes, expand_bytes);
3381 }
3382 
3383 void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
3384   CMSExpansionCause::Cause cause)
3385 {
3386 
3387   bool success = expand(bytes, expand_bytes);
3388 
3389   // remember why we expanded; this information is used
3390   // by shouldConcurrentCollect() when making decisions on whether to start
3391   // a new CMS cycle.
3392   if (success) {
3393     set_expansion_cause(cause);
3394     if (PrintGCDetails && Verbose) {
3395       gclog_or_tty->print_cr("Expanded CMS gen for %s",
3396         CMSExpansionCause::to_string(cause));
3397     }
3398   }
3399 }
3400 
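     // Attempt to satisfy a parallel LAB allocation of word_sz words by
     // expanding the generation; loops, because a competing thread may grab
     // the newly expanded space, until either the allocation succeeds or no
     // further expansion is possible.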
3401 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
3402   HeapWord* res = NULL;
3403   MutexLocker x(ParGCRareEvent_lock);
3404   while (true) {
3405     // Expansion by some other thread might make alloc OK now:
3406     res = ps->lab.alloc(word_sz);
3407     if (res != NULL) return res;
3408     // If there's not enough expansion space available, give up.
3409     if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
3410       return NULL;
3411     }
3412     // Otherwise, we try expansion.
3413     expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
3414       CMSExpansionCause::_allocate_par_lab);
3415     // Now go around the loop and try alloc again;
3416     // A competing par_promote might beat us to the expansion space,
3417     // so we may go around the loop again if promotion fails again.
3418     if (GCExpandToAllocateDelayMillis > 0) {
3419       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3420     }
3421   }
3422 }
3423 
3424 
3425 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
3426   PromotionInfo* promo) {
3427   MutexLocker x(ParGCRareEvent_lock);
3428   size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
3429   while (true) {
3430     // Expansion by some other thread might make alloc OK now:
3431     if (promo->ensure_spooling_space()) {
3432       assert(promo->has_spooling_space(),
3433              "Post-condition of successful ensure_spooling_space()");
3434       return true;
3435     }
3436     // If there's not enough expansion space available, give up.
3437     if (_virtual_space.uncommitted_size() < refill_size_bytes) {
3438       return false;
3439     }
3440     // Otherwise, we try expansion.
3441     expand(refill_size_bytes, MinHeapDeltaBytes,
3442       CMSExpansionCause::_allocate_par_spooling_space);
3443     // Now go around the loop and try alloc again;
3444     // A competing allocation might beat us to the expansion space,
3445     // so we may go around the loop again if allocation fails again.
3446     if (GCExpandToAllocateDelayMillis > 0) {
3447       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3448     }
3449   }
3450 }
3451 
3452 
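     // Shrink the committed size of this generation by 'bytes': the virtual
     // space, the CMS space end, the block offset table and the covered card
     // table region are all adjusted accordingly.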
3453 void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
3454   assert_locked_or_safepoint(ExpandHeap_lock);
3455   // Shrink committed space
3456   _virtual_space.shrink_by(bytes);
3457   // Shrink space; this also shrinks the space's BOT
3458   _cmsSpace->set_end((HeapWord*) _virtual_space.high());
3459   size_t new_word_size = heap_word_size(_cmsSpace->capacity());
3460   // Shrink the shared block offset array
3461   _bts->resize(new_word_size);
3462   MemRegion mr(_cmsSpace->bottom(), new_word_size);
3463   // Shrink the card table
3464   Universe::heap()->barrier_set()->resize_covered_region(mr);
3465 
3466   if (Verbose && PrintGC) {
3467     size_t new_mem_size = _virtual_space.committed_size();
3468     size_t old_mem_size = new_mem_size + bytes;
3469     gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
3470                   name(), old_mem_size/K, new_mem_size/K);
3471   }
3472 }
3473 
3474 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
3475   assert_locked_or_safepoint(Heap_lock);
3476   size_t size = ReservedSpace::page_align_size_down(bytes);
3477   // Only shrink if a compaction was done so that all the free space
3478   // in the generation is in a contiguous block at the end.
3479   if (size > 0 && did_compact()) {
3480     shrink_by(size);
3481   }
3482 }
3483 
3484 bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
3485   assert_locked_or_safepoint(Heap_lock);
3486   bool result = _virtual_space.expand_by(bytes);
3487   if (result) {
3488     size_t new_word_size =
3489       heap_word_size(_virtual_space.committed_size());
3490     MemRegion mr(_cmsSpace->bottom(), new_word_size);
3491     _bts->resize(new_word_size);  // resize the block offset shared array
3492     Universe::heap()->barrier_set()->resize_covered_region(mr);
3493     // Hmmmm... why doesn't CFLS::set_end verify locking?
3494     // This is quite ugly; FIX ME XXX
3495     _cmsSpace->assert_locked(freelistLock());
3496     _cmsSpace->set_end((HeapWord*)_virtual_space.high());
3497 
3498     // update the space and generation capacity counters
3499     if (UsePerfData) {
3500       _space_counters->update_capacity();
3501       _gen_counters->update_all();
3502     }
3503 
3504     if (Verbose && PrintGC) {
3505       size_t new_mem_size = _virtual_space.committed_size();
3506       size_t old_mem_size = new_mem_size - bytes;
3507       gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
3508                     name(), old_mem_size/K, bytes/K, new_mem_size/K);
3509     }
3510   }
3511   return result;
3512 }
3513 
3514 bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
3515   assert_locked_or_safepoint(Heap_lock);
3516   bool success = true;
3517   const size_t remaining_bytes = _virtual_space.uncommitted_size();
3518   if (remaining_bytes > 0) {
3519     success = grow_by(remaining_bytes);
3520     DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
3521   }
3522   return success;
3523 }
3524 
3525 void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
3526   assert_locked_or_safepoint(Heap_lock);
3527   assert_lock_strong(freelistLock());
3528   if (PrintGCDetails && Verbose) {
3529     warning("Shrinking of CMS not yet implemented");
3530   }
3531   return;
3532 }
3533 
3534 
3535 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
3536 // phases.
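     // Typical usage is to declare an instance on the stack for the duration
     // of a concurrent phase, e.g. (as in markFromRoots() below):
     //   CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);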
3537 class CMSPhaseAccounting: public StackObj {
3538  public:
3539   CMSPhaseAccounting(CMSCollector *collector,
3540                      const char *phase,
3541                      bool print_cr = true);
3542   ~CMSPhaseAccounting();
3543 
3544  private:
3545   CMSCollector *_collector;
3546   const char *_phase;
3547   elapsedTimer _wallclock;
3548   bool _print_cr;
3549 
3550  public:
3551   // Not MT-safe; so do not pass around these StackObj's
3552   // where they may be accessed by other threads.
3553   jlong wallclock_millis() {
3554     assert(_wallclock.is_active(), "Wall clock should not stop");
3555     _wallclock.stop();  // to record time
3556     jlong ret = _wallclock.milliseconds();
3557     _wallclock.start(); // restart
3558     return ret;
3559   }
3560 };
3561 
3562 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
3563                                        const char *phase,
3564                                        bool print_cr) :
3565   _collector(collector), _phase(phase), _print_cr(print_cr) {
3566 
3567   if (PrintCMSStatistics != 0) {
3568     _collector->resetYields();
3569   }
3570   if (PrintGCDetails) {
3571     gclog_or_tty->date_stamp(PrintGCDateStamps);
3572     gclog_or_tty->stamp(PrintGCTimeStamps);
3573     gclog_or_tty->print_cr("[%s-concurrent-%s-start]",
3574       _collector->cmsGen()->short_name(), _phase);
3575   }
3576   _collector->resetTimer();
3577   _wallclock.start();
3578   _collector->startTimer();
3579 }
3580 
3581 CMSPhaseAccounting::~CMSPhaseAccounting() {
3582   assert(_wallclock.is_active(), "Wall clock should not have stopped");
3583   _collector->stopTimer();
3584   _wallclock.stop();
3585   if (PrintGCDetails) {
3586     gclog_or_tty->date_stamp(PrintGCDateStamps);
3587     gclog_or_tty->stamp(PrintGCTimeStamps);
3588     gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
3589                  _collector->cmsGen()->short_name(),
3590                  _phase, _collector->timerValue(), _wallclock.seconds());
3591     if (_print_cr) {
3592       gclog_or_tty->cr();
3593     }
3594     if (PrintCMSStatistics != 0) {
3595       gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
3596                     _collector->yields());
3597     }
3598   }
3599 }
3600 
3601 // CMS work
3602 
3603 // The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
3604 class CMSParMarkTask : public AbstractGangTask {
3605  protected:
3606   CMSCollector*     _collector;
3607   int               _n_workers;
3608   CMSParMarkTask(const char* name, CMSCollector* collector, int n_workers) :
3609       AbstractGangTask(name),
3610       _collector(collector),
3611       _n_workers(n_workers) {}
3612   // Work method in support of parallel rescan ... of young gen spaces
3613   void do_young_space_rescan(uint worker_id, OopsInGenClosure* cl,
3614                              ContiguousSpace* space,
3615                              HeapWord** chunk_array, size_t chunk_top);
3616   void work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl);
3617 };
3618 
3619 // Parallel initial mark task
3620 class CMSParInitialMarkTask: public CMSParMarkTask {
3621  public:
3622   CMSParInitialMarkTask(CMSCollector* collector, int n_workers) :
3623       CMSParMarkTask("Scan roots and young gen for initial mark in parallel",
3624                      collector, n_workers) {}
3625   void work(uint worker_id);
3626 };
3627 
3628 // Checkpoint the roots into this generation from outside
3629 // this generation. [Note this initial checkpoint need only
3630 // be approximate -- we'll do a catch up phase subsequently.]
3631 void CMSCollector::checkpointRootsInitial(bool asynch) {
3632   assert(_collectorState == InitialMarking, "Wrong collector state");
3633   check_correct_thread_executing();
3634   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
3635 
3636   save_heap_summary();
3637   report_heap_summary(GCWhen::BeforeGC);
3638 
3639   ReferenceProcessor* rp = ref_processor();
3640   SpecializationStats::clear();
3641   assert(_restart_addr == NULL, "Control point invariant");
3642   if (asynch) {
3643     // acquire locks for subsequent manipulations
3644     MutexLockerEx x(bitMapLock(),
3645                     Mutex::_no_safepoint_check_flag);
3646     checkpointRootsInitialWork(asynch);
3647     // enable ("weak") refs discovery
3648     rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
3649     _collectorState = Marking;
3650   } else {
3651     // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
3652     // which recognizes if we are a CMS generation, and doesn't try to turn on
3653     // discovery; verify that they aren't meddling.
3654     assert(!rp->discovery_is_atomic(),
3655            "incorrect setting of discovery predicate");
3656     assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
3657            "ref discovery for this generation kind");
3658     // already have locks
3659     checkpointRootsInitialWork(asynch);
3660     // now enable ("weak") refs discovery
3661     rp->enable_discovery(true /*verify_disabled*/, false /*verify_no_refs*/);
3662     _collectorState = Marking;
3663   }
3664   SpecializationStats::print();
3665 }
3666 
3667 void CMSCollector::checkpointRootsInitialWork(bool asynch) {
3668   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
3669   assert(_collectorState == InitialMarking, "just checking");
3670 
3671   // If there has not been a GC[n-1] since last GC[n] cycle completed,
3672   // precede our marking with a collection of all
3673   // younger generations to keep floating garbage to a minimum.
3674   // XXX: we won't do this for now -- it's an optimization to be done later.
3675 
3676   // already have locks
3677   assert_lock_strong(bitMapLock());
3678   assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
3679 
3680   // Setup the verification and class unloading state for this
3681   // CMS collection cycle.
3682   setup_cms_unloading_and_verification_state();
3683 
3684   NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
3685     PrintGCDetails && Verbose, true, _gc_timer_cm);)
3686   if (UseAdaptiveSizePolicy) {
3687     size_policy()->checkpoint_roots_initial_begin();
3688   }
3689 
3690   // Reset all the PLAB chunk arrays if necessary.
3691   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
3692     reset_survivor_plab_arrays();
3693   }
3694 
3695   ResourceMark rm;
3696   HandleMark  hm;
3697 
3698   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
3699   GenCollectedHeap* gch = GenCollectedHeap::heap();
3700 
3701   verify_work_stacks_empty();
3702   verify_overflow_empty();
3703 
3704   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
3705   // Update the saved marks which may affect the root scans.
3706   gch->save_marks();
3707 
3708   // weak reference processing has not started yet.
3709   ref_processor()->set_enqueuing_is_done(false);
3710 
3711   // Need to remember all newly created CLDs,
3712   // so that we can guarantee that the remark finds them.
3713   ClassLoaderDataGraph::remember_new_clds(true);
3714 
3715   // Whenever a CLD is found, it will be claimed before proceeding to mark
3716   // the klasses. The claimed marks need to be cleared before marking starts.
3717   ClassLoaderDataGraph::clear_claimed_marks();
3718 
3719   if (CMSPrintEdenSurvivorChunks) {
3720     print_eden_and_survivor_chunk_arrays();
3721   }
3722 
3723   {
3724     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3725     if (CMSParallelInitialMarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
3726       // The parallel version.
3727       FlexibleWorkGang* workers = gch->workers();
3728       assert(workers != NULL, "Need parallel worker threads.");
3729       int n_workers = workers->active_workers();
3730       CMSParInitialMarkTask tsk(this, n_workers);
3731       gch->set_par_threads(n_workers);
3732       initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3733       if (n_workers > 1) {
3734         GenCollectedHeap::StrongRootsScope srs(gch);
3735         workers->run_task(&tsk);
3736       } else {
3737         GenCollectedHeap::StrongRootsScope srs(gch);
3738         tsk.work(0);
3739       }
3740       gch->set_par_threads(0);
3741     } else {
3742       // The serial version.
3743       CMKlassClosure klass_closure(&notOlder);
3744       gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3745       gch->gen_process_strong_roots(_cmsGen->level(),
3746                                     true,   // younger gens are roots
3747                                     true,   // activate StrongRootsScope
3748                                     SharedHeap::ScanningOption(roots_scanning_options()),
3749                                     &notOlder,
3750                                     NULL,
3751                                     &klass_closure);
3752     }
3753   }
3754 
3755   // The mod-union table should be clear here; it is dirtied in the
3756   // CMS generation's gc_prologue for each younger generation collection.
3757 
3758   assert(_modUnionTable.isAllClear(),
3759        "Was cleared in most recent final checkpoint phase"
3760        " or no bits are set in the gc_prologue before the start of the next "
3761        "subsequent marking phase.");
3762 
3763   assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
3764 
3765   // Save the end of the used_region of the constituent generations
3766   // to be used to limit the extent of sweep in each generation.
3767   save_sweep_limits();
3768   if (UseAdaptiveSizePolicy) {
3769     size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
3770   }
3771   verify_overflow_empty();
3772 }
3773 
3774 bool CMSCollector::markFromRoots(bool asynch) {
3775   // we might be tempted to assert that:
3776   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
3777   //        "inconsistent argument?");
3778   // However that wouldn't be right, because it's possible that
3779   // a safepoint is indeed in progress as a younger generation
3780   // stop-the-world GC happens even as we mark in this generation.
3781   assert(_collectorState == Marking, "inconsistent state?");
3782   check_correct_thread_executing();
3783   verify_overflow_empty();
3784 
3785   bool res;
3786   if (asynch) {
3787 
3788     // Start the timers for adaptive size policy for the concurrent phases
3789     // Do it here so that the foreground MS can use the concurrent
3790     // timer, since a foreground MS might have the sweep done concurrently
3791     // or STW.
3792     if (UseAdaptiveSizePolicy) {
3793       size_policy()->concurrent_marking_begin();
3794     }
3795 
3796     // Weak ref discovery note: We may be discovering weak
3797     // refs in this generation concurrent (but interleaved) with
3798     // weak ref discovery by a younger generation collector.
3799 
3800     CMSTokenSyncWithLocks ts(true, bitMapLock());
3801     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3802     CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
3803     res = markFromRootsWork(asynch);
3804     if (res) {
3805       _collectorState = Precleaning;
3806     } else { // We failed and a foreground collection wants to take over
3807       assert(_foregroundGCIsActive, "internal state inconsistency");
3808       assert(_restart_addr == NULL,  "foreground will restart from scratch");
3809       if (PrintGCDetails) {
3810         gclog_or_tty->print_cr("bailing out to foreground collection");
3811       }
3812     }
3813     if (UseAdaptiveSizePolicy) {
3814       size_policy()->concurrent_marking_end();
3815     }
3816   } else {
3817     assert(SafepointSynchronize::is_at_safepoint(),
3818            "inconsistent with asynch == false");
3819     if (UseAdaptiveSizePolicy) {
3820       size_policy()->ms_collection_marking_begin();
3821     }
3822     // already have locks
3823     res = markFromRootsWork(asynch);
3824     _collectorState = FinalMarking;
3825     if (UseAdaptiveSizePolicy) {
3826       GenCollectedHeap* gch = GenCollectedHeap::heap();
3827       size_policy()->ms_collection_marking_end(gch->gc_cause());
3828     }
3829   }
3830   verify_overflow_empty();
3831   return res;
3832 }
3833 
3834 bool CMSCollector::markFromRootsWork(bool asynch) {
3835   // iterate over marked bits in bit map, doing a full scan and mark
3836   // from these roots using the following algorithm:
3837   // . if oop is to the right of the current scan pointer,
3838   //   mark corresponding bit (we'll process it later)
3839   // . else (oop is to left of current scan pointer)
3840   //   push oop on marking stack
3841   // . drain the marking stack
3842 
3843   // Note that when we do a marking step we need to hold the
3844   // bit map lock -- recall that direct allocation (by mutators)
3845   // and promotion (by younger generation collectors) is also
3846   // marking the bit map. [the so-called allocate live policy.]
3847   // Because the implementation of bit map marking is not
3848   // robust wrt simultaneous marking of bits in the same word,
3849   // we need to make sure that there is no such interference
3850   // between concurrent such updates.
3851 
3852   // already have locks
3853   assert_lock_strong(bitMapLock());
3854 
3855   verify_work_stacks_empty();
3856   verify_overflow_empty();
3857   bool result = false;
3858   if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
3859     result = do_marking_mt(asynch);
3860   } else {
3861     result = do_marking_st(asynch);
3862   }
3863   return result;
3864 }
3865 
3866 // Forward decl
3867 class CMSConcMarkingTask;
3868 
3869 class CMSConcMarkingTerminator: public ParallelTaskTerminator {
3870   CMSCollector*       _collector;
3871   CMSConcMarkingTask* _task;
3872  public:
3873   virtual void yield();
3874 
3875   // "n_threads" is the number of threads to be terminated.
3876   // "queue_set" is a set of work queues of other threads.
3877   // "collector" is the CMS collector associated with this task terminator.
3878   // "yield" indicates whether we need the gang as a whole to yield.
3879   CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
3880     ParallelTaskTerminator(n_threads, queue_set),
3881     _collector(collector) { }
3882 
3883   void set_task(CMSConcMarkingTask* task) {
3884     _task = task;
3885   }
3886 };
3887 
3888 class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
3889   CMSConcMarkingTask* _task;
3890  public:
3891   bool should_exit_termination();
3892   void set_task(CMSConcMarkingTask* task) {
3893     _task = task;
3894   }
3895 };
3896 
3897 // MT Concurrent Marking Task
3898 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3899   CMSCollector* _collector;
3900   int           _n_workers;                  // requested/desired # workers
3901   bool          _asynch;
3902   bool          _result;
3903   CompactibleFreeListSpace*  _cms_space;
3904   char          _pad_front[64];   // padding to ...
3905   HeapWord*     _global_finger;   // ... avoid sharing cache line
3906   char          _pad_back[64];
3907   HeapWord*     _restart_addr;
3908 
3909   //  Exposed here for yielding support
3910   Mutex* const _bit_map_lock;
3911 
3912   // The per thread work queues, available here for stealing
3913   OopTaskQueueSet*  _task_queues;
3914 
3915   // Termination (and yielding) support
3916   CMSConcMarkingTerminator _term;
3917   CMSConcMarkingTerminatorTerminator _term_term;
3918 
3919  public:
3920   CMSConcMarkingTask(CMSCollector* collector,
3921                  CompactibleFreeListSpace* cms_space,
3922                  bool asynch,
3923                  YieldingFlexibleWorkGang* workers,
3924                  OopTaskQueueSet* task_queues):
3925     YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3926     _collector(collector),
3927     _cms_space(cms_space),
3928     _asynch(asynch), _n_workers(0), _result(true),
3929     _task_queues(task_queues),
3930     _term(_n_workers, task_queues, _collector),
3931     _bit_map_lock(collector->bitMapLock())
3932   {
3933     _requested_size = _n_workers;
3934     _term.set_task(this);
3935     _term_term.set_task(this);
3936     _restart_addr = _global_finger = _cms_space->bottom();
3937   }
3938 
3939 
3940   OopTaskQueueSet* task_queues()  { return _task_queues; }
3941 
3942   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3943 
3944   HeapWord** global_finger_addr() { return &_global_finger; }
3945 
3946   CMSConcMarkingTerminator* terminator() { return &_term; }
3947 
3948   virtual void set_for_termination(int active_workers) {
3949     terminator()->reset_for_reuse(active_workers);
3950   }
3951 
3952   void work(uint worker_id);
3953   bool should_yield() {
3954     return    ConcurrentMarkSweepThread::should_yield()
3955            && !_collector->foregroundGCIsActive()
3956            && _asynch;
3957   }
3958 
3959   virtual void coordinator_yield();  // stuff done by coordinator
3960   bool result() { return _result; }
3961 
3962   void reset(HeapWord* ra) {
3963     assert(_global_finger >= _cms_space->end(),  "Postcondition of ::work(i)");
3964     _restart_addr = _global_finger = ra;
3965     _term.reset_for_reuse();
3966   }
3967 
3968   static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3969                                            OopTaskQueue* work_q);
3970 
3971  private:
3972   void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3973   void do_work_steal(int i);
3974   void bump_global_finger(HeapWord* f);
3975 };
3976 
3977 bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
3978   assert(_task != NULL, "Error");
3979   return _task->yielding();
3980   // Note that we do not need the disjunct || _task->should_yield() above
3981   // because we want terminating threads to yield only if the task
3982   // is already in the midst of yielding, which happens only after at least one
3983   // thread has yielded.
3984 }
3985 
3986 void CMSConcMarkingTerminator::yield() {
3987   if (_task->should_yield()) {
3988     _task->yield();
3989   } else {
3990     ParallelTaskTerminator::yield();
3991   }
3992 }
3993 
3994 ////////////////////////////////////////////////////////////////
3995 // Concurrent Marking Algorithm Sketch
3996 ////////////////////////////////////////////////////////////////
3997 // Until all tasks exhausted (both spaces):
3998 // -- claim next available chunk
3999 // -- bump global finger via CAS
4000 // -- find first object that starts in this chunk
4001 //    and start scanning bitmap from that position
4002 // -- scan marked objects for oops
4003 // -- CAS-mark target, and if successful:
4004 //    . if target oop is above global finger (volatile read)
4005 //      nothing to do
4006 //    . if target oop is in chunk and above local finger
4007 //        then nothing to do
4008 //    . else push on work-queue
4009 // -- Deal with possible overflow issues:
4010 //    . local work-queue overflow causes stuff to be pushed on
4011 //      global (common) overflow queue
4012 //    . always first empty local work queue
4013 //    . then get a batch of oops from global work queue if any
4014 //    . then do work stealing
4015 // -- When all tasks claimed (both spaces)
4016 //    and local work queue empty,
4017 //    then in a loop do:
4018 //    . check global overflow stack; steal a batch of oops and trace
4019 //    . try to steal from other threads if GOS is empty
4020 //    . if neither is available, offer termination
4021 // -- Terminate and return result
4022 //
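// For instance (hypothetical addresses, 1M-word chunks): a worker that has
// claimed the chunk [0x40000000, 0x40100000) keeps a local finger inside that
// chunk while the global finger has been CAS-bumped to the chunk's end. A
// newly CAS-marked oop above the global finger, or inside this chunk but above
// the local finger, will still be reached by a later scan step, so it is not
// pushed; any other oop may already have been passed by the scans, so it is
// pushed on the work queue (or, if the queue is full, the shared overflow
// stack) to be traced explicitly.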
4023 void CMSConcMarkingTask::work(uint worker_id) {
4024   elapsedTimer _timer;
4025   ResourceMark rm;
4026   HandleMark hm;
4027 
4028   DEBUG_ONLY(_collector->verify_overflow_empty();)
4029 
4030   // Before we begin work, our work queue should be empty
4031   assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
4032   // Scan the bitmap covering _cms_space, tracing through grey objects.
4033   _timer.start();
4034   do_scan_and_mark(worker_id, _cms_space);
4035   _timer.stop();
4036   if (PrintCMSStatistics != 0) {
4037     gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
4038       worker_id, _timer.seconds());
4039       // XXX: need xxx/xxx type of notation, two timers
4040   }
4041 
4042   // ... do work stealing
4043   _timer.reset();
4044   _timer.start();
4045   do_work_steal(worker_id);
4046   _timer.stop();
4047   if (PrintCMSStatistics != 0) {
4048     gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
4049       worker_id, _timer.seconds());
4050       // XXX: need xxx/xxx type of notation, two timers
4051   }
4052   assert(_collector->_markStack.isEmpty(), "Should have been emptied");
4053   assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
4054   // Note that under the current task protocol, the
4055   // following assertion is true even if the spaces have
4056   // expanded since the completion of the concurrent
4057   // marking. XXX This will likely change under a strict
4058   // ABORT semantics.
4059   // After perm removal the comparison was changed to
4060   // greater than or equal to from strictly greater than.
4061   // Before perm removal the highest address sweep would
4062   // have been at the end of perm gen but now is at the
4063   // end of the tenured gen.
4064   assert(_global_finger >=  _cms_space->end(),
4065          "All tasks have been completed");
4066   DEBUG_ONLY(_collector->verify_overflow_empty();)
4067 }
4068 
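// Advance the global finger monotonically to at least "f" without taking a
// lock: Atomic::cmpxchg_ptr returns the value it found in memory, so the loop
// exits either when our CAS installs f (cur == read) or when another thread
// has already pushed the finger to an address >= f, making the while
// condition fail.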
4069 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
4070   HeapWord* read = _global_finger;
4071   HeapWord* cur  = read;
4072   while (f > read) {
4073     cur = read;
4074     read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
4075     if (cur == read) {
4076       // our cas succeeded
4077       assert(_global_finger >= f, "protocol consistency");
4078       break;
4079     }
4080   }
4081 }
4082 
4083 // This is really inefficient, and should be redone by
4084 // using (not yet available) block-read and -write interfaces to the
4085 // stack and the work_queue. XXX FIX ME !!!
4086 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
4087                                                       OopTaskQueue* work_q) {
4088   // Fast lock-free check
4089   if (ovflw_stk->length() == 0) {
4090     return false;
4091   }
4092   assert(work_q->size() == 0, "Shouldn't steal");
4093   MutexLockerEx ml(ovflw_stk->par_lock(),
4094                    Mutex::_no_safepoint_check_flag);
4095   // Grab up to 1/4 of the remaining capacity of the work queue
4096   size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
4097                     (size_t)ParGCDesiredObjsFromOverflowList);
4098   num = MIN2(num, ovflw_stk->length());
4099   for (int i = (int) num; i > 0; i--) {
4100     oop cur = ovflw_stk->pop();
4101     assert(cur != NULL, "Counted wrong?");
4102     work_q->push(cur);
4103   }
4104   return num > 0;
4105 }
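
// A worked example of the batch size above (hypothetical numbers): with an
// empty work queue whose max_elems is 16384, ParGCDesiredObjsFromOverflowList
// set to, say, 20, and 1000 oops on the overflow stack, num becomes
// MIN2(MIN2((16384 - 0)/4, 20), 1000) == 20, so at most 20 oops are popped
// from the overflow stack per refill.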
4106 
4107 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
4108   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
4109   int n_tasks = pst->n_tasks();
4110   // We allow that there may be no tasks to do here because
4111   // we are restarting after a stack overflow.
4112   assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
4113   uint nth_task = 0;
4114 
4115   HeapWord* aligned_start = sp->bottom();
4116   if (sp->used_region().contains(_restart_addr)) {
4117     // Align down to a card boundary for the start of 0th task
4118     // for this space.
4119     aligned_start =
4120       (HeapWord*)align_size_down((uintptr_t)_restart_addr,
4121                                  CardTableModRefBS::card_size);
4122   }
4123 
4124   size_t chunk_size = sp->marking_task_size();
4125   while (!pst->is_task_claimed(/* reference */ nth_task)) {
4126     // Having claimed the nth task in this space,
4127     // compute the chunk that it corresponds to:
4128     MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
4129                                aligned_start + (nth_task+1)*chunk_size);
4130     // Try and bump the global finger via a CAS;
4131     // note that we need to do the global finger bump
4132     // _before_ taking the intersection below, because
4133     // the task corresponding to that region will be
4134     // deemed done even if the used_region() expands
4135     // because of allocation -- as it almost certainly will
4136     // during start-up while the threads yield in the
4137     // closure below.
4138     HeapWord* finger = span.end();
4139     bump_global_finger(finger);   // atomically
4140     // There are null tasks here corresponding to chunks
4141     // beyond the "top" address of the space.
4142     span = span.intersection(sp->used_region());
4143     if (!span.is_empty()) {  // Non-null task
4144       HeapWord* prev_obj;
4145       assert(!span.contains(_restart_addr) || nth_task == 0,
4146              "Inconsistency");
4147       if (nth_task == 0) {
4148         // For the 0th task, we'll not need to compute a block_start.
4149         if (span.contains(_restart_addr)) {
4150           // In the case of a restart because of stack overflow,
4151           // we might additionally skip a chunk prefix.
4152           prev_obj = _restart_addr;
4153         } else {
4154           prev_obj = span.start();
4155         }
4156       } else {
4157         // We want to skip the first object because
4158         // the protocol is to scan any object in its entirety
4159         // that _starts_ in this span; a fortiori, any
4160         // object starting in an earlier span is scanned
4161         // as part of an earlier claimed task.
4162         // Below we use the "careful" version of block_start
4163         // so we do not try to navigate uninitialized objects.
4164         prev_obj = sp->block_start_careful(span.start());
4165         // Below we use a variant of block_size that uses the
4166         // Printezis bits to avoid waiting for allocated
4167         // objects to become initialized/parsable.
4168         while (prev_obj < span.start()) {
4169           size_t sz = sp->block_size_no_stall(prev_obj, _collector);
4170           if (sz > 0) {
4171             prev_obj += sz;
4172           } else {
4173             // In this case we may end up doing a bit of redundant
4174             // scanning, but that appears unavoidable, short of
4175             // locking the free list locks; see bug 6324141.
4176             break;
4177           }
4178         }
4179       }
4180       if (prev_obj < span.end()) {
4181         MemRegion my_span = MemRegion(prev_obj, span.end());
4182         // Do the marking work within a non-empty span --
4183         // the last argument to the constructor indicates whether the
4184         // iteration should be incremental with periodic yields.
4185         Par_MarkFromRootsClosure cl(this, _collector, my_span,
4186                                     &_collector->_markBitMap,
4187                                     work_queue(i),
4188                                     &_collector->_markStack,
4189                                     _asynch);
4190         _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
4191       } // else nothing to do for this task
4192     }   // else nothing to do for this task
4193   }
4194   // We'd be tempted to assert here that since there are no
4195   // more tasks left to claim in this space, the global_finger
4196   // must exceed space->top() and a fortiori space->end(). However,
4197   // that would not quite be correct because the bumping of
4198   // global_finger occurs strictly after the claiming of a task,
4199   // so by the time we reach here the global finger may not yet
4200   // have been bumped up by the thread that claimed the last
4201   // task.
4202   pst->all_tasks_completed();
4203 }
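
// Chunk arithmetic for the claiming loop above, with hypothetical numbers:
// if aligned_start is the space bottom, marking_task_size() is 4096 words and
// this worker claims nth_task == 3, its span covers words
// [aligned_start + 3*4096, aligned_start + 4*4096). The global finger is
// CAS-bumped to the span's end before the span is clipped to used_region(),
// chunks lying entirely beyond top() degenerate to empty (null) tasks, and
// scanning begins at the first object that starts inside the clipped span.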
4204 
4205 class Par_ConcMarkingClosure: public CMSOopClosure {
4206  private:
4207   CMSCollector* _collector;
4208   CMSConcMarkingTask* _task;
4209   MemRegion     _span;
4210   CMSBitMap*    _bit_map;
4211   CMSMarkStack* _overflow_stack;
4212   OopTaskQueue* _work_queue;
4213  protected:
4214   DO_OOP_WORK_DEFN
4215  public:
4216   Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
4217                          CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
4218     CMSOopClosure(collector->ref_processor()),
4219     _collector(collector),
4220     _task(task),
4221     _span(collector->_span),
4222     _work_queue(work_queue),
4223     _bit_map(bit_map),
4224     _overflow_stack(overflow_stack)
4225   { }
4226   virtual void do_oop(oop* p);
4227   virtual void do_oop(narrowOop* p);
4228 
4229   void trim_queue(size_t max);
4230   void handle_stack_overflow(HeapWord* lost);
4231   void do_yield_check() {
4232     if (_task->should_yield()) {
4233       _task->yield();
4234     }
4235   }
4236 };
4237 
4238 // Grey object scanning during work stealing phase --
4239 // the salient assumption here is that any references
4240 // that are in these stolen objects being scanned must
4241 // already have been initialized (else they would not have
4242 // been published), so we do not need to check for
4243 // uninitialized objects before pushing here.
4244 void Par_ConcMarkingClosure::do_oop(oop obj) {
4245   assert(obj->is_oop_or_null(true), "expected an oop or NULL");
4246   HeapWord* addr = (HeapWord*)obj;
4247   // Check if oop points into the CMS generation
4248   // and is not marked
4249   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
4250     // a white object ...
4251     // If we manage to "claim" the object, by being the
4252     // first thread to mark it, then we push it on our
4253     // marking stack
4254     if (_bit_map->par_mark(addr)) {     // ... now grey
4255       // push on work queue (grey set)
4256       bool simulate_overflow = false;
4257       NOT_PRODUCT(
4258         if (CMSMarkStackOverflowALot &&
4259             _collector->simulate_overflow()) {
4260           // simulate a stack overflow
4261           simulate_overflow = true;
4262         }
4263       )
4264       if (simulate_overflow ||
4265           !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
4266         // stack overflow
4267         if (PrintCMSStatistics != 0) {
4268           gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
4269                                  SIZE_FORMAT, _overflow_stack->capacity());
4270         }
4271         // We cannot assert that the overflow stack is full because
4272         // it may have been emptied since.
4273         assert(simulate_overflow ||
4274                _work_queue->size() == _work_queue->max_elems(),
4275               "Else push should have succeeded");
4276         handle_stack_overflow(addr);
4277       }
4278     } // Else, some other thread got there first
4279     do_yield_check();
4280   }
4281 }
4282 
4283 void Par_ConcMarkingClosure::do_oop(oop* p)       { Par_ConcMarkingClosure::do_oop_work(p); }
4284 void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
4285 
4286 void Par_ConcMarkingClosure::trim_queue(size_t max) {
4287   while (_work_queue->size() > max) {
4288     oop new_oop;
4289     if (_work_queue->pop_local(new_oop)) {
4290       assert(new_oop->is_oop(), "Should be an oop");
4291       assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
4292       assert(_span.contains((HeapWord*)new_oop), "Not in span");
4293       new_oop->oop_iterate(this);  // do_oop() above
4294       do_yield_check();
4295     }
4296   }
4297 }
4298 
4299 // Upon stack overflow, we discard (part of) the stack,
4300 // remembering the least address amongst those discarded
4301 // in CMSCollector's _restart_address.
4302 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
4303   // We need to do this under a mutex to prevent other
4304   // workers from interfering with the work done below.
4305   MutexLockerEx ml(_overflow_stack->par_lock(),
4306                    Mutex::_no_safepoint_check_flag);
4307   // Remember the least grey address discarded
4308   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
4309   _collector->lower_restart_addr(ra);
4310   _overflow_stack->reset();  // discard stack contents
4311   _overflow_stack->expand(); // expand the stack if possible
4312 }
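
// Discarding stack entries above loses no marking work permanently: the least
// discarded address is recorded via lower_restart_addr(), and do_marking_mt()
// later re-initializes the marking tasks and restarts the bit map iteration
// from that _restart_addr, so everything at or above it is revisited.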
4313 
4314 
4315 void CMSConcMarkingTask::do_work_steal(int i) {
4316   OopTaskQueue* work_q = work_queue(i);
4317   oop obj_to_scan;
4318   CMSBitMap* bm = &(_collector->_markBitMap);
4319   CMSMarkStack* ovflw = &(_collector->_markStack);
4320   int* seed = _collector->hash_seed(i);
4321   Par_ConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
4322   while (true) {
4323     cl.trim_queue(0);
4324     assert(work_q->size() == 0, "Should have been emptied above");
4325     if (get_work_from_overflow_stack(ovflw, work_q)) {
4326       // Can't assert below because the work obtained from the
4327       // overflow stack may already have been stolen from us.
4328       // assert(work_q->size() > 0, "Work from overflow stack");
4329       continue;
4330     } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4331       assert(obj_to_scan->is_oop(), "Should be an oop");
4332       assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
4333       obj_to_scan->oop_iterate(&cl);
4334     } else if (terminator()->offer_termination(&_term_term)) {
4335       assert(work_q->size() == 0, "Impossible!");
4336       break;
4337     } else if (yielding() || should_yield()) {
4338       yield();
4339     }
4340   }
4341 }
4342 
4343 // This is run by the CMS (coordinator) thread.
4344 void CMSConcMarkingTask::coordinator_yield() {
4345   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4346          "CMS thread should hold CMS token");
4347   // First give up the locks, then yield, then re-lock
4348   // We should probably use a constructor/destructor idiom to
4349   // do this unlock/lock or modify the MutexUnlocker class to
4350   // serve our purpose. XXX
4351   assert_lock_strong(_bit_map_lock);
4352   _bit_map_lock->unlock();
4353   ConcurrentMarkSweepThread::desynchronize(true);
4354   ConcurrentMarkSweepThread::acknowledge_yield_request();
4355   _collector->stopTimer();
4356   if (PrintCMSStatistics != 0) {
4357     _collector->incrementYields();
4358   }
4359   _collector->icms_wait();
4360 
4361   // It is possible for whichever thread initiated the yield request
4362   // not to get a chance to wake up and take the bitmap lock between
4363   // this thread releasing it and reacquiring it. So, while the
4364   // should_yield() flag is on, let's sleep for a bit to give the
4365   // other thread a chance to wake up. The limit imposed on the number
4366   // of iterations is defensive, to avoid any unforeseen circumstances
4367   // putting us into an infinite loop. Since it's always been this
4368   // (coordinator_yield()) method that was observed to cause the
4369   // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
4370   // which is by default non-zero. For the other seven methods that
4371   // also perform the yield operation, we are using a different
4372   // parameter (CMSYieldSleepCount) which is by default zero. This way we
4373   // can enable the sleeping for those methods too, if necessary.
4374   // See 6442774.
4375   //
4376   // We really need to reconsider the synchronization between the GC
4377   // thread and the yield-requesting threads in the future and we
4378   // should really use wait/notify, which is the recommended
4379   // way of doing this type of interaction. Additionally, we should
4380   // consolidate the eight methods that do the yield operation and they
4381   // are almost identical into one for better maintainability and
4382   // readability. See 6445193.
4383   //
4384   // Tony 2006.06.29
4385   for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
4386                    ConcurrentMarkSweepThread::should_yield() &&
4387                    !CMSCollector::foregroundGCIsActive(); ++i) {
4388     os::sleep(Thread::current(), 1, false);
4389     ConcurrentMarkSweepThread::acknowledge_yield_request();
4390   }
4391 
4392   ConcurrentMarkSweepThread::synchronize(true);
4393   _bit_map_lock->lock_without_safepoint_check();
4394   _collector->startTimer();
4395 }
4396 
4397 bool CMSCollector::do_marking_mt(bool asynch) {
4398   assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
4399   int num_workers = AdaptiveSizePolicy::calc_active_conc_workers(
4400                                        conc_workers()->total_workers(),
4401                                        conc_workers()->active_workers(),
4402                                        Threads::number_of_non_daemon_threads());
4403   conc_workers()->set_active_workers(num_workers);
4404 
4405   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
4406 
4407   CMSConcMarkingTask tsk(this,
4408                          cms_space,
4409                          asynch,
4410                          conc_workers(),
4411                          task_queues());
4412 
4413   // Since the actual number of workers we get may be different
4414   // from the number we requested above, do we need to do anything different
4415   // below? In particular, maybe we need to subclass the SequentialSubTasksDone
4416   // class?? XXX
4417   cms_space ->initialize_sequential_subtasks_for_marking(num_workers);
4418 
4419   // Refs discovery is already non-atomic.
4420   assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
4421   assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
4422   conc_workers()->start_task(&tsk);
4423   while (tsk.yielded()) {
4424     tsk.coordinator_yield();
4425     conc_workers()->continue_task(&tsk);
4426   }
4427   // If the task was aborted, _restart_addr will be non-NULL
4428   assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
4429   while (_restart_addr != NULL) {
4430     // XXX For now we do not make use of ABORTED state and have not
4431     // yet implemented the right abort semantics (even in the original
4432     // single-threaded CMS case). That needs some more investigation
4433     // and is deferred for now; see CR# TBF. 07252005YSR. XXX
4434     assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
4435     // If _restart_addr is non-NULL, a marking stack overflow
4436     // occurred; we need to do a fresh marking iteration from the
4437     // indicated restart address.
4438     if (_foregroundGCIsActive && asynch) {
4439       // We may be running into repeated stack overflows, having
4440       // reached the limit of the stack size, while making very
4441       // slow forward progress. It may be best to bail out and
4442       // let the foreground collector do its job.
4443       // Clear _restart_addr, so that foreground GC
4444       // works from scratch. This avoids the headache of
4445       // a "rescan" which would otherwise be needed because
4446       // of the dirty mod union table & card table.
4447       _restart_addr = NULL;
4448       return false;
4449     }
4450     // Adjust the task to restart from _restart_addr
4451     tsk.reset(_restart_addr);
4452     cms_space ->initialize_sequential_subtasks_for_marking(num_workers,
4453                   _restart_addr);
4454     _restart_addr = NULL;
4455     // Get the workers going again
4456     conc_workers()->start_task(&tsk);
4457     while (tsk.yielded()) {
4458       tsk.coordinator_yield();
4459       conc_workers()->continue_task(&tsk);
4460     }
4461   }
4462   assert(tsk.completed(), "Inconsistency");
4463   assert(tsk.result() == true, "Inconsistency");
4464   return true;
4465 }
4466 
4467 bool CMSCollector::do_marking_st(bool asynch) {
4468   ResourceMark rm;
4469   HandleMark   hm;
4470 
4471   // Temporarily make refs discovery single threaded (non-MT)
4472   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
4473   MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
4474     &_markStack, CMSYield && asynch);
4475   // the last argument to iterate indicates whether the iteration
4476   // should be incremental with periodic yields.
4477   _markBitMap.iterate(&markFromRootsClosure);
4478   // If _restart_addr is non-NULL, a marking stack overflow
4479   // occurred; we need to do a fresh iteration from the
4480   // indicated restart address.
4481   while (_restart_addr != NULL) {
4482     if (_foregroundGCIsActive && asynch) {
4483       // We may be running into repeated stack overflows, having
4484       // reached the limit of the stack size, while making very
4485       // slow forward progress. It may be best to bail out and
4486       // let the foreground collector do its job.
4487       // Clear _restart_addr, so that foreground GC
4488       // works from scratch. This avoids the headache of
4489       // a "rescan" which would otherwise be needed because
4490       // of the dirty mod union table & card table.
4491       _restart_addr = NULL;
4492       return false;  // indicating failure to complete marking
4493     }
4494     // Deal with stack overflow:
4495     // we restart marking from _restart_addr
4496     HeapWord* ra = _restart_addr;
4497     markFromRootsClosure.reset(ra);
4498     _restart_addr = NULL;
4499     _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
4500   }
4501   return true;
4502 }
4503 
4504 void CMSCollector::preclean() {
4505   check_correct_thread_executing();
4506   assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
4507   verify_work_stacks_empty();
4508   verify_overflow_empty();
4509   _abort_preclean = false;
4510   if (CMSPrecleaningEnabled) {
4511     if (!CMSEdenChunksRecordAlways) {
4512       _eden_chunk_index = 0;
4513     }
4514     size_t used = get_eden_used();
4515     size_t capacity = get_eden_capacity();
4516     // Don't start sampling unless we will get sufficiently
4517     // many samples.
4518     if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
4519                 * CMSScheduleRemarkEdenPenetration)) {
4520       _start_sampling = true;
4521     } else {
4522       _start_sampling = false;
4523     }
4524     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4525     CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
4526     preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
4527   }
4528   CMSTokenSync x(true); // is cms thread
4529   if (CMSPrecleaningEnabled) {
4530     sample_eden();
4531     _collectorState = AbortablePreclean;
4532   } else {
4533     _collectorState = FinalMarking;
4534   }
4535   verify_work_stacks_empty();
4536   verify_overflow_empty();
4537 }
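
// The sampling-start guard in preclean() above, worked through with, e.g.,
// CMSScheduleRemarkSamplingRatio == 5 and CMSScheduleRemarkEdenPenetration ==
// 50: the test reduces to used < capacity / (5 * 100) * 50, roughly one tenth
// of Eden's capacity, so sampling for remark scheduling starts only when Eden
// is still near-empty and enough samples can accumulate before the
// penetration target is reached.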
4538 
4539 // Try and schedule the remark such that young gen
4540 // occupancy is CMSScheduleRemarkEdenPenetration %.
4541 void CMSCollector::abortable_preclean() {
4542   check_correct_thread_executing();
4543   assert(CMSPrecleaningEnabled,  "Inconsistent control state");
4544   assert(_collectorState == AbortablePreclean, "Inconsistent control state");
4545 
4546   // If Eden's current occupancy is below this threshold,
4547   // immediately schedule the remark; else preclean
4548   // past the next scavenge in an effort to
4549   // schedule the pause as described above. By choosing
4550   // CMSScheduleRemarkEdenSizeThreshold >= max eden size
4551   // we will never do an actual abortable preclean cycle.
4552   if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
4553     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4554     CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
4555     // We need more smarts in the abortable preclean
4556     // loop below to deal with cases where allocation
4557     // in young gen is very very slow, and our precleaning
4558     // is running a losing race against a horde of
4559     // mutators intent on flooding us with CMS updates
4560     // (dirty cards).
4561     // One, admittedly dumb, strategy is to give up
4562     // after a certain number of abortable precleaning loops
4563     // or after a certain maximum time. We want to make
4564     // this smarter in the next iteration.
4565     // XXX FIX ME!!! YSR
4566     size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
4567     while (!(should_abort_preclean() ||
4568              ConcurrentMarkSweepThread::should_terminate())) {
4569       workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
4570       cumworkdone += workdone;
4571       loops++;
4572       // Voluntarily terminate abortable preclean phase if we have
4573       // been at it for too long.
4574       if ((CMSMaxAbortablePrecleanLoops != 0) &&
4575           loops >= CMSMaxAbortablePrecleanLoops) {
4576         if (PrintGCDetails) {
4577           gclog_or_tty->print(" CMS: abort preclean due to loops ");
4578         }
4579         break;
4580       }
4581       if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
4582         if (PrintGCDetails) {
4583           gclog_or_tty->print(" CMS: abort preclean due to time ");
4584         }
4585         break;
4586       }
4587       // If we are doing little work each iteration, we should
4588       // take a short break.
4589       if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
4590         // Sleep for some time, waiting for work to accumulate
4591         stopTimer();
4592         cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
4593         startTimer();
4594         waited++;
4595       }
4596     }
4597     if (PrintCMSStatistics > 0) {
4598       gclog_or_tty->print(" [" SIZE_FORMAT " iterations, " SIZE_FORMAT " waits, " SIZE_FORMAT " cards] ",
4599                           loops, waited, cumworkdone);
4600     }
4601   }
4602   CMSTokenSync x(true); // is cms thread
4603   if (_collectorState != Idling) {
4604     assert(_collectorState == AbortablePreclean,
4605            "Spontaneous state transition?");
4606     _collectorState = FinalMarking;
4607   } // Else, a foreground collection completed this CMS cycle.
4608   return;
4609 }
4610 
4611 // Respond to an Eden sampling opportunity
4612 void CMSCollector::sample_eden() {
4613   // Make sure a young gc cannot sneak in between our
4614   // reading and recording of a sample.
4615   assert(Thread::current()->is_ConcurrentGC_thread(),
4616          "Only the cms thread may collect Eden samples");
4617   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4618          "Should collect samples while holding CMS token");
4619   if (!_start_sampling) {
4620     return;
4621   }
4622   // When CMSEdenChunksRecordAlways is true, the eden chunk array
4623   // is populated by the young generation.
4624   if (_eden_chunk_array != NULL && !CMSEdenChunksRecordAlways) {
4625     if (_eden_chunk_index < _eden_chunk_capacity) {
4626       _eden_chunk_array[_eden_chunk_index] = *_top_addr;   // take sample
4627       assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4628              "Unexpected state of Eden");
4629       // We'd like to check that what we just sampled is an oop-start address;
4630       // however, we cannot do that here since the object may not yet have been
4631       // initialized. So we'll instead do the check when we _use_ this sample
4632       // later.
4633       if (_eden_chunk_index == 0 ||
4634           (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4635                          _eden_chunk_array[_eden_chunk_index-1])
4636            >= CMSSamplingGrain)) {
4637         _eden_chunk_index++;  // commit sample
4638       }
4639     }
4640   }
4641   if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
4642     size_t used = get_eden_used();
4643     size_t capacity = get_eden_capacity();
4644     assert(used <= capacity, "Unexpected state of Eden");
4645     if (used >  (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
4646       _abort_preclean = true;
4647     }
4648   }
4649 }
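
// The abort check above, worked through with a hypothetical 512M Eden and
// CMSScheduleRemarkEdenPenetration == 50: once a sample sees used exceed
// 512M / 100 * 50 == 256M, _abort_preclean is set, should_abort_preclean()
// makes the abortable preclean loop exit, and the remark pause is scheduled
// at roughly the desired 50% Eden occupancy.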
4650 
4651 
4652 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
4653   assert(_collectorState == Precleaning ||
4654          _collectorState == AbortablePreclean, "incorrect state");
4655   ResourceMark rm;
4656   HandleMark   hm;
4657 
4658   // Precleaning is currently not MT but the reference processor
4659   // may be set for MT.  Disable it temporarily here.
4660   ReferenceProcessor* rp = ref_processor();
4661   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
4662 
4663   // Do one pass of scrubbing the discovered reference lists
4664   // to remove any reference objects with strongly-reachable
4665   // referents.
4666   if (clean_refs) {
4667     CMSPrecleanRefsYieldClosure yield_cl(this);
4668     assert(rp->span().equals(_span), "Spans should be equal");
4669     CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
4670                                    &_markStack, true /* preclean */);
4671     CMSDrainMarkingStackClosure complete_trace(this,
4672                                    _span, &_markBitMap, &_markStack,
4673                                    &keep_alive, true /* preclean */);
4674 
4675     // We don't want this step to interfere with a young
4676     // collection because we don't want to take CPU
4677     // or memory bandwidth away from the young GC threads
4678     // (which may be as many as there are CPUs).
4679     // Note that we don't need to protect ourselves from
4680     // interference with mutators because they can't
4681     // manipulate the discovered reference lists nor affect
4682     // the computed reachability of the referents, the
4683     // only properties manipulated by the precleaning
4684     // of these reference lists.
4685     stopTimer();
4686     CMSTokenSyncWithLocks x(true /* is cms thread */,
4687                             bitMapLock());
4688     startTimer();
4689     sample_eden();
4690 
4691     // The following will yield to allow foreground
4692     // collection to proceed promptly. XXX YSR:
4693     // The code in this method may need further
4694     // tweaking for better performance and some restructuring
4695     // for cleaner interfaces.
4696     GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
4697     rp->preclean_discovered_references(
4698           rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
4699           gc_timer);
4700   }
4701 
4702   if (clean_survivor) {  // preclean the active survivor space(s)
4703     assert(_young_gen->kind() == Generation::DefNew ||
4704            _young_gen->kind() == Generation::ParNew ||
4705            _young_gen->kind() == Generation::ASParNew,
4706          "incorrect type for cast");
4707     DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
4708     PushAndMarkClosure pam_cl(this, _span, ref_processor(),
4709                              &_markBitMap, &_modUnionTable,
4710                              &_markStack, true /* precleaning phase */);
4711     stopTimer();
4712     CMSTokenSyncWithLocks ts(true /* is cms thread */,
4713                              bitMapLock());
4714     startTimer();
4715     unsigned int before_count =
4716       GenCollectedHeap::heap()->total_collections();
4717     SurvivorSpacePrecleanClosure
4718       sss_cl(this, _span, &_markBitMap, &_markStack,
4719              &pam_cl, before_count, CMSYield);
4720     dng->from()->object_iterate_careful(&sss_cl);
4721     dng->to()->object_iterate_careful(&sss_cl);
4722   }
4723   MarkRefsIntoAndScanClosure
4724     mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
4725              &_markStack, this, CMSYield,
4726              true /* precleaning phase */);
4727   // CAUTION: The following closure has persistent state that may need to
4728   // be reset upon a decrease in the sequence of addresses it
4729   // processes.
4730   ScanMarkedObjectsAgainCarefullyClosure
4731     smoac_cl(this, _span,
4732       &_markBitMap, &_markStack, &mrias_cl, CMSYield);
4733 
4734   // Preclean dirty cards in ModUnionTable and CardTable using
4735   // appropriate convergence criterion;
4736   // repeat CMSPrecleanIter times unless we find that
4737   // we are losing.
4738   assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
4739   assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
4740          "Bad convergence multiplier");
4741   assert(CMSPrecleanThreshold >= 100,
4742          "Unreasonably low CMSPrecleanThreshold");
4743 
4744   size_t numIter, cumNumCards, lastNumCards, curNumCards;
4745   for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
4746        numIter < CMSPrecleanIter;
4747        numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
4748     curNumCards  = preclean_mod_union_table(_cmsGen, &smoac_cl);
4749     if (Verbose && PrintGCDetails) {
4750       gclog_or_tty->print(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards);
4751     }
4752     // Either there are very few dirty cards, so re-mark
4753     // pause will be small anyway, or our pre-cleaning isn't
4754     // that much faster than the rate at which cards are being
4755     // dirtied, so we might as well stop and re-mark since
4756     // precleaning won't improve our re-mark time by much.
4757     if (curNumCards <= CMSPrecleanThreshold ||
4758         (numIter > 0 &&
4759          (curNumCards * CMSPrecleanDenominator >
4760          lastNumCards * CMSPrecleanNumerator))) {
4761       numIter++;
4762       cumNumCards += curNumCards;
4763       break;
4764     }
4765   }
4766 
4767   preclean_klasses(&mrias_cl, _cmsGen->freelistLock());
4768 
4769   curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
4770   cumNumCards += curNumCards;
4771   if (PrintGCDetails && PrintCMSStatistics != 0) {
4772     gclog_or_tty->print_cr(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)",
4773                   curNumCards, cumNumCards, numIter);
4774   }
4775   return cumNumCards;   // as a measure of useful work done
4776 }
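
// Convergence example for the loop above (hypothetical card counts, with a
// CMSPrecleanNumerator/CMSPrecleanDenominator ratio of 2/3 and a
// CMSPrecleanThreshold of 1000): successive passes yielding 12000, 5000 and
// 4000 dirty cards stop after the third pass, because 4000 * 3 > 5000 * 2 --
// the count is no longer shrinking by at least a third per pass -- whereas a
// pass that drops to 1000 cards or fewer stops immediately on the threshold
// test.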
4777 
4778 // PRECLEANING NOTES:
4779 // Precleaning involves:
4780 // . reading the bits of the modUnionTable and clearing the set bits.
4781 // . For the cards corresponding to the set bits, we scan the
4782 //   objects on those cards. This means we need the free_list_lock
4783 //   so that we can safely iterate over the CMS space when scanning
4784 //   for oops.
4785 // . When we scan the objects, we'll be both reading and setting
4786 //   marks in the marking bit map, so we'll need the marking bit map.
4787 // . For protecting _collector_state transitions, we take the CGC_lock.
4788 //   Note that any races in the reading of card table entries by the
4789 //   CMS thread on the one hand and the clearing of those entries by the
4790 //   VM thread or the setting of those entries by the mutator threads on the
4791 //   other are quite benign. However, for efficiency it makes sense to keep
4792 //   the VM thread from racing with the CMS thread while the latter is
4793 //   reading dirty card info from the card table and the modUnionTable. We
4794 //   therefore also use the CGC_lock to protect the reading of the card table
4795 //   and the mod union table by the CMS thread.
4796 // . We run concurrently with mutator updates, so scanning
4797 //   needs to be done carefully  -- we should not try to scan
4798 //   potentially uninitialized objects.
4799 //
4800 // Locking strategy: While holding the CGC_lock, we scan over and
4801 // reset a maximal dirty range of the mod union / card tables, then lock
4802 // the free_list_lock and bitmap lock to do a full marking, then
4803 // release these locks; and repeat the cycle. This allows for a
4804 // certain amount of fairness in the sharing of these locks between
4805 // the CMS collector on the one hand, and the VM thread and the
4806 // mutators on the other.
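//
// In code terms, the cycle in preclean_mod_union_table() and
// preclean_card_table() below looks roughly like this (simplified sketch,
// not the literal code; the helper names are placeholders):
//
//   while (nextAddr < endAddr) {
//     MemRegion dirtyRegion;
//     { CMSTokenSync x(true);                 // CGC_lock handshake
//       dirtyRegion = get_and_reset_next_dirty_range(nextAddr, endAddr); }
//     if (!dirtyRegion.is_empty()) {
//       CMSTokenSyncWithLocks ts(true, freelistLock(), bitMapLock());
//       scan_objects_carefully(dirtyRegion);  // may redirty a suffix and abort
//     }
//   }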
4807 
4808 // NOTE: preclean_mod_union_table() and preclean_card_table()
4809 // further below are largely identical; if you need to modify
4810 // one of these methods, please check the other method too.
4811 
4812 size_t CMSCollector::preclean_mod_union_table(
4813   ConcurrentMarkSweepGeneration* gen,
4814   ScanMarkedObjectsAgainCarefullyClosure* cl) {
4815   verify_work_stacks_empty();
4816   verify_overflow_empty();
4817 
4818   // strategy: starting with the first card, accumulate contiguous
4819   // ranges of dirty cards; clear these cards, then scan the region
4820   // covered by these cards.
4821 
4822   // Since all of the MUT is committed ahead, we can just use
4823   // that, in case the generations expand while we are precleaning.
4824   // It might also be fine to just use the committed part of the
4825   // generation, but we might potentially miss cards when the
4826   // generation is rapidly expanding while we are in the midst
4827   // of precleaning.
4828   HeapWord* startAddr = gen->reserved().start();
4829   HeapWord* endAddr   = gen->reserved().end();
4830 
4831   cl->setFreelistLock(gen->freelistLock());   // needed for yielding
4832 
4833   size_t numDirtyCards, cumNumDirtyCards;
4834   HeapWord *nextAddr, *lastAddr;
4835   for (cumNumDirtyCards = numDirtyCards = 0,
4836        nextAddr = lastAddr = startAddr;
4837        nextAddr < endAddr;
4838        nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4839 
4840     ResourceMark rm;
4841     HandleMark   hm;
4842 
4843     MemRegion dirtyRegion;
4844     {
4845       stopTimer();
4846       // Potential yield point
4847       CMSTokenSync ts(true);
4848       startTimer();
4849       sample_eden();
4850       // Get dirty region starting at nextAddr (inclusive),
4851       // simultaneously clearing it.
4852       dirtyRegion =
4853         _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
4854       assert(dirtyRegion.start() >= nextAddr,
4855              "returned region inconsistent?");
4856     }
4857     // Remember where the next search should begin.
4858     // The returned region (if non-empty) is a right open interval,
4859     // so lastAddr is obtained from the right end of that
4860     // interval.
4861     lastAddr = dirtyRegion.end();
4862     // Should do something more transparent and less hacky XXX
4863     numDirtyCards =
4864       _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
4865 
4866     // We'll scan the cards in the dirty region (with periodic
4867     // yields for foreground GC as needed).
4868     if (!dirtyRegion.is_empty()) {
4869       assert(numDirtyCards > 0, "consistency check");
4870       HeapWord* stop_point = NULL;
4871       stopTimer();
4872       // Potential yield point
4873       CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
4874                                bitMapLock());
4875       startTimer();
4876       {
4877         verify_work_stacks_empty();
4878         verify_overflow_empty();
4879         sample_eden();
4880         stop_point =
4881           gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4882       }
4883       if (stop_point != NULL) {
4884         // The careful iteration stopped early either because it found an
4885         // uninitialized object, or because we were in the midst of an
4886         // "abortable preclean", which should now be aborted. Redirty
4887         // the bits corresponding to the partially-scanned or unscanned
4888         // cards. We'll either restart at the next block boundary or
4889         // abort the preclean.
4890         assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4891                "Should only be AbortablePreclean.");
4892         _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
4893         if (should_abort_preclean()) {
4894           break; // out of preclean loop
4895         } else {
4896           // Compute the next address at which preclean should pick up;
4897           // might need bitMapLock in order to read P-bits.
4898           lastAddr = next_card_start_after_block(stop_point);
4899         }
4900       }
4901     } else {
4902       assert(lastAddr == endAddr, "consistency check");
4903       assert(numDirtyCards == 0, "consistency check");
4904       break;
4905     }
4906   }
4907   verify_work_stacks_empty();
4908   verify_overflow_empty();
4909   return cumNumDirtyCards;
4910 }
4911 
4912 // NOTE: preclean_mod_union_table() above and preclean_card_table()
4913 // below are largely identical; if you need to modify
4914 // one of these methods, please check the other method too.
4915 
4916 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
4917   ScanMarkedObjectsAgainCarefullyClosure* cl) {
4918   // strategy: it's similar to preclean_mod_union_table above, in that
4919   // we accumulate contiguous ranges of dirty cards, mark these cards
4920   // precleaned, then scan the region covered by these cards.
4921   HeapWord* endAddr   = (HeapWord*)(gen->_virtual_space.high());
4922   HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
4923 
4924   cl->setFreelistLock(gen->freelistLock());   // needed for yielding
4925 
4926   size_t numDirtyCards, cumNumDirtyCards;
4927   HeapWord *lastAddr, *nextAddr;
4928 
4929   for (cumNumDirtyCards = numDirtyCards = 0,
4930        nextAddr = lastAddr = startAddr;
4931        nextAddr < endAddr;
4932        nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4933 
4934     ResourceMark rm;
4935     HandleMark   hm;
4936 
4937     MemRegion dirtyRegion;
4938     {
4939       // See comments in "Precleaning notes" above on why we
4940       // do this locking. XXX Could the locking overheads be
4941       // too high when dirty cards are sparse? [I don't think so.]
4942       stopTimer();
4943       CMSTokenSync x(true); // is cms thread
4944       startTimer();
4945       sample_eden();
4946       // Get and clear dirty region from card table
4947       dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
4948                                     MemRegion(nextAddr, endAddr),
4949                                     true,
4950                                     CardTableModRefBS::precleaned_card_val());
4951 
4952       assert(dirtyRegion.start() >= nextAddr,
4953              "returned region inconsistent?");
4954     }
4955     lastAddr = dirtyRegion.end();
4956     numDirtyCards =
4957       dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
4958 
4959     if (!dirtyRegion.is_empty()) {
4960       stopTimer();
4961       CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
4962       startTimer();
4963       sample_eden();
4964       verify_work_stacks_empty();
4965       verify_overflow_empty();
4966       HeapWord* stop_point =
4967         gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4968       if (stop_point != NULL) {
4969         assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4970                "Should only be AbortablePreclean.");
4971         _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4972         if (should_abort_preclean()) {
4973           break; // out of preclean loop
4974         } else {
4975           // Compute the next address at which preclean should pick up.
4976           lastAddr = next_card_start_after_block(stop_point);
4977         }
4978       }
4979     } else {
4980       break;
4981     }
4982   }
4983   verify_work_stacks_empty();
4984   verify_overflow_empty();
4985   return cumNumDirtyCards;
4986 }
4987 
4988 class PrecleanKlassClosure : public KlassClosure {
4989   CMKlassClosure _cm_klass_closure;
4990  public:
4991   PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4992   void do_klass(Klass* k) {
4993     if (k->has_accumulated_modified_oops()) {
4994       k->clear_accumulated_modified_oops();
4995 
4996       _cm_klass_closure.do_klass(k);
4997     }
4998   }
4999 };
5000 
5001 // The freelist lock is needed to prevent asserts, is it really needed?
5002 void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
5003 
5004   cl->set_freelistLock(freelistLock);
5005 
5006   CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
5007 
5008   // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
5009   // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
5010   PrecleanKlassClosure preclean_klass_closure(cl);
5011   ClassLoaderDataGraph::classes_do(&preclean_klass_closure);
5012 
5013   verify_work_stacks_empty();
5014   verify_overflow_empty();
5015 }
5016 
5017 void CMSCollector::checkpointRootsFinal(bool asynch,
5018   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
5019   assert(_collectorState == FinalMarking, "incorrect state transition?");
5020   check_correct_thread_executing();
5021   // world is stopped at this checkpoint
5022   assert(SafepointSynchronize::is_at_safepoint(),
5023          "world should be stopped");
5024   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
5025 
5026   verify_work_stacks_empty();
5027   verify_overflow_empty();
5028 
5029   SpecializationStats::clear();
5030   if (PrintGCDetails) {
5031     gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
5032                         _young_gen->used() / K,
5033                         _young_gen->capacity() / K);
5034   }
5035   if (asynch) {
5036     if (CMSScavengeBeforeRemark) {
5037       GenCollectedHeap* gch = GenCollectedHeap::heap();
5038       // Temporarily set flag to false, GCH->do_collection will
5039       // expect it to be false and will set it to true
5040       FlagSetting fl(gch->_is_gc_active, false);
5041       NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
5042         PrintGCDetails && Verbose, true, _gc_timer_cm);)
5043       int level = _cmsGen->level() - 1;
5044       if (level >= 0) {
5045         gch->do_collection(true,        // full (i.e. force, see below)
5046                            false,       // !clear_all_soft_refs
5047                            0,           // size
5048                            false,       // is_tlab
5049                            level        // max_level
5050                           );
5051       }
5052     }
5053     FreelistLocker x(this);
5054     MutexLockerEx y(bitMapLock(),
5055                     Mutex::_no_safepoint_check_flag);
5056     assert(!init_mark_was_synchronous, "but that's impossible!");
5057     checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
5058   } else {
5059     // already have all the locks
5060     checkpointRootsFinalWork(asynch, clear_all_soft_refs,
5061                              init_mark_was_synchronous);
5062   }
5063   verify_work_stacks_empty();
5064   verify_overflow_empty();
5065   SpecializationStats::print();
5066 }
5067 
5068 void CMSCollector::checkpointRootsFinalWork(bool asynch,
5069   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
5070 
5071   NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm);)
5072 
5073   assert(haveFreelistLocks(), "must have free list locks");
5074   assert_lock_strong(bitMapLock());
5075 
5076   if (UseAdaptiveSizePolicy) {
5077     size_policy()->checkpoint_roots_final_begin();
5078   }
5079 
5080   ResourceMark rm;
5081   HandleMark   hm;
5082 
5083   GenCollectedHeap* gch = GenCollectedHeap::heap();
5084 
5085   if (should_unload_classes()) {
5086     CodeCache::gc_prologue();
5087   }
5088   assert(haveFreelistLocks(), "must have free list locks");
5089   assert_lock_strong(bitMapLock());
5090 
5091   if (!init_mark_was_synchronous) {
5092     // We might assume that we need not fill TLAB's when
5093     // CMSScavengeBeforeRemark is set, because we may have just done
5094     // a scavenge which would have filled all TLAB's -- and besides
5095     // Eden would be empty. This however may not always be the case --
5096     // for instance although we asked for a scavenge, it may not have
5097     // happened because of a JNI critical section. We probably need
5098     // a policy for deciding whether we can in that case wait until
5099     // the critical section releases and then do the remark following
5100     // the scavenge, and skip it here. In the absence of that policy,
5101     // or of an indication of whether the scavenge did indeed occur,
5102     // we cannot rely on TLAB's having been filled and must do
5103     // so here just in case a scavenge did not happen.
5104     gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
5105     // Update the saved marks which may affect the root scans.
5106     gch->save_marks();
5107 
5108     if (CMSPrintEdenSurvivorChunks) {
5109       print_eden_and_survivor_chunk_arrays();
5110     }
5111 
5112     {
5113       COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
5114 
5115       // Note on the role of the mod union table:
5116       // Since the marker in "markFromRoots" marks concurrently with
5117       // mutators, it is possible for some reachable objects not to have been
5118       // scanned. For instance, the only reference to an object A may have
5119       // been placed in object B after the marker scanned B. Unless B is
5120       // rescanned, A would be incorrectly collected. Such updates to
5121       // references in marked objects are detected via the mod union table,
5122       // which is the set of all cards dirtied since the first checkpoint in
5123       // this GC cycle and prior to the most recent young generation GC,
5124       // minus those cleaned up by the concurrent precleaning.
5125       if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
5126         GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm);
5127         do_remark_parallel();
5128       } else {
5129         GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
5130                     _gc_timer_cm);
5131         do_remark_non_parallel();
5132       }
5133     }
5134   } else {
5135     assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
5136     // The initial mark was stop-world, so there's no rescanning to
5137     // do; go straight on to the next step below.
5138   }
5139   verify_work_stacks_empty();
5140   verify_overflow_empty();
5141 
5142   {
5143     NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm);)
5144     refProcessingWork(asynch, clear_all_soft_refs);
5145   }
5146   verify_work_stacks_empty();
5147   verify_overflow_empty();
5148 
5149   if (should_unload_classes()) {
5150     CodeCache::gc_epilogue();
5151   }
5152   JvmtiExport::gc_epilogue();
5153 
5154   // If we encountered any (marking stack / work queue) overflow
5155   // events during the current CMS cycle, take appropriate
5156   // remedial measures, where possible, so as to try to avoid
5157   // recurrence of that condition.
5158   assert(_markStack.isEmpty(), "No grey objects");
5159   size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
5160                      _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
5161   if (ser_ovflw > 0) {
5162     if (PrintCMSStatistics != 0) {
5163       gclog_or_tty->print_cr("Marking stack overflow (benign) "
5164         "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT
5165         ", kac_preclean="SIZE_FORMAT")",
5166         _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
5167         _ser_kac_ovflw, _ser_kac_preclean_ovflw);
5168     }
5169     _markStack.expand();
5170     _ser_pmc_remark_ovflw = 0;
5171     _ser_pmc_preclean_ovflw = 0;
5172     _ser_kac_preclean_ovflw = 0;
5173     _ser_kac_ovflw = 0;
5174   }
5175   if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
5176     if (PrintCMSStatistics != 0) {
5177       gclog_or_tty->print_cr("Work queue overflow (benign) "
5178         "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
5179         _par_pmc_remark_ovflw, _par_kac_ovflw);
5180     }
5181     _par_pmc_remark_ovflw = 0;
5182     _par_kac_ovflw = 0;
5183   }
5184   if (PrintCMSStatistics != 0) {
5185      if (_markStack._hit_limit > 0) {
5186        gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")",
5187                               _markStack._hit_limit);
5188      }
5189      if (_markStack._failed_double > 0) {
5190        gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT"),"
5191                               " current capacity "SIZE_FORMAT,
5192                               _markStack._failed_double,
5193                               _markStack.capacity());
5194      }
5195   }
5196   _markStack._hit_limit = 0;
5197   _markStack._failed_double = 0;
5198 
5199   if ((VerifyAfterGC || VerifyDuringGC) &&
5200       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5201     verify_after_remark();
5202   }
5203 
5204   _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
5205 
5206   // Change under the freelistLocks.
5207   _collectorState = Sweeping;
5208   // Call isAllClear() under bitMapLock
5209   assert(_modUnionTable.isAllClear(),
5210       "Should be clear by end of the final marking");
5211   assert(_ct->klass_rem_set()->mod_union_is_clear(),
5212       "Should be clear by end of the final marking");
5213   if (UseAdaptiveSizePolicy) {
5214     size_policy()->checkpoint_roots_final_end(gch->gc_cause());
5215   }
5216 }
5217 
5218 void CMSParInitialMarkTask::work(uint worker_id) {
5219   elapsedTimer _timer;
5220   ResourceMark rm;
5221   HandleMark   hm;
5222 
5223   // ---------- scan from roots --------------
5224   _timer.start();
5225   GenCollectedHeap* gch = GenCollectedHeap::heap();
5226   Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
5227   CMKlassClosure klass_closure(&par_mri_cl);
5228 
5229   // ---------- young gen roots --------------
5230   {
5231     work_on_young_gen_roots(worker_id, &par_mri_cl);
5232     _timer.stop();
5233     if (PrintCMSStatistics != 0) {
5234       gclog_or_tty->print_cr(
5235         "Finished young gen initial mark scan work in %dth thread: %3.3f sec",
5236         worker_id, _timer.seconds());
5237     }
5238   }
5239 
5240   // ---------- remaining roots --------------
5241   _timer.reset();
5242   _timer.start();
5243   gch->gen_process_strong_roots(_collector->_cmsGen->level(),
5244                                 false,     // yg was scanned above
5245                                 false,     // this is parallel code
5246                                 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5247                                 &par_mri_cl,
5248                                 NULL,
5249                                 &klass_closure);
5250   assert(_collector->should_unload_classes()
5251          || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
5252          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5253   _timer.stop();
5254   if (PrintCMSStatistics != 0) {
5255     gclog_or_tty->print_cr(
5256       "Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
5257       worker_id, _timer.seconds());
5258   }
5259 }
5260 
5261 // Parallel remark task
5262 class CMSParRemarkTask: public CMSParMarkTask {
5263   CompactibleFreeListSpace* _cms_space;
5264 
5265   // The per-thread work queues, available here for stealing.
5266   OopTaskQueueSet*       _task_queues;
5267   ParallelTaskTerminator _term;
5268 
5269  public:
5270   // A value of 0 passed to n_workers will cause the number of
5271   // workers to be taken from the active workers in the work gang.
5272   CMSParRemarkTask(CMSCollector* collector,
5273                    CompactibleFreeListSpace* cms_space,
5274                    int n_workers, FlexibleWorkGang* workers,
5275                    OopTaskQueueSet* task_queues):
5276     CMSParMarkTask("Rescan roots and grey objects in parallel",
5277                    collector, n_workers),
5278     _cms_space(cms_space),
5279     _task_queues(task_queues),
5280     _term(n_workers, task_queues) { }
5281 
5282   OopTaskQueueSet* task_queues() { return _task_queues; }
5283 
5284   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5285 
5286   ParallelTaskTerminator* terminator() { return &_term; }
5287   int n_workers() { return _n_workers; }
5288 
5289   void work(uint worker_id);
5290 
5291  private:
5292   // ... of dirty cards in old space
5293   void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
5294                                   Par_MarkRefsIntoAndScanClosure* cl);
5295 
5296   // ... work stealing for the above
5297   void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
5298 };
5299 
5300 class RemarkKlassClosure : public KlassClosure {
5301   CMKlassClosure _cm_klass_closure;
5302  public:
5303   RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
5304   void do_klass(Klass* k) {
5305     // Check if we have modified any oops in the Klass during the concurrent marking.
5306     if (k->has_accumulated_modified_oops()) {
5307       k->clear_accumulated_modified_oops();
5308 
5309       // We could have transferred the current modified marks to the accumulated marks,
5310       // as we do from the Card Table to the Mod Union Table. But it's not really necessary.
5311     } else if (k->has_modified_oops()) {
5312       // Don't clear anything, this info is needed by the next young collection.
5313     } else {
5314       // No modified oops in the Klass.
5315       return;
5316     }
5317 
5318     // The klass has modified fields, need to scan the klass.
5319     _cm_klass_closure.do_klass(k);
5320   }
5321 };
5322 
5323 void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
5324   DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
5325   EdenSpace* eden_space = dng->eden();
5326   ContiguousSpace* from_space = dng->from();
5327   ContiguousSpace* to_space   = dng->to();
5328 
5329   HeapWord** eca = _collector->_eden_chunk_array;
5330   size_t     ect = _collector->_eden_chunk_index;
5331   HeapWord** sca = _collector->_survivor_chunk_array;
5332   size_t     sct = _collector->_survivor_chunk_index;
5333 
5334   assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
5335   assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
5336 
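       // To-space carries no recorded samples, so it is scanned as a single
       // task; from-space and eden are partitioned into tasks using the chunk
       // boundaries recorded by the survivor PLAB and eden sampling above.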
5337   do_young_space_rescan(worker_id, cl, to_space, NULL, 0);
5338   do_young_space_rescan(worker_id, cl, from_space, sca, sct);
5339   do_young_space_rescan(worker_id, cl, eden_space, eca, ect);
5340 }
5341 
5342 // work_queue(i) is passed to the closure
5343 // Par_MarkRefsIntoAndScanClosure.  The "i" parameter
5344 // also is passed to do_dirty_card_rescan_tasks() and to
5345 // do_work_steal() to select the i-th task_queue.
5346 
5347 void CMSParRemarkTask::work(uint worker_id) {
5348   elapsedTimer _timer;
5349   ResourceMark rm;
5350   HandleMark   hm;
5351 
5352   // ---------- rescan from roots --------------
5353   _timer.start();
5354   GenCollectedHeap* gch = GenCollectedHeap::heap();
5355   Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
5356     _collector->_span, _collector->ref_processor(),
5357     &(_collector->_markBitMap),
5358     work_queue(worker_id));
5359 
5360   // Rescan the young gen roots first: they are likely coarsely
5361   // partitioned and may, on that account, constitute the
5362   // critical path, so it is best to get that work started
5363   // as early as possible.
5364   // ---------- young gen roots --------------
5365   {
5366     work_on_young_gen_roots(worker_id, &par_mrias_cl);
5367     _timer.stop();
5368     if (PrintCMSStatistics != 0) {
5369       gclog_or_tty->print_cr(
5370         "Finished young gen rescan work in %dth thread: %3.3f sec",
5371         worker_id, _timer.seconds());
5372     }
5373   }
5374 
5375   // ---------- remaining roots --------------
5376   _timer.reset();
5377   _timer.start();
5378   gch->gen_process_strong_roots(_collector->_cmsGen->level(),
5379                                 false,     // yg was scanned above
5380                                 false,     // this is parallel code
5381                                 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5382                                 &par_mrias_cl,
5383                                 NULL,
5384                                 NULL);     // The dirty klasses will be handled below
5385   assert(_collector->should_unload_classes()
5386          || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
5387          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5388   _timer.stop();
5389   if (PrintCMSStatistics != 0) {
5390     gclog_or_tty->print_cr(
5391       "Finished remaining root rescan work in %dth thread: %3.3f sec",
5392       worker_id, _timer.seconds());
5393   }
5394 
5395   // ---------- unhandled CLD scanning ----------
5396   if (worker_id == 0) { // Single threaded at the moment.
5397     _timer.reset();
5398     _timer.start();
5399 
5400     // Scan all new class loader data objects and new dependencies that were
5401     // introduced during concurrent marking.
5402     ResourceMark rm;
5403     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5404     for (int i = 0; i < array->length(); i++) {
5405       par_mrias_cl.do_class_loader_data(array->at(i));
5406     }
5407 
5408     // We don't need to keep track of new CLDs anymore.
5409     ClassLoaderDataGraph::remember_new_clds(false);
5410 
5411     _timer.stop();
5412     if (PrintCMSStatistics != 0) {
5413       gclog_or_tty->print_cr(
5414           "Finished unhandled CLD scanning work in %dth thread: %3.3f sec",
5415           worker_id, _timer.seconds());
5416     }
5417   }
5418 
5419   // ---------- dirty klass scanning ----------
5420   if (worker_id == 0) { // Single threaded at the moment.
5421     _timer.reset();
5422     _timer.start();
5423 
5424     // Scan all classes that were dirtied during the concurrent marking phase.
5425     RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
5426     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5427 
5428     _timer.stop();
5429     if (PrintCMSStatistics != 0) {
5430       gclog_or_tty->print_cr(
5431           "Finished dirty klass scanning work in %dth thread: %3.3f sec",
5432           worker_id, _timer.seconds());
5433     }
5434   }
5435 
5436   // We might have added oops to ClassLoaderData::_handles during the
5437   // concurrent marking phase. These oops point to newly allocated objects
5438   // that are guaranteed to be kept alive, either by the direct allocation
5439   // code or when the young collector processes the strong roots. Hence,
5440   // we don't have to revisit the _handles block during the remark phase.
5441 
5442   // ---------- rescan dirty cards ------------
5443   _timer.reset();
5444   _timer.start();
5445 
5446   // Do the dirty card rescan tasks for the CMS space
5447   // (cms_space).
5448   // "worker_id" is passed to select the task_queue for "worker_id"
5449   do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
5450   _timer.stop();
5451   if (PrintCMSStatistics != 0) {
5452     gclog_or_tty->print_cr(
5453       "Finished dirty card rescan work in %dth thread: %3.3f sec",
5454       worker_id, _timer.seconds());
5455   }
5456 
5457   // ---------- steal work from other threads ...
5458   // ---------- ... and drain overflow list.
5459   _timer.reset();
5460   _timer.start();
5461   do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
5462   _timer.stop();
5463   if (PrintCMSStatistics != 0) {
5464     gclog_or_tty->print_cr(
5465       "Finished work stealing in %dth thread: %3.3f sec",
5466       worker_id, _timer.seconds());
5467   }
5468 }
5469 
5470 // Note that parameter "i" is not used.
5471 void
5472 CMSParMarkTask::do_young_space_rescan(uint worker_id,
5473   OopsInGenClosure* cl, ContiguousSpace* space,
5474   HeapWord** chunk_array, size_t chunk_top) {
5475   // Until all tasks completed:
5476   // . claim an unclaimed task
5477   // . compute region boundaries corresponding to task claimed
5478   //   using chunk_array
5479   // . par_oop_iterate(cl) over that region
5480 
5481   ResourceMark rm;
5482   HandleMark   hm;
5483 
5484   SequentialSubTasksDone* pst = space->par_seq_tasks();
5485 
5486   uint nth_task = 0;
5487   uint n_tasks  = pst->n_tasks();
5488 
5489   if (n_tasks > 0) {
5490     assert(pst->valid(), "Uninitialized use?");
5491     HeapWord *start, *end;
5492     while (!pst->is_task_claimed(/* reference */ nth_task)) {
5493       // We claimed task # nth_task; compute its boundaries.
5494       if (chunk_top == 0) {  // no samples were taken
5495         assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
5496         start = space->bottom();
5497         end   = space->top();
5498       } else if (nth_task == 0) {
5499         start = space->bottom();
5500         end   = chunk_array[nth_task];
5501       } else if (nth_task < (uint)chunk_top) {
5502         assert(nth_task >= 1, "Control point invariant");
5503         start = chunk_array[nth_task - 1];
5504         end   = chunk_array[nth_task];
5505       } else {
5506         assert(nth_task == (uint)chunk_top, "Control point invariant");
5507         start = chunk_array[chunk_top - 1];
5508         end   = space->top();
5509       }
5510       MemRegion mr(start, end);
5511       // Verify that mr is in space
5512       assert(mr.is_empty() || space->used_region().contains(mr),
5513              "Should be in space");
5514       // Verify that "start" is an object boundary
5515       assert(mr.is_empty() || oop(mr.start())->is_oop(),
5516              "Should be an oop");
5517       space->par_oop_iterate(mr, cl);
5518     }
5519     pst->all_tasks_completed();
5520   }
5521 }
5522 
5523 void
5524 CMSParRemarkTask::do_dirty_card_rescan_tasks(
5525   CompactibleFreeListSpace* sp, int i,
5526   Par_MarkRefsIntoAndScanClosure* cl) {
5527   // Until all tasks completed:
5528   // . claim an unclaimed task
5529   // . compute region boundaries corresponding to task claimed
5530   // . transfer dirty bits ct->mut for that region
5531   // . apply rescanclosure to dirty mut bits for that region
5532 
5533   ResourceMark rm;
5534   HandleMark   hm;
5535 
5536   OopTaskQueue* work_q = work_queue(i);
5537   ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
5538   // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
5539   // CAUTION: This closure has state that persists across calls to
5540   // the work method dirty_range_iterate_clear() in that it has
5541   // embedded in it a (subtype of) UpwardsObjectClosure. The
5542   // use of that state in the embedded UpwardsObjectClosure instance
5543   // assumes that the cards are always iterated (even if in parallel
5544   // by several threads) in monotonically increasing order per
5545   // thread. This is true of the implementation below which picks
5546   // card ranges (chunks) in monotonically increasing order globally
5547   // and, a-fortiori, in monotonically increasing order per thread
5548   // (the latter order being a subsequence of the former).
5549   // If the work code below is ever reorganized into a more chaotic
5550   // work-partitioning form than the current "sequential tasks"
5551   // paradigm, the use of that persistent state will have to be
5552   // revisited and modified appropriately. See also the related
5553   // bug 4756801; work on that bug should examine this code to make
5554   // sure that the changes there do not run counter to the
5555   // assumptions made here, which are necessary for correctness and
5556   // efficiency. Note also that this code might yield inefficient
5557   // behavior in the case of very large objects that span one or
5558   // more work chunks. Such objects would potentially be scanned
5559   // several times redundantly. Work on 4756801 should try and
5560   // address that performance anomaly if at all possible. XXX
5561   MemRegion  full_span  = _collector->_span;
5562   CMSBitMap* bm    = &(_collector->_markBitMap);     // shared
5563   MarkFromDirtyCardsClosure
5564     greyRescanClosure(_collector, full_span, // entire span of interest
5565                       sp, bm, work_q, cl);
5566 
5567   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
5568   assert(pst->valid(), "Uninitialized use?");
5569   uint nth_task = 0;
5570   const int alignment = CardTableModRefBS::card_size * BitsPerWord;
5571   MemRegion span = sp->used_region();
5572   HeapWord* start_addr = span.start();
5573   HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
5574                                            alignment);
5575   const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
5576   assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
5577          start_addr, "Check alignment");
5578   assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
5579          chunk_size, "Check alignment");
5580 
5581   while (!pst->is_task_claimed(/* reference */ nth_task)) {
5582     // Having claimed the nth_task, compute corresponding mem-region,
5583     // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
5584     // The alignment restriction ensures that we do not need any
5585     // synchronization with other gang-workers while setting or
5586     // clearing bits in this chunk of the MUT.
5587     MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
5588                                     start_addr + (nth_task+1)*chunk_size);
5589     // The last chunk's end might be way beyond the end of the
5590     // used region. In that case pull it back appropriately.
5591     if (this_span.end() > end_addr) {
5592       this_span.set_end(end_addr);
5593       assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
5594     }
5595     // Iterate over the dirty cards covering this chunk, marking them
5596     // precleaned, and setting the corresponding bits in the mod union
5597     // table. Since we have been careful to partition at Card and MUT-word
5598     // boundaries no synchronization is needed between parallel threads.
5599     _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
5600                                                  &modUnionClosure);
5601 
5602     // Having transferred these marks into the modUnionTable,
5603     // rescan the marked objects on the dirty cards in the modUnionTable.
5604     // Even if this is a synchronous collection, the initial marking
5605     // may have been done during an asynchronous collection, so there
5606     // may be dirty bits in the mod-union table.
5607     _collector->_modUnionTable.dirty_range_iterate_clear(
5608                   this_span, &greyRescanClosure);
5609     _collector->_modUnionTable.verifyNoOneBitsInRange(
5610                                  this_span.start(),
5611                                  this_span.end());
5612   }
5613   pst->all_tasks_completed();  // declare that this worker is done
5614 }
5615 
5616 // . see if we can share work_queues with ParNew? XXX
5617 void
5618 CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
5619                                 int* seed) {
5620   OopTaskQueue* work_q = work_queue(i);
5621   NOT_PRODUCT(int num_steals = 0;)
5622   oop obj_to_scan;
5623   CMSBitMap* bm = &(_collector->_markBitMap);
5624 
5625   while (true) {
5626     // Completely finish any left over work from (an) earlier round(s)
5627     cl->trim_queue(0);
5628     size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5629                                          (size_t)ParGCDesiredObjsFromOverflowList);
5630     // Now check if there's any work in the overflow list
5631     // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5632     // only affects the number of attempts made to get work from the
5633     // overflow list and does not affect the number of workers.  Just
5634     // pass ParallelGCThreads so this behavior is unchanged.
5635     if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5636                                                 work_q,
5637                                                 ParallelGCThreads)) {
5638       // found something in global overflow list;
5639       // not yet ready to go stealing work from others.
5640       // We'd like to assert(work_q->size() != 0, ...)
5641       // because we just took work from the overflow list,
5642       // but of course we can't since all of that could have
5643       // been already stolen from us.
5644       // "He giveth and He taketh away."
5645       continue;
5646     }
5647     // Verify that we have no work before we resort to stealing
5648     assert(work_q->size() == 0, "Have work, shouldn't steal");
5649     // Try to steal from other queues that have work
5650     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5651       NOT_PRODUCT(num_steals++;)
5652       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5653       assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5654       // Do scanning work
5655       obj_to_scan->oop_iterate(cl);
5656       // Loop around, finish this work, and try to steal some more
5657     } else if (terminator()->offer_termination()) {
5658         break;  // nirvana from the infinite cycle
5659     }
5660   }
5661   NOT_PRODUCT(
5662     if (PrintCMSStatistics != 0) {
5663       gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5664     }
5665   )
5666   assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
5667          "Else our work is not yet done");
5668 }
5669 
5670 // If CMSEdenChunksRecordAlways is true, record object boundaries in
5671 // _eden_chunk_array by sampling the eden top in the slow-path eden
5672 // object allocation code path. If CMSEdenChunksRecordAlways is false,
5673 // we instead rely on the other, asynchronous sampling done by
5674 // sample_eden(), which is active during part of the preclean
5675 // phase.
5676 void CMSCollector::sample_eden_chunk() {
5677   if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) {
5678     if (_eden_chunk_lock->try_lock()) {
5679       // Record a sample. This is the critical section. The contents
5680       // of _eden_chunk_array have to be non-decreasing in
5681       // address order.
5682       _eden_chunk_array[_eden_chunk_index] = *_top_addr;
5683       assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
5684              "Unexpected state of Eden");
5685       if (_eden_chunk_index == 0 ||
5686           ((_eden_chunk_array[_eden_chunk_index] > _eden_chunk_array[_eden_chunk_index-1]) &&
5687            (pointer_delta(_eden_chunk_array[_eden_chunk_index],
5688                           _eden_chunk_array[_eden_chunk_index-1]) >= CMSSamplingGrain))) {
5689         _eden_chunk_index++;  // commit sample
5690       }
5691       _eden_chunk_lock->unlock();
5692     }
5693   }
5694 }
5695 
5696 // Return a thread-local PLAB recording array, as appropriate.
5697 void* CMSCollector::get_data_recorder(int thr_num) {
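       // Hand out the per-thread survivor PLAB recording array only while the
       // samples are useful for partitioning the rescan work: always when
       // CMSPLABRecordAlways is set, otherwise only during the precleaning
       // phases (collector states strictly between Marking and FinalMarking).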
5698   if (_survivor_plab_array != NULL &&
5699       (CMSPLABRecordAlways ||
5700        (_collectorState > Marking && _collectorState < FinalMarking))) {
5701     assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
5702     ChunkArray* ca = &_survivor_plab_array[thr_num];
5703     ca->reset();   // clear it so that fresh data is recorded
5704     return (void*) ca;
5705   } else {
5706     return NULL;
5707   }
5708 }
5709 
5710 // Reset all the thread-local PLAB recording arrays
5711 void CMSCollector::reset_survivor_plab_arrays() {
5712   for (uint i = 0; i < ParallelGCThreads; i++) {
5713     _survivor_plab_array[i].reset();
5714   }
5715 }
5716 
5717 // Merge the per-thread plab arrays into the global survivor chunk
5718 // array which will provide the partitioning of the survivor space
5719 // for CMS initial scan and rescan.
5720 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
5721                                               int no_of_gc_threads) {
5722   assert(_survivor_plab_array  != NULL, "Error");
5723   assert(_survivor_chunk_array != NULL, "Error");
5724   assert(_collectorState == FinalMarking ||
5725          (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error");
5726   for (int j = 0; j < no_of_gc_threads; j++) {
5727     _cursor[j] = 0;
5728   }
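       // A k-way merge: on each round pick the smallest not-yet-consumed entry
       // across all the per-thread arrays (tracked by _cursor[j]) and append it
       // to _survivor_chunk_array, so the merged array is sorted by address.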
5729   HeapWord* top = surv->top();
5730   size_t i;
5731   for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
5732     HeapWord* min_val = top;          // Higher than any PLAB address
5733     uint      min_tid = 0;            // position of min_val this round
5734     for (int j = 0; j < no_of_gc_threads; j++) {
5735       ChunkArray* cur_sca = &_survivor_plab_array[j];
5736       if (_cursor[j] == cur_sca->end()) {
5737         continue;
5738       }
5739       assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
5740       HeapWord* cur_val = cur_sca->nth(_cursor[j]);
5741       assert(surv->used_region().contains(cur_val), "Out of bounds value");
5742       if (cur_val < min_val) {
5743         min_tid = j;
5744         min_val = cur_val;
5745       } else {
5746         assert(cur_val < top, "All recorded addresses should be less");
5747       }
5748     }
5749     // At this point min_val and min_tid are respectively
5750     // the least address among the _survivor_plab_array[j].nth(_cursor[j]),
5751     // over all j, and the thread (j) that supplied that address.
5752     // We record this address in the _survivor_chunk_array[i]
5753     // and increment _cursor[min_tid] prior to the next round i.
5754     if (min_val == top) {
5755       break;
5756     }
5757     _survivor_chunk_array[i] = min_val;
5758     _cursor[min_tid]++;
5759   }
5760   // We are all done; record the size of the _survivor_chunk_array
5761   _survivor_chunk_index = i; // exclusive: [0, i)
5762   if (PrintCMSStatistics > 0) {
5763     gclog_or_tty->print(" (Survivor:" SIZE_FORMAT "chunks) ", i);
5764   }
5765   // Verify that we used up all the recorded entries
5766   #ifdef ASSERT
5767     size_t total = 0;
5768     for (int j = 0; j < no_of_gc_threads; j++) {
5769       assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
5770       total += _cursor[j];
5771     }
5772     assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
5773     // Check that the merged array is in sorted order
5774     if (total > 0) {
5775       for (size_t i = 0; i < total - 1; i++) {
5776         if (PrintCMSStatistics > 0) {
5777           gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
5778                               i, _survivor_chunk_array[i]);
5779         }
5780         assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
5781                "Not sorted");
5782       }
5783     }
5784   #endif // ASSERT
5785 }
5786 
5787 // Set up the space's par_seq_tasks structure for work claiming
5788 // for parallel initial scan and rescan of young gen.
5789 // See ParRescanTask where this is currently used.
5790 void
5791 CMSCollector::
5792 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
5793   assert(n_threads > 0, "Unexpected n_threads argument");
5794   DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
5795 
5796   // Eden space
5797   if (!dng->eden()->is_empty()) {
5798     SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
5799     assert(!pst->valid(), "Clobbering existing data?");
5800     // Each valid entry in [0, _eden_chunk_index) represents a task.
5801     size_t n_tasks = _eden_chunk_index + 1;
5802     assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
5803     // Sets the condition for completion of the subtask (how many threads
5804     // need to finish in order to be done).
5805     pst->set_n_threads(n_threads);
5806     pst->set_n_tasks((int)n_tasks);
5807   }
5808 
5809   // Merge the survivor plab arrays into _survivor_chunk_array
5810   if (_survivor_plab_array != NULL) {
5811     merge_survivor_plab_arrays(dng->from(), n_threads);
5812   } else {
5813     assert(_survivor_chunk_index == 0, "Error");
5814   }
5815 
5816   // To space
5817   {
5818     SequentialSubTasksDone* pst = dng->to()->par_seq_tasks();
5819     assert(!pst->valid(), "Clobbering existing data?");
5820     // Sets the condition for completion of the subtask (how many threads
5821     // need to finish in order to be done).
5822     pst->set_n_threads(n_threads);
5823     pst->set_n_tasks(1);
5824     assert(pst->valid(), "Error");
5825   }
5826 
5827   // From space
5828   {
5829     SequentialSubTasksDone* pst = dng->from()->par_seq_tasks();
5830     assert(!pst->valid(), "Clobbering existing data?");
5831     size_t n_tasks = _survivor_chunk_index + 1;
5832     assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
5833     // Sets the condition for completion of the subtask (how many threads
5834     // need to finish in order to be done).
5835     pst->set_n_threads(n_threads);
5836     pst->set_n_tasks((int)n_tasks);
5837     assert(pst->valid(), "Error");
5838   }
5839 }
5840 
5841 // Parallel version of remark
5842 void CMSCollector::do_remark_parallel() {
5843   GenCollectedHeap* gch = GenCollectedHeap::heap();
5844   FlexibleWorkGang* workers = gch->workers();
5845   assert(workers != NULL, "Need parallel worker threads.");
5846   // Choose to use the number of GC workers most recently set
5847   // into "active_workers".  If active_workers is not set, set it
5848   // to ParallelGCThreads.
5849   int n_workers = workers->active_workers();
5850   if (n_workers == 0) {
5851     assert(n_workers > 0, "Should have been set during scavenge");
5852     n_workers = ParallelGCThreads;
5853     workers->set_active_workers(n_workers);
5854   }
5855   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
5856 
5857   CMSParRemarkTask tsk(this,
5858     cms_space,
5859     n_workers, workers, task_queues());
5860 
5861   // Set up for parallel process_strong_roots work.
5862   gch->set_par_threads(n_workers);
5863   // We won't be iterating over the cards in the card table updating
5864   // the younger_gen cards, so we shouldn't call the following; otherwise
5865   // the verification code, as well as the subsequent younger_refs_iterate
5866   // code, would get confused. XXX
5867   // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5868 
5869   // The young gen rescan work will not be done as part of
5870   // process_strong_roots (which currently doesn't know how to
5871   // parallelize such a scan), but rather will be broken up into
5872   // a set of parallel tasks (via the sampling that the [abortable]
5873   // preclean phase did of EdenSpace, plus the [two] tasks of
5874   // scanning the [two] survivor spaces). Further fine-grain
5875   // parallelization of the scanning of the survivor spaces
5876   // themselves, and of precleaning of the younger gen itself,
5877   // is deferred to the future.
5878   initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5879 
5880   // The dirty card rescan work is broken up into a "sequence"
5881   // of parallel tasks (per constituent space) that are dynamically
5882   // claimed by the parallel threads.
5883   cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5884 
5885   // It turns out that even when we're using 1 thread, doing the work in a
5886   // separate thread causes wide variance in run times.  We can't help this
5887   // in the multi-threaded case, but we special-case n=1 here to get
5888   // repeatable measurements of the 1-thread overhead of the parallel code.
5889   if (n_workers > 1) {
5890     // Make refs discovery MT-safe, if it isn't already: it may not
5891     // necessarily be so, since it's possible that we are doing
5892     // ST marking.
5893     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
5894     GenCollectedHeap::StrongRootsScope srs(gch);
5895     workers->run_task(&tsk);
5896   } else {
5897     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5898     GenCollectedHeap::StrongRootsScope srs(gch);
5899     tsk.work(0);
5900   }
5901 
5902   gch->set_par_threads(0);  // 0 ==> non-parallel.
5903   // restore, single-threaded for now, any preserved marks
5904   // as a result of work_q overflow
5905   restore_preserved_marks_if_any();
5906 }
5907 
5908 // Non-parallel version of remark
5909 void CMSCollector::do_remark_non_parallel() {
5910   ResourceMark rm;
5911   HandleMark   hm;
5912   GenCollectedHeap* gch = GenCollectedHeap::heap();
5913   ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5914 
5915   MarkRefsIntoAndScanClosure
5916     mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
5917              &_markStack, this,
5918              false /* should_yield */, false /* not precleaning */);
5919   MarkFromDirtyCardsClosure
5920     markFromDirtyCardsClosure(this, _span,
5921                               NULL,  // space is set further below
5922                               &_markBitMap, &_markStack, &mrias_cl);
5923   {
5924     GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm);
5925     // Iterate over the dirty cards, setting the corresponding bits in the
5926     // mod union table.
5927     {
5928       ModUnionClosure modUnionClosure(&_modUnionTable);
5929       _ct->ct_bs()->dirty_card_iterate(
5930                       _cmsGen->used_region(),
5931                       &modUnionClosure);
5932     }
5933     // Having transferred these marks into the modUnionTable, we just need
5934     // to rescan the marked objects on the dirty cards in the modUnionTable.
5935     // The initial marking may have been done during an asynchronous
5936     // collection so there may be dirty bits in the mod-union table.
5937     const int alignment =
5938       CardTableModRefBS::card_size * BitsPerWord;
5939     {
5940       // ... First handle dirty cards in CMS gen
5941       markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5942       MemRegion ur = _cmsGen->used_region();
5943       HeapWord* lb = ur.start();
5944       HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5945       MemRegion cms_span(lb, ub);
5946       _modUnionTable.dirty_range_iterate_clear(cms_span,
5947                                                &markFromDirtyCardsClosure);
5948       verify_work_stacks_empty();
5949       if (PrintCMSStatistics != 0) {
5950         gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5951           markFromDirtyCardsClosure.num_dirty_cards());
5952       }
5953     }
5954   }
5955   if (VerifyDuringGC &&
5956       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5957     HandleMark hm;  // Discard invalid handles created during verification
5958     Universe::verify();
5959   }
5960   {
5961     GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm);
5962 
5963     verify_work_stacks_empty();
5964 
5965     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5966     GenCollectedHeap::StrongRootsScope srs(gch);
5967     gch->gen_process_strong_roots(_cmsGen->level(),
5968                                   true,  // younger gens as roots
5969                                   false, // use the local StrongRootsScope
5970                                   SharedHeap::ScanningOption(roots_scanning_options()),
5971                                   &mrias_cl,
5972                                   NULL,
5973                                   NULL);  // The dirty klasses will be handled below
5974 
5975     assert(should_unload_classes()
5976            || (roots_scanning_options() & SharedHeap::SO_AllCodeCache),
5977            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5978   }
5979 
5980   {
5981     GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm);
5982 
5983     verify_work_stacks_empty();
5984 
5985     // Scan all class loader data objects that might have been introduced
5986     // during concurrent marking.
5987     ResourceMark rm;
5988     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5989     for (int i = 0; i < array->length(); i++) {
5990       mrias_cl.do_class_loader_data(array->at(i));
5991     }
5992 
5993     // We don't need to keep track of new CLDs anymore.
5994     ClassLoaderDataGraph::remember_new_clds(false);
5995 
5996     verify_work_stacks_empty();
5997   }
5998 
5999   {
6000     GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm);
6001 
6002     verify_work_stacks_empty();
6003 
6004     RemarkKlassClosure remark_klass_closure(&mrias_cl);
6005     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
6006 
6007     verify_work_stacks_empty();
6008   }
6009 
6010   // We might have added oops to ClassLoaderData::_handles during the
6011   // concurrent marking phase. These oops point to newly allocated objects
6012   // that are guaranteed to be kept alive, either by the direct allocation
6013   // code or when the young collector processes the strong roots. Hence,
6014   // we don't have to revisit the _handles block during the remark phase.
6015 
6016   verify_work_stacks_empty();
6017   // Restore evacuated mark words, if any, used for overflow list links
6018   if (!CMSOverflowEarlyRestoration) {
6019     restore_preserved_marks_if_any();
6020   }
6021   verify_overflow_empty();
6022 }
6023 
6024 ////////////////////////////////////////////////////////
6025 // Parallel Reference Processing Task Proxy Class
6026 ////////////////////////////////////////////////////////
6027 class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
6028   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
6029   CMSCollector*          _collector;
6030   CMSBitMap*             _mark_bit_map;
6031   const MemRegion        _span;
6032   ProcessTask&           _task;
6033 
6034 public:
6035   CMSRefProcTaskProxy(ProcessTask&     task,
6036                       CMSCollector*    collector,
6037                       const MemRegion& span,
6038                       CMSBitMap*       mark_bit_map,
6039                       AbstractWorkGang* workers,
6040                       OopTaskQueueSet* task_queues):
6041     // XXX Should superclass AGTWOQ also know about AWG since it knows
6042     // about the task_queues used by the AWG? Then it could initialize
6043     // the terminator() object. See 6984287. The set_for_termination()
6044     // below is a temporary band-aid for the regression in 6984287.
6045     AbstractGangTaskWOopQueues("Process referents by policy in parallel",
6046       task_queues),
6047     _task(task),
6048     _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
6049   {
6050     assert(_collector->_span.equals(_span) && !_span.is_empty(),
6051            "Inconsistency in _span");
6052     set_for_termination(workers->active_workers());
6053   }
6054 
6055   OopTaskQueueSet* task_queues() { return queues(); }
6056 
6057   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
6058 
6059   void do_work_steal(int i,
6060                      CMSParDrainMarkingStackClosure* drain,
6061                      CMSParKeepAliveClosure* keep_alive,
6062                      int* seed);
6063 
6064   virtual void work(uint worker_id);
6065 };
6066 
6067 void CMSRefProcTaskProxy::work(uint worker_id) {
6068   assert(_collector->_span.equals(_span), "Inconsistency in _span");
6069   CMSParKeepAliveClosure par_keep_alive(_collector, _span,
6070                                         _mark_bit_map,
6071                                         work_queue(worker_id));
6072   CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
6073                                                  _mark_bit_map,
6074                                                  work_queue(worker_id));
6075   CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
6076   _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
6077   if (_task.marks_oops_alive()) {
6078     do_work_steal(worker_id, &par_drain_stack, &par_keep_alive,
6079                   _collector->hash_seed(worker_id));
6080   }
6081   assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
6082   assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
6083 }
6084 
6085 class CMSRefEnqueueTaskProxy: public AbstractGangTask {
6086   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
6087   EnqueueTask& _task;
6088 
6089 public:
6090   CMSRefEnqueueTaskProxy(EnqueueTask& task)
6091     : AbstractGangTask("Enqueue reference objects in parallel"),
6092       _task(task)
6093   { }
6094 
6095   virtual void work(uint worker_id)
6096   {
6097     _task.work(worker_id);
6098   }
6099 };
6100 
6101 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
6102   MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
6103    _span(span),
6104    _bit_map(bit_map),
6105    _work_queue(work_queue),
6106    _mark_and_push(collector, span, bit_map, work_queue),
6107    _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
6108                         (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
6109 { }
6110 
6111 // . see if we can share work_queues with ParNew? XXX
6112 void CMSRefProcTaskProxy::do_work_steal(int i,
6113   CMSParDrainMarkingStackClosure* drain,
6114   CMSParKeepAliveClosure* keep_alive,
6115   int* seed) {
6116   OopTaskQueue* work_q = work_queue(i);
6117   NOT_PRODUCT(int num_steals = 0;)
6118   oop obj_to_scan;
6119 
6120   while (true) {
6121     // Completely finish any left over work from (an) earlier round(s)
6122     drain->trim_queue(0);
6123     size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
6124                                          (size_t)ParGCDesiredObjsFromOverflowList);
6125     // Now check if there's any work in the overflow list
6126     // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
6127     // only affects the number of attempts made to get work from the
6128     // overflow list and does not affect the number of workers.  Just
6129     // pass ParallelGCThreads so this behavior is unchanged.
6130     if (_collector->par_take_from_overflow_list(num_from_overflow_list,
6131                                                 work_q,
6132                                                 ParallelGCThreads)) {
6133       // Found something in global overflow list;
6134       // not yet ready to go stealing work from others.
6135       // We'd like to assert(work_q->size() != 0, ...)
6136       // because we just took work from the overflow list,
6137       // but of course we can't, since all of that might have
6138       // been already stolen from us.
6139       continue;
6140     }
6141     // Verify that we have no work before we resort to stealing
6142     assert(work_q->size() == 0, "Have work, shouldn't steal");
6143     // Try to steal from other queues that have work
6144     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
6145       NOT_PRODUCT(num_steals++;)
6146       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
6147       assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
6148       // Do scanning work
6149       obj_to_scan->oop_iterate(keep_alive);
6150       // Loop around, finish this work, and try to steal some more
6151     } else if (terminator()->offer_termination()) {
6152       break;  // nirvana from the infinite cycle
6153     }
6154   }
6155   NOT_PRODUCT(
6156     if (PrintCMSStatistics != 0) {
6157       gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
6158     }
6159   )
6160 }
6161 
6162 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
6163 {
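       // Run the reference processing task on the heap's parallel worker gang.
       // The proxy wraps the task with the collector's span, mark bit map and
       // per-worker work queues so that workers can steal from one another.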
6164   GenCollectedHeap* gch = GenCollectedHeap::heap();
6165   FlexibleWorkGang* workers = gch->workers();
6166   assert(workers != NULL, "Need parallel worker threads.");
6167   CMSRefProcTaskProxy rp_task(task, &_collector,
6168                               _collector.ref_processor()->span(),
6169                               _collector.markBitMap(),
6170                               workers, _collector.task_queues());
6171   workers->run_task(&rp_task);
6172 }
6173 
6174 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
6175 {
6176 
6177   GenCollectedHeap* gch = GenCollectedHeap::heap();
6178   FlexibleWorkGang* workers = gch->workers();
6179   assert(workers != NULL, "Need parallel worker threads.");
6180   CMSRefEnqueueTaskProxy enq_task(task);
6181   workers->run_task(&enq_task);
6182 }
6183 
6184 void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
6185 
6186   ResourceMark rm;
6187   HandleMark   hm;
6188 
6189   ReferenceProcessor* rp = ref_processor();
6190   assert(rp->span().equals(_span), "Spans should be equal");
6191   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
6192   // Process weak references.
6193   rp->setup_policy(clear_all_soft_refs);
6194   verify_work_stacks_empty();
6195 
6196   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
6197                                           &_markStack, false /* !preclean */);
6198   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
6199                                 _span, &_markBitMap, &_markStack,
6200                                 &cmsKeepAliveClosure, false /* !preclean */);
6201   {
6202     GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm);
6203 
6204     ReferenceProcessorStats stats;
6205     if (rp->processing_is_mt()) {
6206       // Set the degree of MT here.  If the discovery is done MT, there
6207       // may have been a different number of threads doing the discovery
6208       // and a different number of discovered lists may have Ref objects.
6209       // That is OK as long as the Reference lists are balanced (see
6210       // balance_all_queues() and balance_queues()).
6211       GenCollectedHeap* gch = GenCollectedHeap::heap();
6212       int active_workers = ParallelGCThreads;
6213       FlexibleWorkGang* workers = gch->workers();
6214       if (workers != NULL) {
6215         active_workers = workers->active_workers();
6216         // The expectation is that active_workers will have already
6217         // been set to a reasonable value.  If it has not been set,
6218         // investigate.
6219         assert(active_workers > 0, "Should have been set during scavenge");
6220       }
6221       rp->set_active_mt_degree(active_workers);
6222       CMSRefProcTaskExecutor task_executor(*this);
6223       stats = rp->process_discovered_references(&_is_alive_closure,
6224                                         &cmsKeepAliveClosure,
6225                                         &cmsDrainMarkingStackClosure,
6226                                         &task_executor,
6227                                         _gc_timer_cm);
6228     } else {
6229       stats = rp->process_discovered_references(&_is_alive_closure,
6230                                         &cmsKeepAliveClosure,
6231                                         &cmsDrainMarkingStackClosure,
6232                                         NULL,
6233                                         _gc_timer_cm);
6234     }
6235     _gc_tracer_cm->report_gc_reference_stats(stats);
6236 
6237   }
6238 
6239   // This is the point where the entire marking should have completed.
6240   verify_work_stacks_empty();
6241 
6242   if (should_unload_classes()) {
6243     {
6244       GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm);
6245 
6246       // Unload classes and purge the SystemDictionary.
6247       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
6248 
6249       // Unload nmethods.
6250       CodeCache::do_unloading(&_is_alive_closure, purged_class);
6251 
6252       // Prune dead klasses from subklass/sibling/implementor lists.
6253       Klass::clean_weak_klass_links(&_is_alive_closure);
6254     }
6255 
6256     {
6257       GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm);
6258       // Clean up unreferenced symbols in symbol table.
6259       SymbolTable::unlink();
6260     }
6261   }
6262 
6263   // CMS doesn't use the StringTable as hard roots when class unloading is turned off.
6264   // Need to check if we really scanned the StringTable.
6265   if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) {
6266     GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm);
6267     // Delete entries for dead interned strings.
6268     StringTable::unlink(&_is_alive_closure);
6269   }
6270 
6271   // Restore any preserved marks as a result of mark stack or
6272   // work queue overflow
6273   restore_preserved_marks_if_any();  // done single-threaded for now
6274 
6275   rp->set_enqueuing_is_done(true);
6276   if (rp->processing_is_mt()) {
6277     rp->balance_all_queues();
6278     CMSRefProcTaskExecutor task_executor(*this);
6279     rp->enqueue_discovered_references(&task_executor);
6280   } else {
6281     rp->enqueue_discovered_references(NULL);
6282   }
6283   rp->verify_no_references_recorded();
6284   assert(!rp->discovery_enabled(), "should have been disabled");
6285 }
6286 
6287 #ifndef PRODUCT
6288 void CMSCollector::check_correct_thread_executing() {
6289   Thread* t = Thread::current();
6290   // Only the VM thread or the CMS thread should be here.
6291   assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
6292          "Unexpected thread type");
6293   // If this is the vm thread, the foreground process
6294   // should not be waiting.  Note that _foregroundGCIsActive is
6295   // true while the foreground collector is waiting.
6296   if (_foregroundGCShouldWait) {
6297     // We cannot be the VM thread
6298     assert(t->is_ConcurrentGC_thread(),
6299            "Should be CMS thread");
6300   } else {
6301     // We can be the CMS thread only if we are in a stop-world
6302     // phase of CMS collection.
6303     if (t->is_ConcurrentGC_thread()) {
6304       assert(_collectorState == InitialMarking ||
6305              _collectorState == FinalMarking,
6306              "Should be a stop-world phase");
6307       // The CMS thread should be holding the CMS_token.
6308       assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6309              "Potential interference with concurrently "
6310              "executing VM thread");
6311     }
6312   }
6313 }
6314 #endif
6315 
6316 void CMSCollector::sweep(bool asynch) {
6317   assert(_collectorState == Sweeping, "just checking");
6318   check_correct_thread_executing();
6319   verify_work_stacks_empty();
6320   verify_overflow_empty();
6321   increment_sweep_count();
6322   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
6323 
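       // The inter-sweep timer has been running since the end of the previous
       // sweep; sample it for the inter-sweep estimate, then start the
       // intra-sweep timer to time the sweep itself.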
6324   _inter_sweep_timer.stop();
6325   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
6326   size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
6327 
6328   assert(!_intra_sweep_timer.is_active(), "Should not be active");
6329   _intra_sweep_timer.reset();
6330   _intra_sweep_timer.start();
6331   if (asynch) {
6332     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6333     CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
6334     // First sweep the old gen
6335     {
6336       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
6337                                bitMapLock());
6338       sweepWork(_cmsGen, asynch);
6339     }
6340 
6341     // Update Universe::_heap_*_at_gc figures.
6342     // We need all the free list locks to make the abstract state
6343     // transition from Sweeping to Resetting. See detailed note
6344     // further below.
6345     {
6346       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
6347       // Update heap occupancy information which is used as
6348       // input to soft ref clearing policy at the next gc.
6349       Universe::update_heap_info_at_gc();
6350       _collectorState = Resizing;
6351     }
6352   } else {
6353     // already have needed locks
6354     sweepWork(_cmsGen,  asynch);
6355     // Update heap occupancy information which is used as
6356     // input to soft ref clearing policy at the next gc.
6357     Universe::update_heap_info_at_gc();
6358     _collectorState = Resizing;
6359   }
6360   verify_work_stacks_empty();
6361   verify_overflow_empty();
6362 
6363   if (should_unload_classes()) {
6364     // Delay purge to the beginning of the next safepoint.  Metaspace::contains
6365     // requires that the virtual spaces are stable and not deleted.
6366     ClassLoaderDataGraph::set_should_purge(true);
6367   }
6368 
6369   _intra_sweep_timer.stop();
6370   _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
6371 
6372   _inter_sweep_timer.reset();
6373   _inter_sweep_timer.start();
6374 
6375   // We need to use a monotonically non-decreasing time in ms
6376   // because os::javaTimeMillis() does not guarantee monotonicity,
6377   // and we would otherwise see time-warp warnings.
6378   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
6379   update_time_of_last_gc(now);
6380 
6381   // NOTE on abstract state transitions:
6382   // Mutators allocate-live and/or mark the mod-union table dirty
6383   // based on the state of the collection.  The former is done in
6384   // the interval [Marking, Sweeping] and the latter in the interval
6385   // [Marking, Sweeping).  Thus the transitions into the Marking state
6386   // and out of the Sweeping state must be synchronously visible
6387   // globally to the mutators.
6388   // The transition into the Marking state happens with the world
6389   // stopped so the mutators will globally see it.  Sweeping is
6390   // done asynchronously by the background collector so the transition
6391   // from the Sweeping state to the Resizing state must be done
6392   // under the freelistLock (as is the check for whether to
6393   // allocate-live and whether to dirty the mod-union table).
6394   assert(_collectorState == Resizing, "Change of collector state to"
6395     " Resizing must be done under the freelistLocks (plural)");
6396 
6397   // Now that sweeping has been completed, we clear
6398   // the incremental_collection_failed flag,
6399   // thus inviting a younger gen collection to promote into
6400   // this generation. If such a promotion may still fail,
6401   // the flag will be set again when a young collection is
6402   // attempted.
6403   GenCollectedHeap* gch = GenCollectedHeap::heap();
6404   gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
6405   gch->update_full_collections_completed(_collection_count_start);
6406 }
6407 
6408 // FIX ME!!! Looks like this belongs in CFLSpace, with
6409 // CMSGen merely delegating to it.
6410 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
6411   double nearLargestPercent = FLSLargestBlockCoalesceProximity;
6412   HeapWord*  minAddr        = _cmsSpace->bottom();
6413   HeapWord*  largestAddr    =
6414     (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
6415   if (largestAddr == NULL) {
6416     // The dictionary appears to be empty.  In this case
6417     // try to coalesce at the end of the heap.
6418     largestAddr = _cmsSpace->end();
6419   }
6420   size_t largestOffset     = pointer_delta(largestAddr, minAddr);
6421   size_t nearLargestOffset =
6422     (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
6423   if (PrintFLSStatistics != 0) {
6424     gclog_or_tty->print_cr(
6425       "CMS: Large Block: " PTR_FORMAT ";"
6426       " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
6427       largestAddr,
6428       _cmsSpace->nearLargestChunk(), minAddr + nearLargestOffset);
6429   }
6430   _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
6431 }
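
     // A rough illustration of the arithmetic above (the numbers are assumed,
     // not taken from any particular run): if the largest free block starts
     // 1,000,000 HeapWords past bottom() and nearLargestPercent is 0.99, then
     // nearLargestOffset is (size_t)(1000000 * 0.99) - MinChunkSize, i.e. just
     // under 990,000 words. The sweep is thus considered "near" the largest
     // block only within roughly the last 1% of the distance to it, which is
     // presumably what isNearLargestChunk() below is consulted for when
     // deciding how aggressively to coalesce.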
6432 
6433 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
6434   return addr >= _cmsSpace->nearLargestChunk();
6435 }
6436 
6437 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
6438   return _cmsSpace->find_chunk_at_end();
6439 }
6440 
6441 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
6442                                                     bool full) {
6443   // The next lower level has been collected.  Gather any statistics
6444   // that are of interest at this point.
6445   if (!full && (current_level + 1) == level()) {
6446     // Gather statistics on the young generation collection.
6447     collector()->stats().record_gc0_end(used());
6448   }
6449 }
6450 
6451 CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
6452   GenCollectedHeap* gch = GenCollectedHeap::heap();
6453   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
6454     "Wrong type of heap");
6455   CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
6456     gch->gen_policy()->size_policy();
6457   assert(sp->is_gc_cms_adaptive_size_policy(),
6458     "Wrong type of size policy");
6459   return sp;
6460 }
6461 
6462 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
6463   if (PrintGCDetails && Verbose) {
6464     gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
6465   }
6466   _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
6467   _debug_collection_type =
6468     (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
6469   if (PrintGCDetails && Verbose) {
6470     gclog_or_tty->print_cr("to %d ", _debug_collection_type);
6471   }
6472 }
6473 
6474 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
6475   bool asynch) {
6476   // We iterate over the space(s) underlying this generation,
6477   // checking the mark bit map to see if the bits corresponding
6478   // to specific blocks are marked or not. Blocks that are
6479   // marked are live and are not swept up. All remaining blocks
6480   // are swept up, with coalescing on-the-fly as we sweep up
6481   // contiguous free and/or garbage blocks:
6482   // We need to ensure that the sweeper synchronizes with allocators
6483   // and stop-the-world collectors. In particular, the following
6484   // locks are used:
6485   // . CMS token: if this is held, a stop-the-world collection cannot occur
6486   // . freelistLock: if this is held, no allocation can occur from this
6487   //                 generation by another thread
6488   // . bitMapLock: if this is held, no other thread can access or update
6489   //               the marking bit map
6490 
6491   // Note that we need to hold the freelistLock if we use
6492   // the block iteration below; else the iterator might go awry if
6493   // a mutator (or promotion) causes block contents to change
6494   // (for instance if the allocator divvies up a block).
6495   // If we hold the free list lock, for all practical purposes
6496   // young generation GCs can't occur (they'll usually need to
6497   // promote), so we might as well prevent all young generation
6498   // GCs while we do a sweeping step. For the same reason, we might
6499   // as well take the bit map lock for the entire duration.
6500 
6501   // check that we hold the requisite locks
6502   assert(have_cms_token(), "Should hold cms token");
6503   assert(   (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
6504          || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
6505         "Should possess CMS token to sweep");
6506   assert_lock_strong(gen->freelistLock());
6507   assert_lock_strong(bitMapLock());
6508 
6509   assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
6510   assert(_intra_sweep_timer.is_active(),  "Was switched on  in an outer context");
6511   gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
6512                                       _inter_sweep_estimate.padded_average(),
6513                                       _intra_sweep_estimate.padded_average());
6514   gen->setNearLargestChunk();
6515 
6516   {
6517     SweepClosure sweepClosure(this, gen, &_markBitMap,
6518                             CMSYield && asynch);
6519     gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
6520     // We need to free-up/coalesce garbage/blocks from a
6521     // co-terminal free run. This is done in the SweepClosure
6522     // destructor; so, do not remove this scope, else the
6523     // end-of-sweep-census below will be off by a little bit.
6524   }
6525   gen->cmsSpace()->sweep_completed();
6526   gen->cmsSpace()->endSweepFLCensus(sweep_count());
6527   if (should_unload_classes()) {                // unloaded classes this cycle,
6528     _concurrent_cycles_since_last_unload = 0;   // ... reset count
6529   } else {                                      // did not unload classes,
6530     _concurrent_cycles_since_last_unload++;     // ... increment count
6531   }
6532 }
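
     // Reading aid for the flow above: the free-list census is opened with the
     // padded averages of the inter- and intra-sweep durations, the space is
     // walked block by block under the SweepClosure (whose destructor, as
     // noted, flushes any co-terminal free run), and the census is then closed
     // with the current sweep count. The cycles-since-last-unload counter is
     // simple bookkeeping, presumably consulted elsewhere when deciding
     // whether a future cycle should unload classes again.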
6533 
6534 // Reset CMS data structures (for now just the marking bit map)
6535 // preparatory for the next cycle.
6536 void CMSCollector::reset(bool asynch) {
6537   GenCollectedHeap* gch = GenCollectedHeap::heap();
6538   CMSAdaptiveSizePolicy* sp = size_policy();
6539   AdaptiveSizePolicyOutput(sp, gch->total_collections());
6540   if (asynch) {
6541     CMSTokenSyncWithLocks ts(true, bitMapLock());
6542 
6543     // If the state is not "Resetting", the foreground thread
6544     // has already done a collection and the resetting.
6545     if (_collectorState != Resetting) {
6546       assert(_collectorState == Idling, "The state should only change"
6547         " because the foreground collector has finished the collection");
6548       return;
6549     }
6550 
6551     // Clear the mark bitmap (no grey objects to start with)
6552     // for the next cycle.
6553     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6554     CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
6555 
6556     HeapWord* curAddr = _markBitMap.startWord();
6557     while (curAddr < _markBitMap.endWord()) {
6558       size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
6559       MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
6560       _markBitMap.clear_large_range(chunk);
6561       if (ConcurrentMarkSweepThread::should_yield() &&
6562           !foregroundGCIsActive() &&
6563           CMSYield) {
6564         assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6565                "CMS thread should hold CMS token");
6566         assert_lock_strong(bitMapLock());
6567         bitMapLock()->unlock();
6568         ConcurrentMarkSweepThread::desynchronize(true);
6569         ConcurrentMarkSweepThread::acknowledge_yield_request();
6570         stopTimer();
6571         if (PrintCMSStatistics != 0) {
6572           incrementYields();
6573         }
6574         icms_wait();
6575 
6576         // See the comment in coordinator_yield()
6577         for (unsigned i = 0; i < CMSYieldSleepCount &&
6578                          ConcurrentMarkSweepThread::should_yield() &&
6579                          !CMSCollector::foregroundGCIsActive(); ++i) {
6580           os::sleep(Thread::current(), 1, false);
6581           ConcurrentMarkSweepThread::acknowledge_yield_request();
6582         }
6583 
6584         ConcurrentMarkSweepThread::synchronize(true);
6585         bitMapLock()->lock_without_safepoint_check();
6586         startTimer();
6587       }
6588       curAddr = chunk.end();
6589     }
6590     // A successful mostly concurrent collection has been done.
6591     // Because only the full (i.e., concurrent mode failure) collections
6592     // are being measured for gc overhead limits, clear the "near" flag
6593     // and count.
6594     sp->reset_gc_overhead_limit_count();
6595     _collectorState = Idling;
6596   } else {
6597     // already have the lock
6598     assert(_collectorState == Resetting, "just checking");
6599     assert_lock_strong(bitMapLock());
6600     _markBitMap.clear_all();
6601     _collectorState = Idling;
6602   }
6603 
6604   // Stop incremental mode after a cycle completes, so that any future cycles
6605   // are triggered by allocation.
6606   stop_icms();
6607 
6608   NOT_PRODUCT(
6609     if (RotateCMSCollectionTypes) {
6610       _cmsGen->rotate_debug_collection_type();
6611     }
6612   )
6613 
6614   register_gc_end();
6615 }
6616 
6617 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
6618   gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
6619   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6620   GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
6621   TraceCollectorStats tcs(counters());
6622 
6623   switch (op) {
6624     case CMS_op_checkpointRootsInitial: {
6625       SvcGCMarker sgcm(SvcGCMarker::OTHER);
6626       checkpointRootsInitial(true);       // asynch
6627       if (PrintGC) {
6628         _cmsGen->printOccupancy("initial-mark");
6629       }
6630       break;
6631     }
6632     case CMS_op_checkpointRootsFinal: {
6633       SvcGCMarker sgcm(SvcGCMarker::OTHER);
6634       checkpointRootsFinal(true,    // asynch
6635                            false,   // !clear_all_soft_refs
6636                            false);  // !init_mark_was_synchronous
6637       if (PrintGC) {
6638         _cmsGen->printOccupancy("remark");
6639       }
6640       break;
6641     }
6642     default:
6643       fatal("No such CMS_op");
6644   }
6645 }
6646 
6647 #ifndef PRODUCT
6648 size_t const CMSCollector::skip_header_HeapWords() {
6649   return FreeChunk::header_size();
6650 }
6651 
6652 // Try to collect here the conditions that should hold when
6653 // the CMS thread is exiting. The idea is that the foreground GC
6654 // thread should not be blocked if it wants to terminate
6655 // the CMS thread and yet continue to run the VM for a while
6656 // after that.
6657 void CMSCollector::verify_ok_to_terminate() const {
6658   assert(Thread::current()->is_ConcurrentGC_thread(),
6659          "should be called by CMS thread");
6660   assert(!_foregroundGCShouldWait, "should be false");
6661   // We could check here that all the various low-level locks
6662   // are not held by the CMS thread, but that is overkill; see
6663   // also CMSThread::verify_ok_to_terminate() where the CGC_lock
6664   // is checked.
6665 }
6666 #endif
6667 
6668 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
6669   assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
6670          "missing Printezis mark?");
6671   HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6672   size_t size = pointer_delta(nextOneAddr + 1, addr);
6673   assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6674          "alignment problem");
6675   assert(size >= 3, "Necessary for Printezis marks to work");
6676   return size;
6677 }
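
     // A sketch of how the Printezis-bit encoding above is decoded
     // (illustrative values only): for an allocated-but-uninitialized object
     // the bits at addr and addr+1 are both set, and a further bit is set at
     // the last word of the block. So if the next marked word at or after
     // addr+2 is found at addr+9, the block is taken to span [addr, addr+10),
     // giving size == 10 words, which the asserts above require to be a
     // properly aligned size of at least 3 words.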
6678 
6679 // A variant of the above (block_size_using_printezis_bits()) except
6680 // that we return 0 if the P-bits are not yet set.
6681 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
6682   if (_markBitMap.isMarked(addr + 1)) {
6683     assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects");
6684     HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6685     size_t size = pointer_delta(nextOneAddr + 1, addr);
6686     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6687            "alignment problem");
6688     assert(size >= 3, "Necessary for Printezis marks to work");
6689     return size;
6690   }
6691   return 0;
6692 }
6693 
6694 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
6695   size_t sz = 0;
6696   oop p = (oop)addr;
6697   if (p->klass_or_null() != NULL) {
6698     sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
6699   } else {
6700     sz = block_size_using_printezis_bits(addr);
6701   }
6702   assert(sz > 0, "size must be nonzero");
6703   HeapWord* next_block = addr + sz;
6704   HeapWord* next_card  = (HeapWord*)round_to((uintptr_t)next_block,
6705                                              CardTableModRefBS::card_size);
6706   assert(round_down((uintptr_t)addr,      CardTableModRefBS::card_size) <
6707          round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
6708          "must be different cards");
6709   return next_card;
6710 }
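
     // For example (sizes assumed for illustration): with 512-byte cards,
     // i.e. 64 HeapWords on a 64-bit VM, a 10-word object at addr gives
     // next_block = addr + 10, and next_card is that address rounded up to a
     // 64-word (card) boundary. The assert above only requires that addr and
     // next_card land on different cards, not that the cards be consecutive,
     // since a large object may span many cards.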
6711 
6712 
6713 // CMS Bit Map Wrapper /////////////////////////////////////////
6714 
6715 // Construct a CMS bit map infrastructure, but don't create the
6716 // bit vector itself. That is done by a separate call to
6717 // CMSBitMap::allocate() further below.
6718 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
6719   _bm(),
6720   _shifter(shifter),
6721   _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
6722 {
6723   _bmStartWord = 0;
6724   _bmWordSize  = 0;
6725 }
6726 
6727 bool CMSBitMap::allocate(MemRegion mr) {
6728   _bmStartWord = mr.start();
6729   _bmWordSize  = mr.word_size();
6730   ReservedSpace brs(ReservedSpace::allocation_align_size_up(
6731                      (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
6732   if (!brs.is_reserved()) {
6733     warning("CMS bit map allocation failure");
6734     return false;
6735   }
6736   // For now we'll just commit all of the bit map up front.
6737   // Later on we'll try to be more parsimonious with swap.
6738   if (!_virtual_space.initialize(brs, brs.size())) {
6739     warning("CMS bit map backing store failure");
6740     return false;
6741   }
6742   assert(_virtual_space.committed_size() == brs.size(),
6743          "didn't reserve backing store for all of CMS bit map?");
6744   _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
6745   assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
6746          _bmWordSize, "inconsistency in bit map sizing");
6747   _bm.set_size(_bmWordSize >> _shifter);
6748 
6749   // bm.clear(); // can we rely on getting zero'd memory? verify below
6750   assert(isAllClear(),
6751          "Expected zero'd memory from ReservedSpace constructor");
6752   assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
6753          "consistency check");
6754   return true;
6755 }
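
     // Sizing sketch (assumed numbers, for orientation only): one bit covers
     // (1 << _shifter) HeapWords, so the backing store needs on the order of
     // _bmWordSize >> (_shifter + LogBitsPerByte) bytes. With _shifter == 0
     // and a 1 GB region on a 64-bit VM (128M HeapWords), that is roughly
     // 128M >> 3 = 16 MB of bit map, all of which is committed up front as
     // noted above.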
6756 
6757 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
6758   HeapWord *next_addr, *end_addr, *last_addr;
6759   assert_locked();
6760   assert(covers(mr), "out-of-range error");
6761   // XXX assert that start and end are appropriately aligned
6762   for (next_addr = mr.start(), end_addr = mr.end();
6763        next_addr < end_addr; next_addr = last_addr) {
6764     MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
6765     last_addr = dirty_region.end();
6766     if (!dirty_region.is_empty()) {
6767       cl->do_MemRegion(dirty_region);
6768     } else {
6769       assert(last_addr == end_addr, "program logic");
6770       return;
6771     }
6772   }
6773 }
6774 
6775 void CMSBitMap::print_on_error(outputStream* st, const char* prefix) const {
6776   _bm.print_on_error(st, prefix);
6777 }
6778 
6779 #ifndef PRODUCT
6780 void CMSBitMap::assert_locked() const {
6781   CMSLockVerifier::assert_locked(lock());
6782 }
6783 
6784 bool CMSBitMap::covers(MemRegion mr) const {
6785   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
6786   assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
6787          "size inconsistency");
6788   return (mr.start() >= _bmStartWord) &&
6789          (mr.end()   <= endWord());
6790 }
6791 
6792 bool CMSBitMap::covers(HeapWord* start, size_t size) const {
6793   return (start >= _bmStartWord && (start + size) <= endWord());
6794 }
6795 
6796 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
6797   // verify that there are no 1 bits in the interval [left, right)
6798   FalseBitMapClosure falseBitMapClosure;
6799   iterate(&falseBitMapClosure, left, right);
6800 }
6801 
6802 void CMSBitMap::region_invariant(MemRegion mr)
6803 {
6804   assert_locked();
6805   // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
6806   assert(!mr.is_empty(), "unexpected empty region");
6807   assert(covers(mr), "mr should be covered by bit map");
6808   // convert address range into offset range
6809   size_t start_ofs = heapWordToOffset(mr.start());
6810   // Make sure that end() is appropriately aligned
6811   assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
6812                         (1 << (_shifter+LogHeapWordSize))),
6813          "Misaligned mr.end()");
6814   size_t end_ofs   = heapWordToOffset(mr.end());
6815   assert(end_ofs > start_ofs, "Should mark at least one bit");
6816 }
6817 
6818 #endif
6819 
6820 bool CMSMarkStack::allocate(size_t size) {
6821   // allocate a stack of the requisite depth
6822   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6823                    size * sizeof(oop)));
6824   if (!rs.is_reserved()) {
6825     warning("CMSMarkStack allocation failure");
6826     return false;
6827   }
6828   if (!_virtual_space.initialize(rs, rs.size())) {
6829     warning("CMSMarkStack backing store failure");
6830     return false;
6831   }
6832   assert(_virtual_space.committed_size() == rs.size(),
6833          "didn't reserve backing store for all of CMS stack?");
6834   _base = (oop*)(_virtual_space.low());
6835   _index = 0;
6836   _capacity = size;
6837   NOT_PRODUCT(_max_depth = 0);
6838   return true;
6839 }
6840 
6841 // XXX FIX ME !!! In the MT case we come in here holding a
6842 // leaf lock. For printing we need to take a further lock
6843 // which has lower rank. We need to recalibrate the two
6844 // lock-ranks involved in order to be able to print the
6845 // messages below. (Or defer the printing to the caller.
6846 // For now we take the expedient path of just disabling the
6847 // messages for the problematic case.)
6848 void CMSMarkStack::expand() {
6849   assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
6850   if (_capacity == MarkStackSizeMax) {
6851     if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6852       // We print a warning message only once per CMS cycle.
6853       gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
6854     }
6855     return;
6856   }
6857   // Double capacity if possible
6858   size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
6859   // Do not give up the existing stack until we have managed to
6860   // get the doubled capacity that we desire.
6861   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6862                    new_capacity * sizeof(oop)));
6863   if (rs.is_reserved()) {
6864     // Release the backing store associated with old stack
6865     _virtual_space.release();
6866     // Reinitialize virtual space for new stack
6867     if (!_virtual_space.initialize(rs, rs.size())) {
6868       fatal("Not enough swap for expanded marking stack");
6869     }
6870     _base = (oop*)(_virtual_space.low());
6871     _index = 0;
6872     _capacity = new_capacity;
6873   } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6874     // Failed to double capacity; continue.
6875     // We print a detail message only once per CMS cycle.
6876     gclog_or_tty->print(" (benign) Failed to expand marking stack from " SIZE_FORMAT "K to "
6877             SIZE_FORMAT "K",
6878             _capacity / K, new_capacity / K);
6879   }
6880 }
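
     // Note on the expansion path above: the old backing store is released
     // and _index is reset to 0, so the stack's contents are not carried over;
     // callers are presumably expected to call expand() only once the stack
     // has been drained (e.g. after an overflow has been dealt with). Capping
     // growth at MarkStackSizeMax, with at most a doubling per call, keeps the
     // number of expansions bounded.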
6881 
6882 
6883 // Closures
6884 // XXX: there seems to be a lot of code duplication here;
6885 // should refactor and consolidate common code.
6886 
6887 // This closure is used to mark refs into the CMS generation in
6888 // the CMS bit map. Called at the first checkpoint. This closure
6889 // assumes that we do not need to re-mark dirty cards; if the CMS
6890 // generation on which this is used is not the oldest
6891 // generation, then this will lose younger_gen cards!
6892 
6893 MarkRefsIntoClosure::MarkRefsIntoClosure(
6894   MemRegion span, CMSBitMap* bitMap):
6895     _span(span),
6896     _bitMap(bitMap)
6897 {
6898     assert(_ref_processor == NULL, "deliberately left NULL");
6899     assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6900 }
6901 
6902 void MarkRefsIntoClosure::do_oop(oop obj) {
6903   // if obj points into _span, then mark the corresponding bit in _bitMap
6904   assert(obj->is_oop(), "expected an oop");
6905   HeapWord* addr = (HeapWord*)obj;
6906   if (_span.contains(addr)) {
6907     // this should be made more efficient
6908     _bitMap->mark(addr);
6909   }
6910 }
6911 
6912 void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
6913 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6914 
6915 Par_MarkRefsIntoClosure::Par_MarkRefsIntoClosure(
6916   MemRegion span, CMSBitMap* bitMap):
6917     _span(span),
6918     _bitMap(bitMap)
6919 {
6920     assert(_ref_processor == NULL, "deliberately left NULL");
6921     assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6922 }
6923 
6924 void Par_MarkRefsIntoClosure::do_oop(oop obj) {
6925   // if obj points into _span, then mark the corresponding bit in _bitMap
6926   assert(obj->is_oop(), "expected an oop");
6927   HeapWord* addr = (HeapWord*)obj;
6928   if (_span.contains(addr)) {
6929     // this should be made more efficient
6930     _bitMap->par_mark(addr);
6931   }
6932 }
6933 
6934 void Par_MarkRefsIntoClosure::do_oop(oop* p)       { Par_MarkRefsIntoClosure::do_oop_work(p); }
6935 void Par_MarkRefsIntoClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
6936 
6937 // A variant of the above, used for CMS marking verification.
6938 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6939   MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
6940     _span(span),
6941     _verification_bm(verification_bm),
6942     _cms_bm(cms_bm)
6943 {
6944     assert(_ref_processor == NULL, "deliberately left NULL");
6945     assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6946 }
6947 
6948 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
6949   // if obj points into _span, then mark the corresponding bit in _verification_bm
6950   assert(obj->is_oop(), "expected an oop");
6951   HeapWord* addr = (HeapWord*)obj;
6952   if (_span.contains(addr)) {
6953     _verification_bm->mark(addr);
6954     if (!_cms_bm->isMarked(addr)) {
6955       oop(addr)->print();
6956       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
6957       fatal("... aborting");
6958     }
6959   }
6960 }
6961 
6962 void MarkRefsIntoVerifyClosure::do_oop(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6963 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6964 
6965 //////////////////////////////////////////////////
6966 // MarkRefsIntoAndScanClosure
6967 //////////////////////////////////////////////////
6968 
6969 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
6970                                                        ReferenceProcessor* rp,
6971                                                        CMSBitMap* bit_map,
6972                                                        CMSBitMap* mod_union_table,
6973                                                        CMSMarkStack*  mark_stack,
6974                                                        CMSCollector* collector,
6975                                                        bool should_yield,
6976                                                        bool concurrent_precleaning):
6977   _collector(collector),
6978   _span(span),
6979   _bit_map(bit_map),
6980   _mark_stack(mark_stack),
6981   _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
6982                       mark_stack, concurrent_precleaning),
6983   _yield(should_yield),
6984   _concurrent_precleaning(concurrent_precleaning),
6985   _freelistLock(NULL)
6986 {
6987   _ref_processor = rp;
6988   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6989 }
6990 
6991 // This closure is used to mark refs into the CMS generation at the
6992 // second (final) checkpoint, and to scan and transitively follow
6993 // the unmarked oops. It is also used during the concurrent precleaning
6994 // phase while scanning objects on dirty cards in the CMS generation.
6995 // The marks are made in the marking bit map and the marking stack is
6996 // used for keeping the (newly) grey objects during the scan.
6997 // The parallel version (Par_...) appears further below.
6998 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6999   if (obj != NULL) {
7000     assert(obj->is_oop(), "expected an oop");
7001     HeapWord* addr = (HeapWord*)obj;
7002     assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
7003     assert(_collector->overflow_list_is_empty(),
7004            "overflow list should be empty");
7005     if (_span.contains(addr) &&
7006         !_bit_map->isMarked(addr)) {
7007       // mark bit map (object is now grey)
7008       _bit_map->mark(addr);
7009       // push on marking stack (stack should be empty), and drain the
7010       // stack by applying this closure to the oops in the oops popped
7011       // from the stack (i.e. blacken the grey objects)
7012       bool res = _mark_stack->push(obj);
7013       assert(res, "Should have space to push on empty stack");
7014       do {
7015         oop new_oop = _mark_stack->pop();
7016         assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
7017         assert(_bit_map->isMarked((HeapWord*)new_oop),
7018                "only grey objects on this stack");
7019         // iterate over the oops in this oop, marking and pushing
7020         // the ones in CMS heap (i.e. in _span).
7021         new_oop->oop_iterate(&_pushAndMarkClosure);
7022         // check if it's time to yield
7023         do_yield_check();
7024       } while (!_mark_stack->isEmpty() ||
7025                (!_concurrent_precleaning && take_from_overflow_list()));
7026         // if marking stack is empty, and we are not doing this
7027         // during precleaning, then check the overflow list
7028     }
7029     assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
7030     assert(_collector->overflow_list_is_empty(),
7031            "overflow list was drained above");
7032     // We could restore evacuated mark words, if any, used for
7033     // overflow list links here because the overflow list is
7034     // provably empty here. That would reduce the maximum
7035     // size requirements for preserved_{oop,mark}_stack.
7036     // But we'll just postpone it until we are all done
7037     // so we can just stream through.
7038     if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
7039       _collector->restore_preserved_marks_if_any();
7040       assert(_collector->no_preserved_marks(), "No preserved marks");
7041     }
7042     assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
7043            "All preserved marks should have been restored above");
7044   }
7045 }
7046 
7047 void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
7048 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
7049 
7050 void MarkRefsIntoAndScanClosure::do_yield_work() {
7051   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7052          "CMS thread should hold CMS token");
7053   assert_lock_strong(_freelistLock);
7054   assert_lock_strong(_bit_map->lock());
7055   // relinquish the freelist lock and the bit map lock
7056   _bit_map->lock()->unlock();
7057   _freelistLock->unlock();
7058   ConcurrentMarkSweepThread::desynchronize(true);
7059   ConcurrentMarkSweepThread::acknowledge_yield_request();
7060   _collector->stopTimer();
7061   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7062   if (PrintCMSStatistics != 0) {
7063     _collector->incrementYields();
7064   }
7065   _collector->icms_wait();
7066 
7067   // See the comment in coordinator_yield()
7068   for (unsigned i = 0;
7069        i < CMSYieldSleepCount &&
7070        ConcurrentMarkSweepThread::should_yield() &&
7071        !CMSCollector::foregroundGCIsActive();
7072        ++i) {
7073     os::sleep(Thread::current(), 1, false);
7074     ConcurrentMarkSweepThread::acknowledge_yield_request();
7075   }
7076 
7077   ConcurrentMarkSweepThread::synchronize(true);
7078   _freelistLock->lock_without_safepoint_check();
7079   _bit_map->lock()->lock_without_safepoint_check();
7080   _collector->startTimer();
7081 }
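
     // A note on the ordering above: the bit map lock is dropped before the
     // freelistLock, and on the way back the freelistLock is reacquired first
     // and the bit map lock second, presumably so that the acquisition order
     // stays consistent with the locks' relative ranks and no lock-order
     // inversion can arise with other threads that take both locks.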
7082 
7083 ///////////////////////////////////////////////////////////
7084 // Par_MarkRefsIntoAndScanClosure: a parallel version of
7085 //                                 MarkRefsIntoAndScanClosure
7086 ///////////////////////////////////////////////////////////
7087 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
7088   CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
7089   CMSBitMap* bit_map, OopTaskQueue* work_queue):
7090   _span(span),
7091   _bit_map(bit_map),
7092   _work_queue(work_queue),
7093   _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
7094                        (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
7095   _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue)
7096 {
7097   _ref_processor = rp;
7098   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7099 }
7100 
7101 // This closure is used to mark refs into the CMS generation at the
7102 // second (final) checkpoint, and to scan and transitively follow
7103 // the unmarked oops. The marks are made in the marking bit map and
7104 // the work_queue is used for keeping the (newly) grey objects during
7105 // the scan phase whence they are also available for stealing by parallel
7106 // threads. Since the marking bit map is shared, updates are
7107 // synchronized (via CAS).
7108 void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
7109   if (obj != NULL) {
7110     // Ignore mark word because this could be an already marked oop
7111     // that may be chained at the end of the overflow list.
7112     assert(obj->is_oop(true), "expected an oop");
7113     HeapWord* addr = (HeapWord*)obj;
7114     if (_span.contains(addr) &&
7115         !_bit_map->isMarked(addr)) {
7116       // mark bit map (object will become grey):
7117       // It is possible for several threads to be
7118       // trying to "claim" this object concurrently;
7119       // the unique thread that succeeds in marking the
7120       // object first will do the subsequent push on
7121       // to the work queue (or overflow list).
7122       if (_bit_map->par_mark(addr)) {
7123         // push on work_queue (which may not be empty), and trim the
7124         // queue to an appropriate length by applying this closure to
7125         // the oops in the oops popped from the stack (i.e. blacken the
7126         // grey objects)
7127         bool res = _work_queue->push(obj);
7128         assert(res, "Low water mark should be less than capacity?");
7129         trim_queue(_low_water_mark);
7130       } // Else, another thread claimed the object
7131     }
7132   }
7133 }
7134 
7135 void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
7136 void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
7137 
7138 // This closure is used to rescan the marked objects on the dirty cards
7139 // in the mod union table and the card table proper.
7140 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
7141   oop p, MemRegion mr) {
7142 
7143   size_t size = 0;
7144   HeapWord* addr = (HeapWord*)p;
7145   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7146   assert(_span.contains(addr), "we are scanning the CMS generation");
7147   // check if it's time to yield
7148   if (do_yield_check()) {
7149     // We yielded for some foreground stop-world work,
7150     // and we have been asked to abort this ongoing preclean cycle.
7151     return 0;
7152   }
7153   if (_bitMap->isMarked(addr)) {
7154     // it's marked; is it potentially uninitialized?
7155     if (p->klass_or_null() != NULL) {
7156         // an initialized object; ignore mark word in verification below
7157         // since we are running concurrent with mutators
7158         assert(p->is_oop(true), "should be an oop");
7159         if (p->is_objArray()) {
7160           // objArrays are precisely marked; restrict scanning
7161           // to dirty cards only.
7162           size = CompactibleFreeListSpace::adjustObjectSize(
7163                    p->oop_iterate(_scanningClosure, mr));
7164         } else {
7165           // A non-array may have been imprecisely marked; we need
7166           // to scan the object in its entirety.
7167           size = CompactibleFreeListSpace::adjustObjectSize(
7168                    p->oop_iterate(_scanningClosure));
7169         }
7170         #ifdef ASSERT
7171           size_t direct_size =
7172             CompactibleFreeListSpace::adjustObjectSize(p->size());
7173           assert(size == direct_size, "Inconsistency in size");
7174           assert(size >= 3, "Necessary for Printezis marks to work");
7175           if (!_bitMap->isMarked(addr+1)) {
7176             _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
7177           } else {
7178             _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
7179             assert(_bitMap->isMarked(addr+size-1),
7180                    "inconsistent Printezis mark");
7181           }
7182         #endif // ASSERT
7183     } else {
7184       // An uninitialized object.
7185       assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
7186       HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
7187       size = pointer_delta(nextOneAddr + 1, addr);
7188       assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
7189              "alignment problem");
7190       // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
7191       // will dirty the card when the klass pointer is installed in the
7192       // object (signaling the completion of initialization).
7193     }
7194   } else {
7195     // Either a not yet marked object or an uninitialized object
7196     if (p->klass_or_null() == NULL) {
7197       // An uninitialized object, skip to the next card, since
7198       // we may not be able to read its P-bits yet.
7199       assert(size == 0, "Initial value");
7200     } else {
7201       // An object not (yet) reached by marking: we merely need to
7202       // compute its size so as to go look at the next block.
7203       assert(p->is_oop(true), "should be an oop");
7204       size = CompactibleFreeListSpace::adjustObjectSize(p->size());
7205     }
7206   }
7207   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7208   return size;
7209 }
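
     // To summarize the branches above: a marked, initialized object is
     // scanned (precisely, over the dirty-card MemRegion, for objArrays; in
     // its entirety otherwise); a marked but uninitialized object has its size
     // read from the Printezis bits and is not scanned; an unmarked,
     // initialized object is merely sized so that the walk can step over it;
     // and both the yield-and-abort case and the unmarked, uninitialized case
     // return 0, which the caller is left to interpret (for the latter,
     // presumably, as "skip to the next card", as noted above).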
7210 
7211 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
7212   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7213          "CMS thread should hold CMS token");
7214   assert_lock_strong(_freelistLock);
7215   assert_lock_strong(_bitMap->lock());
7216   // relinquish the freelist lock and the bit map lock
7217   _bitMap->lock()->unlock();
7218   _freelistLock->unlock();
7219   ConcurrentMarkSweepThread::desynchronize(true);
7220   ConcurrentMarkSweepThread::acknowledge_yield_request();
7221   _collector->stopTimer();
7222   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7223   if (PrintCMSStatistics != 0) {
7224     _collector->incrementYields();
7225   }
7226   _collector->icms_wait();
7227 
7228   // See the comment in coordinator_yield()
7229   for (unsigned i = 0; i < CMSYieldSleepCount &&
7230                    ConcurrentMarkSweepThread::should_yield() &&
7231                    !CMSCollector::foregroundGCIsActive(); ++i) {
7232     os::sleep(Thread::current(), 1, false);
7233     ConcurrentMarkSweepThread::acknowledge_yield_request();
7234   }
7235 
7236   ConcurrentMarkSweepThread::synchronize(true);
7237   _freelistLock->lock_without_safepoint_check();
7238   _bitMap->lock()->lock_without_safepoint_check();
7239   _collector->startTimer();
7240 }
7241 
7242 
7243 //////////////////////////////////////////////////////////////////
7244 // SurvivorSpacePrecleanClosure
7245 //////////////////////////////////////////////////////////////////
7246 // This (single-threaded) closure is used to preclean the oops in
7247 // the survivor spaces.
7248 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
7249 
7250   HeapWord* addr = (HeapWord*)p;
7251   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7252   assert(!_span.contains(addr), "we are scanning the survivor spaces");
7253   assert(p->klass_or_null() != NULL, "object should be initialized");
7254   // an initialized object; ignore mark word in verification below
7255   // since we are running concurrent with mutators
7256   assert(p->is_oop(true), "should be an oop");
7257   // Note that we do not yield while we iterate over
7258   // the interior oops of p, pushing the relevant ones
7259   // on our marking stack.
7260   size_t size = p->oop_iterate(_scanning_closure);
7261   do_yield_check();
7262   // Observe that below, we do not abandon the preclean
7263   // phase as soon as we should; rather we empty the
7264   // marking stack before returning. This is to satisfy
7265   // some existing assertions. In general, it may be a
7266   // good idea to abort immediately and complete the marking
7267   // from the grey objects at a later time.
7268   while (!_mark_stack->isEmpty()) {
7269     oop new_oop = _mark_stack->pop();
7270     assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
7271     assert(_bit_map->isMarked((HeapWord*)new_oop),
7272            "only grey objects on this stack");
7273     // iterate over the oops in this oop, marking and pushing
7274     // the ones in CMS heap (i.e. in _span).
7275     new_oop->oop_iterate(_scanning_closure);
7276     // check if it's time to yield
7277     do_yield_check();
7278   }
7279   unsigned int after_count =
7280     GenCollectedHeap::heap()->total_collections();
7281   bool abort = (_before_count != after_count) ||
7282                _collector->should_abort_preclean();
7283   return abort ? 0 : size;
7284 }
7285 
7286 void SurvivorSpacePrecleanClosure::do_yield_work() {
7287   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7288          "CMS thread should hold CMS token");
7289   assert_lock_strong(_bit_map->lock());
7290   // Relinquish the bit map lock
7291   _bit_map->lock()->unlock();
7292   ConcurrentMarkSweepThread::desynchronize(true);
7293   ConcurrentMarkSweepThread::acknowledge_yield_request();
7294   _collector->stopTimer();
7295   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7296   if (PrintCMSStatistics != 0) {
7297     _collector->incrementYields();
7298   }
7299   _collector->icms_wait();
7300 
7301   // See the comment in coordinator_yield()
7302   for (unsigned i = 0; i < CMSYieldSleepCount &&
7303                        ConcurrentMarkSweepThread::should_yield() &&
7304                        !CMSCollector::foregroundGCIsActive(); ++i) {
7305     os::sleep(Thread::current(), 1, false);
7306     ConcurrentMarkSweepThread::acknowledge_yield_request();
7307   }
7308 
7309   ConcurrentMarkSweepThread::synchronize(true);
7310   _bit_map->lock()->lock_without_safepoint_check();
7311   _collector->startTimer();
7312 }
7313 
7314 // This closure is used to rescan the marked objects on the dirty cards
7315 // in the mod union table and the card table proper. In the parallel
7316 // case, although the bitMap is shared, we do a single read so the
7317 // isMarked() query is "safe".
7318 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
7319   // Ignore mark word because we are running concurrent with mutators
7320   assert(p->is_oop_or_null(true), "expected an oop or null");
7321   HeapWord* addr = (HeapWord*)p;
7322   assert(_span.contains(addr), "we are scanning the CMS generation");
7323   bool is_obj_array = false;
7324   #ifdef ASSERT
7325     if (!_parallel) {
7326       assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
7327       assert(_collector->overflow_list_is_empty(),
7328              "overflow list should be empty");
7329 
7330     }
7331   #endif // ASSERT
7332   if (_bit_map->isMarked(addr)) {
7333     // Obj arrays are precisely marked, non-arrays are not;
7334     // so we scan objArrays precisely and non-arrays in their
7335     // entirety.
7336     if (p->is_objArray()) {
7337       is_obj_array = true;
7338       if (_parallel) {
7339         p->oop_iterate(_par_scan_closure, mr);
7340       } else {
7341         p->oop_iterate(_scan_closure, mr);
7342       }
7343     } else {
7344       if (_parallel) {
7345         p->oop_iterate(_par_scan_closure);
7346       } else {
7347         p->oop_iterate(_scan_closure);
7348       }
7349     }
7350   }
7351   #ifdef ASSERT
7352     if (!_parallel) {
7353       assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
7354       assert(_collector->overflow_list_is_empty(),
7355              "overflow list should be empty");
7356 
7357     }
7358   #endif // ASSERT
7359   return is_obj_array;
7360 }
7361 
7362 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
7363                         MemRegion span,
7364                         CMSBitMap* bitMap, CMSMarkStack*  markStack,
7365                         bool should_yield, bool verifying):
7366   _collector(collector),
7367   _span(span),
7368   _bitMap(bitMap),
7369   _mut(&collector->_modUnionTable),
7370   _markStack(markStack),
7371   _yield(should_yield),
7372   _skipBits(0)
7373 {
7374   assert(_markStack->isEmpty(), "stack should be empty");
7375   _finger = _bitMap->startWord();
7376   _threshold = _finger;
7377   assert(_collector->_restart_addr == NULL, "Sanity check");
7378   assert(_span.contains(_finger), "Out of bounds _finger?");
7379   DEBUG_ONLY(_verifying = verifying;)
7380 }
7381 
7382 void MarkFromRootsClosure::reset(HeapWord* addr) {
7383   assert(_markStack->isEmpty(), "would cause duplicates on stack");
7384   assert(_span.contains(addr), "Out of bounds _finger?");
7385   _finger = addr;
7386   _threshold = (HeapWord*)round_to(
7387                  (intptr_t)_finger, CardTableModRefBS::card_size);
7388 }
7389 
7390 // Should revisit to see if this should be restructured for
7391 // greater efficiency.
7392 bool MarkFromRootsClosure::do_bit(size_t offset) {
7393   if (_skipBits > 0) {
7394     _skipBits--;
7395     return true;
7396   }
7397   // convert offset into a HeapWord*
7398   HeapWord* addr = _bitMap->startWord() + offset;
7399   assert(_bitMap->endWord() && addr < _bitMap->endWord(),
7400          "address out of range");
7401   assert(_bitMap->isMarked(addr), "tautology");
7402   if (_bitMap->isMarked(addr+1)) {
7403     // this is an allocated but not yet initialized object
7404     assert(_skipBits == 0, "tautology");
7405     _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
7406     oop p = oop(addr);
7407     if (p->klass_or_null() == NULL) {
7408       DEBUG_ONLY(if (!_verifying) {)
7409         // We re-dirty the cards on which this object lies and increase
7410         // the _threshold so that we'll come back to scan this object
7411         // during the preclean or remark phase. (CMSCleanOnEnter)
7412         if (CMSCleanOnEnter) {
7413           size_t sz = _collector->block_size_using_printezis_bits(addr);
7414           HeapWord* end_card_addr   = (HeapWord*)round_to(
7415                                          (intptr_t)(addr+sz), CardTableModRefBS::card_size);
7416           MemRegion redirty_range = MemRegion(addr, end_card_addr);
7417           assert(!redirty_range.is_empty(), "Arithmetical tautology");
7418           // Bump _threshold to end_card_addr; note that
7419           // _threshold cannot possibly exceed end_card_addr, anyhow.
7420           // This prevents future clearing of the card as the scan proceeds
7421           // to the right.
7422           assert(_threshold <= end_card_addr,
7423                  "Because we are just scanning into this object");
7424           if (_threshold < end_card_addr) {
7425             _threshold = end_card_addr;
7426           }
7427           if (p->klass_or_null() != NULL) {
7428             // Redirty the range of cards...
7429             _mut->mark_range(redirty_range);
7430           } // ...else the setting of klass will dirty the card anyway.
7431         }
7432       DEBUG_ONLY(})
7433       return true;
7434     }
7435   }
7436   scanOopsInOop(addr);
7437   return true;
7438 }
7439 
7440 // We take a break if we've been at this for a while,
7441 // so as to avoid monopolizing the locks involved.
7442 void MarkFromRootsClosure::do_yield_work() {
7443   // First give up the locks, then yield, then re-lock
7444   // We should probably use a constructor/destructor idiom to
7445   // do this unlock/lock or modify the MutexUnlocker class to
7446   // serve our purpose. XXX
7447   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7448          "CMS thread should hold CMS token");
7449   assert_lock_strong(_bitMap->lock());
7450   _bitMap->lock()->unlock();
7451   ConcurrentMarkSweepThread::desynchronize(true);
7452   ConcurrentMarkSweepThread::acknowledge_yield_request();
7453   _collector->stopTimer();
7454   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7455   if (PrintCMSStatistics != 0) {
7456     _collector->incrementYields();
7457   }
7458   _collector->icms_wait();
7459 
7460   // See the comment in coordinator_yield()
7461   for (unsigned i = 0; i < CMSYieldSleepCount &&
7462                        ConcurrentMarkSweepThread::should_yield() &&
7463                        !CMSCollector::foregroundGCIsActive(); ++i) {
7464     os::sleep(Thread::current(), 1, false);
7465     ConcurrentMarkSweepThread::acknowledge_yield_request();
7466   }
7467 
7468   ConcurrentMarkSweepThread::synchronize(true);
7469   _bitMap->lock()->lock_without_safepoint_check();
7470   _collector->startTimer();
7471 }
7472 
7473 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
7474   assert(_bitMap->isMarked(ptr), "expected bit to be set");
7475   assert(_markStack->isEmpty(),
7476          "should drain stack to limit stack usage");
7477   // convert ptr to an oop preparatory to scanning
7478   oop obj = oop(ptr);
7479   // Ignore mark word in verification below, since we
7480   // may be running concurrent with mutators.
7481   assert(obj->is_oop(true), "should be an oop");
7482   assert(_finger <= ptr, "_finger runneth ahead");
7483   // advance the finger to right end of this object
7484   _finger = ptr + obj->size();
7485   assert(_finger > ptr, "we just incremented it above");
7486   // On large heaps, it may take us some time to get through
7487   // the marking phase (especially if running iCMS). During
7488   // this time it's possible that a lot of mutations have
7489   // accumulated in the card table and the mod union table --
7490   // these mutation records are redundant until we have
7491   // actually traced into the corresponding card.
7492   // Here, we check whether advancing the finger would make
7493   // us cross into a new card, and if so clear corresponding
7494   // cards in the MUT (preclean them in the card-table in the
7495   // future).
7496 
7497   DEBUG_ONLY(if (!_verifying) {)
7498     // The clean-on-enter optimization is disabled by default,
7499     // until we fix 6178663.
7500     if (CMSCleanOnEnter && (_finger > _threshold)) {
7501       // [_threshold, _finger) represents the interval
7502       // of cards to be cleared  in MUT (or precleaned in card table).
7503       // The set of cards to be cleared is all those that overlap
7504       // with the interval [_threshold, _finger); note that
7505       // _threshold is always kept card-aligned but _finger isn't
7506       // always card-aligned.
7507       HeapWord* old_threshold = _threshold;
7508       assert(old_threshold == (HeapWord*)round_to(
7509               (intptr_t)old_threshold, CardTableModRefBS::card_size),
7510              "_threshold should always be card-aligned");
7511       _threshold = (HeapWord*)round_to(
7512                      (intptr_t)_finger, CardTableModRefBS::card_size);
7513       MemRegion mr(old_threshold, _threshold);
7514       assert(!mr.is_empty(), "Control point invariant");
7515       assert(_span.contains(mr), "Should clear within span");
7516       _mut->clear_range(mr);
7517     }
7518   DEBUG_ONLY(})
7519   // Note: the finger doesn't advance while we drain
7520   // the stack below.
7521   PushOrMarkClosure pushOrMarkClosure(_collector,
7522                                       _span, _bitMap, _markStack,
7523                                       _finger, this);
7524   bool res = _markStack->push(obj);
7525   assert(res, "Empty non-zero size stack should have space for single push");
7526   while (!_markStack->isEmpty()) {
7527     oop new_oop = _markStack->pop();
7528     // Skip verifying header mark word below because we are
7529     // running concurrent with mutators.
7530     assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7531     // now scan this oop's oops
7532     new_oop->oop_iterate(&pushOrMarkClosure);
7533     do_yield_check();
7534   }
7535   assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
7536 }
7537 
7538 Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
7539                        CMSCollector* collector, MemRegion span,
7540                        CMSBitMap* bit_map,
7541                        OopTaskQueue* work_queue,
7542                        CMSMarkStack*  overflow_stack,
7543                        bool should_yield):
7544   _collector(collector),
7545   _whole_span(collector->_span),
7546   _span(span),
7547   _bit_map(bit_map),
7548   _mut(&collector->_modUnionTable),
7549   _work_queue(work_queue),
7550   _overflow_stack(overflow_stack),
7551   _yield(should_yield),
7552   _skip_bits(0),
7553   _task(task)
7554 {
7555   assert(_work_queue->size() == 0, "work_queue should be empty");
7556   _finger = span.start();
7557   _threshold = _finger;     // XXX Defer clear-on-enter optimization for now
7558   assert(_span.contains(_finger), "Out of bounds _finger?");
7559 }
7560 
7561 // Should revisit to see if this should be restructured for
7562 // greater efficiency.
7563 bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
7564   if (_skip_bits > 0) {
7565     _skip_bits--;
7566     return true;
7567   }
7568   // convert offset into a HeapWord*
7569   HeapWord* addr = _bit_map->startWord() + offset;
7570   assert(_bit_map->endWord() && addr < _bit_map->endWord(),
7571          "address out of range");
7572   assert(_bit_map->isMarked(addr), "tautology");
7573   if (_bit_map->isMarked(addr+1)) {
7574     // this is an allocated object that might not yet be initialized
7575     assert(_skip_bits == 0, "tautology");
7576     _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
7577     oop p = oop(addr);
7578     if (p->klass_or_null() == NULL) {
7579       // in the case of the Clean-on-Enter optimization, redirty the card
7580       // and avoid clearing the card by increasing the threshold.
7581       return true;
7582     }
7583   }
7584   scan_oops_in_oop(addr);
7585   return true;
7586 }
7587 
7588 void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
7589   assert(_bit_map->isMarked(ptr), "expected bit to be set");
7590   // Should we assert that our work queue is empty or
7591   // below some drain limit?
7592   assert(_work_queue->size() == 0,
7593          "should drain stack to limit stack usage");
7594   // convert ptr to an oop preparatory to scanning
7595   oop obj = oop(ptr);
7596   // Ignore mark word in verification below, since we
7597   // may be running concurrent with mutators.
7598   assert(obj->is_oop(true), "should be an oop");
7599   assert(_finger <= ptr, "_finger runneth ahead");
7600   // advance the finger to right end of this object
7601   _finger = ptr + obj->size();
7602   assert(_finger > ptr, "we just incremented it above");
7603   // On large heaps, it may take us some time to get through
7604   // the marking phase (especially if running iCMS). During
7605   // this time it's possible that a lot of mutations have
7606   // accumulated in the card table and the mod union table --
7607   // these mutation records are redundant until we have
7608   // actually traced into the corresponding card.
7609   // Here, we check whether advancing the finger would make
7610   // us cross into a new card, and if so clear corresponding
7611   // cards in the MUT (preclean them in the card-table in the
7612   // future).
7613 
7614   // The clean-on-enter optimization is disabled by default,
7615   // until we fix 6178663.
7616   if (CMSCleanOnEnter && (_finger > _threshold)) {
7617     // [_threshold, _finger) represents the interval
7618     // of cards to be cleared  in MUT (or precleaned in card table).
7619     // The set of cards to be cleared is all those that overlap
7620     // with the interval [_threshold, _finger); note that
7621     // _threshold is always kept card-aligned but _finger isn't
7622     // always card-aligned.
7623     HeapWord* old_threshold = _threshold;
7624     assert(old_threshold == (HeapWord*)round_to(
7625             (intptr_t)old_threshold, CardTableModRefBS::card_size),
7626            "_threshold should always be card-aligned");
7627     _threshold = (HeapWord*)round_to(
7628                    (intptr_t)_finger, CardTableModRefBS::card_size);
7629     MemRegion mr(old_threshold, _threshold);
7630     assert(!mr.is_empty(), "Control point invariant");
7631     assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
7632     _mut->clear_range(mr);
7633   }
7634 
7635   // Note: the local finger doesn't advance while we drain
7636   // the stack below, but the global finger sure can and will.
7637   HeapWord** gfa = _task->global_finger_addr();
7638   Par_PushOrMarkClosure pushOrMarkClosure(_collector,
7639                                       _span, _bit_map,
7640                                       _work_queue,
7641                                       _overflow_stack,
7642                                       _finger,
7643                                       gfa, this);
7644   bool res = _work_queue->push(obj);   // overflow could occur here
7645   assert(res, "Will hold once we use workqueues");
7646   while (true) {
7647     oop new_oop;
7648     if (!_work_queue->pop_local(new_oop)) {
7649       // We emptied our work_queue; check if there's stuff that can
7650       // be gotten from the overflow stack.
7651       if (CMSConcMarkingTask::get_work_from_overflow_stack(
7652             _overflow_stack, _work_queue)) {
7653         do_yield_check();
7654         continue;
7655       } else {  // done
7656         break;
7657       }
7658     }
7659     // Skip verifying header mark word below because we are
7660     // running concurrent with mutators.
7661     assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7662     // now scan this oop's oops
7663     new_oop->oop_iterate(&pushOrMarkClosure);
7664     do_yield_check();
7665   }
7666   assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
7667 }
7668 
7669 // Yield in response to a request from VM Thread or
7670 // from mutators.
7671 void Par_MarkFromRootsClosure::do_yield_work() {
7672   assert(_task != NULL, "sanity");
7673   _task->yield();
7674 }
7675 
7676 // A variant of the above used for verifying CMS marking work.
7677 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
7678                         MemRegion span,
7679                         CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7680                         CMSMarkStack*  mark_stack):
7681   _collector(collector),
7682   _span(span),
7683   _verification_bm(verification_bm),
7684   _cms_bm(cms_bm),
7685   _mark_stack(mark_stack),
7686   _pam_verify_closure(collector, span, verification_bm, cms_bm,
7687                       mark_stack)
7688 {
7689   assert(_mark_stack->isEmpty(), "stack should be empty");
7690   _finger = _verification_bm->startWord();
7691   assert(_collector->_restart_addr == NULL, "Sanity check");
7692   assert(_span.contains(_finger), "Out of bounds _finger?");
7693 }
7694 
7695 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
7696   assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
7697   assert(_span.contains(addr), "Out of bounds _finger?");
7698   _finger = addr;
7699 }
7700 
7701 // Should revisit to see if this should be restructured for
7702 // greater efficiency.
7703 bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
7704   // convert offset into a HeapWord*
7705   HeapWord* addr = _verification_bm->startWord() + offset;
7706   assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
7707          "address out of range");
7708   assert(_verification_bm->isMarked(addr), "tautology");
7709   assert(_cms_bm->isMarked(addr), "tautology");
7710 
7711   assert(_mark_stack->isEmpty(),
7712          "should drain stack to limit stack usage");
7713   // convert addr to an oop preparatory to scanning
7714   oop obj = oop(addr);
7715   assert(obj->is_oop(), "should be an oop");
7716   assert(_finger <= addr, "_finger runneth ahead");
7717   // advance the finger to right end of this object
7718   _finger = addr + obj->size();
7719   assert(_finger > addr, "we just incremented it above");
7720   // Note: the finger doesn't advance while we drain
7721   // the stack below.
7722   bool res = _mark_stack->push(obj);
7723   assert(res, "Empty non-zero size stack should have space for single push");
7724   while (!_mark_stack->isEmpty()) {
7725     oop new_oop = _mark_stack->pop();
7726     assert(new_oop->is_oop(), "Oops! expected to pop an oop");
7727     // now scan this oop's oops
7728     new_oop->oop_iterate(&_pam_verify_closure);
7729   }
7730   assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
7731   return true;
7732 }
7733 
7734 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
7735   CMSCollector* collector, MemRegion span,
7736   CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7737   CMSMarkStack*  mark_stack):
7738   CMSOopClosure(collector->ref_processor()),
7739   _collector(collector),
7740   _span(span),
7741   _verification_bm(verification_bm),
7742   _cms_bm(cms_bm),
7743   _mark_stack(mark_stack)
7744 { }
7745 
7746 void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
7747 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7748 
7749 // Upon stack overflow, we discard (part of) the stack,
7750 // remembering the least address amongst those discarded
7751 // in CMSCollector's _restart_addr.
7752 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
7753   // Remember the least grey address discarded
7754   HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
7755   _collector->lower_restart_addr(ra);
7756   _mark_stack->reset();  // discard stack contents
7757   _mark_stack->expand(); // expand the stack if possible
7758 }
7759 
7760 void PushAndMarkVerifyClosure::do_oop(oop obj) {
7761   assert(obj->is_oop_or_null(), "expected an oop or NULL");
7762   HeapWord* addr = (HeapWord*)obj;
7763   if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
7764     // Oop lies in _span and isn't yet grey or black
7765     _verification_bm->mark(addr);            // now grey
7766     if (!_cms_bm->isMarked(addr)) {
7767       oop(addr)->print();
7768       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
7769                              addr);
7770       fatal("... aborting");
7771     }
7772 
7773     if (!_mark_stack->push(obj)) { // stack overflow
7774       if (PrintCMSStatistics != 0) {
7775         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7776                                SIZE_FORMAT, _mark_stack->capacity());
7777       }
7778       assert(_mark_stack->isFull(), "Else push should have succeeded");
7779       handle_stack_overflow(addr);
7780     }
7781     // anything including and to the right of _finger
7782     // will be scanned as we iterate over the remainder of the
7783     // bit map
7784   }
7785 }
7786 
7787 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
7788                      MemRegion span,
7789                      CMSBitMap* bitMap, CMSMarkStack*  markStack,
7790                      HeapWord* finger, MarkFromRootsClosure* parent) :
7791   CMSOopClosure(collector->ref_processor()),
7792   _collector(collector),
7793   _span(span),
7794   _bitMap(bitMap),
7795   _markStack(markStack),
7796   _finger(finger),
7797   _parent(parent)
7798 { }
7799 
7800 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7801                      MemRegion span,
7802                      CMSBitMap* bit_map,
7803                      OopTaskQueue* work_queue,
7804                      CMSMarkStack*  overflow_stack,
7805                      HeapWord* finger,
7806                      HeapWord** global_finger_addr,
7807                      Par_MarkFromRootsClosure* parent) :
7808   CMSOopClosure(collector->ref_processor()),
7809   _collector(collector),
7810   _whole_span(collector->_span),
7811   _span(span),
7812   _bit_map(bit_map),
7813   _work_queue(work_queue),
7814   _overflow_stack(overflow_stack),
7815   _finger(finger),
7816   _global_finger_addr(global_finger_addr),
7817   _parent(parent)
7818 { }
7819 
7820 // Assumes thread-safe access by callers, who are
7821 // responsible for mutual exclusion.
7822 void CMSCollector::lower_restart_addr(HeapWord* low) {
7823   assert(_span.contains(low), "Out of bounds addr");
7824   if (_restart_addr == NULL) {
7825     _restart_addr = low;
7826   } else {
7827     _restart_addr = MIN2(_restart_addr, low);
7828   }
7829 }
7830 
7831 // Upon stack overflow, we discard (part of) the stack,
7832 // remembering the least address amongst those discarded
7833 // in CMSCollector's _restart_addr.
7834 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7835   // Remember the least grey address discarded
7836   HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
7837   _collector->lower_restart_addr(ra);
7838   _markStack->reset();  // discard stack contents
7839   _markStack->expand(); // expand the stack if possible
7840 }
7841 
7842 // Upon stack overflow, we discard (part of) the stack,
7843 // remembering the least address amongst those discarded
7844 // in CMSCollector's _restart_addr.
7845 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7846   // We need to do this under a mutex to prevent other
7847   // workers from interfering with the work done below.
7848   MutexLockerEx ml(_overflow_stack->par_lock(),
7849                    Mutex::_no_safepoint_check_flag);
7850   // Remember the least grey address discarded
7851   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
7852   _collector->lower_restart_addr(ra);
7853   _overflow_stack->reset();  // discard stack contents
7854   _overflow_stack->expand(); // expand the stack if possible
7855 }
7856 
7857 void CMKlassClosure::do_klass(Klass* k) {
7858   assert(_oop_closure != NULL, "Not initialized?");
7859   k->oops_do(_oop_closure);
7860 }
7861 
7862 void PushOrMarkClosure::do_oop(oop obj) {
7863   // Ignore mark word because we are running concurrent with mutators.
7864   assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7865   HeapWord* addr = (HeapWord*)obj;
7866   if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
7867     // Oop lies in _span and isn't yet grey or black
7868     _bitMap->mark(addr);            // now grey
7869     if (addr < _finger) {
7870       // the bit map iteration has already either passed, or
7871       // sampled, this bit in the bit map; we'll need to
7872       // use the marking stack to scan this oop's oops.
7873       bool simulate_overflow = false;
7874       NOT_PRODUCT(
7875         if (CMSMarkStackOverflowALot &&
7876             _collector->simulate_overflow()) {
7877           // simulate a stack overflow
7878           simulate_overflow = true;
7879         }
7880       )
7881       if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
7882         if (PrintCMSStatistics != 0) {
7883           gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7884                                  SIZE_FORMAT, _markStack->capacity());
7885         }
7886         assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
7887         handle_stack_overflow(addr);
7888       }
7889     }
7890     // anything including and to the right of _finger
7891     // will be scanned as we iterate over the remainder of the
7892     // bit map
7893     do_yield_check();
7894   }
7895 }
7896 
7897 void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
7898 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
7899 
7900 void Par_PushOrMarkClosure::do_oop(oop obj) {
7901   // Ignore mark word because we are running concurrent with mutators.
7902   assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7903   HeapWord* addr = (HeapWord*)obj;
7904   if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
7905     // Oop lies in _span and isn't yet grey or black
7906     // We read the global_finger (volatile read) strictly after marking oop
7907     bool res = _bit_map->par_mark(addr);    // now grey
7908     volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
7909     // Should we push this marked oop on our stack?
7910     // -- if someone else marked it, nothing to do
7911     // -- if target oop is above global finger nothing to do
7912     // -- if target oop is in chunk and above local finger
7913     //      then nothing to do
7914     // -- else push on work queue
7915     if (   !res       // someone else marked it, they will deal with it
7916         || (addr >= *gfa)  // will be scanned in a later task
7917         || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
7918       return;
7919     }
7920     // the bit map iteration has already either passed, or
7921     // sampled, this bit in the bit map; we'll need to
7922     // use the marking stack to scan this oop's oops.
7923     bool simulate_overflow = false;
7924     NOT_PRODUCT(
7925       if (CMSMarkStackOverflowALot &&
7926           _collector->simulate_overflow()) {
7927         // simulate a stack overflow
7928         simulate_overflow = true;
7929       }
7930     )
7931     if (simulate_overflow ||
7932         !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
7933       // stack overflow
7934       if (PrintCMSStatistics != 0) {
7935         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7936                                SIZE_FORMAT, _overflow_stack->capacity());
7937       }
7938       // We cannot assert that the overflow stack is full because
7939       // it may have been emptied since.
7940       assert(simulate_overflow ||
7941              _work_queue->size() == _work_queue->max_elems(),
7942             "Else push should have succeeded");
7943       handle_stack_overflow(addr);
7944     }
7945     do_yield_check();
7946   }
7947 }
7948 
7949 void Par_PushOrMarkClosure::do_oop(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
7950 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7951 
7952 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7953                                        MemRegion span,
7954                                        ReferenceProcessor* rp,
7955                                        CMSBitMap* bit_map,
7956                                        CMSBitMap* mod_union_table,
7957                                        CMSMarkStack*  mark_stack,
7958                                        bool           concurrent_precleaning):
7959   CMSOopClosure(rp),
7960   _collector(collector),
7961   _span(span),
7962   _bit_map(bit_map),
7963   _mod_union_table(mod_union_table),
7964   _mark_stack(mark_stack),
7965   _concurrent_precleaning(concurrent_precleaning)
7966 {
7967   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7968 }
7969 
7970 // Grey object rescan during pre-cleaning and second checkpoint phases --
7971 // the non-parallel version (the parallel version appears further below.)
7972 void PushAndMarkClosure::do_oop(oop obj) {
7973   // Ignore mark word verification. If during concurrent precleaning,
7974   // the object monitor may be locked. If during the checkpoint
7975   // phases, the object may already have been reached by a different
7976   // path and may be at the end of the global overflow list (so
7977   // the mark word may be NULL).
7978   assert(obj->is_oop_or_null(true /* ignore mark word */),
7979          "expected an oop or NULL");
7980   HeapWord* addr = (HeapWord*)obj;
7981   // Check if oop points into the CMS generation
7982   // and is not marked
7983   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7984     // a white object ...
7985     _bit_map->mark(addr);         // ... now grey
7986     // push on the marking stack (grey set)
7987     bool simulate_overflow = false;
7988     NOT_PRODUCT(
7989       if (CMSMarkStackOverflowALot &&
7990           _collector->simulate_overflow()) {
7991         // simulate a stack overflow
7992         simulate_overflow = true;
7993       }
7994     )
7995     if (simulate_overflow || !_mark_stack->push(obj)) {
7996       if (_concurrent_precleaning) {
7997          // During precleaning we can just dirty the appropriate card(s)
7998          // in the grey set and continue. In the case of object arrays
7999          // in the grey set  and continue. In the case of object arrays
8000          // we need to dirty all of the cards that the object spans,
8001          // since the rescan of object arrays will be limited to the
8002          // dirty cards.
8003          // Note that no one can be interfering with us in this action
8004          // of dirtying the mod union table, so no locking or atomics
8005          // are required.
8006          if (obj->is_objArray()) {
8007            size_t sz = obj->size();
8008            HeapWord* end_card_addr = (HeapWord*)round_to(
8009                                         (intptr_t)(addr+sz), CardTableModRefBS::card_size);
8010            MemRegion redirty_range = MemRegion(addr, end_card_addr);
8011            assert(!redirty_range.is_empty(), "Arithmetical tautology");
8012            _mod_union_table->mark_range(redirty_range);
8013          } else {
8014            _mod_union_table->mark(addr);
8015          }
8016          _collector->_ser_pmc_preclean_ovflw++;
8017       } else {
8018          // During the remark phase, we need to remember this oop
8019          // in the overflow list.
8020          _collector->push_on_overflow_list(obj);
8021          _collector->_ser_pmc_remark_ovflw++;
8022       }
8023     }
8024   }
8025 }
8026 
8027 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
8028                                                MemRegion span,
8029                                                ReferenceProcessor* rp,
8030                                                CMSBitMap* bit_map,
8031                                                OopTaskQueue* work_queue):
8032   CMSOopClosure(rp),
8033   _collector(collector),
8034   _span(span),
8035   _bit_map(bit_map),
8036   _work_queue(work_queue)
8037 {
8038   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
8039 }
8040 
8041 void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
8042 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
8043 
8044 // Grey object rescan during second checkpoint phase --
8045 // the parallel version.
8046 void Par_PushAndMarkClosure::do_oop(oop obj) {
8047   // In the assert below, we ignore the mark word because
8048   // this oop may point to an already visited object that is
8049   // on the overflow stack (in which case the mark word has
8050   // been hijacked for chaining into the overflow stack --
8051   // if this is the last object in the overflow stack then
8052   // its mark word will be NULL). Because this object may
8053   // have been subsequently popped off the global overflow
8054   // stack, and the mark word possibly restored to the prototypical
8055   // value, by the time we get to examine this failing assert in
8056   // the debugger, is_oop_or_null(false) may subsequently start
8057   // to hold.
8058   assert(obj->is_oop_or_null(true),
8059          "expected an oop or NULL");
8060   HeapWord* addr = (HeapWord*)obj;
8061   // Check if oop points into the CMS generation
8062   // and is not marked
8063   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
8064     // a white object ...
8065     // If we manage to "claim" the object, by being the
8066     // first thread to mark it, then we push it on our
8067     // marking stack
8068     if (_bit_map->par_mark(addr)) {     // ... now grey
8069       // push on work queue (grey set)
8070       bool simulate_overflow = false;
8071       NOT_PRODUCT(
8072         if (CMSMarkStackOverflowALot &&
8073             _collector->par_simulate_overflow()) {
8074           // simulate a stack overflow
8075           simulate_overflow = true;
8076         }
8077       )
8078       if (simulate_overflow || !_work_queue->push(obj)) {
8079         _collector->par_push_on_overflow_list(obj);
8080         _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
8081       }
8082     } // Else, some other thread got there first
8083   }
8084 }
8085 
8086 void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
8087 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
8088 
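     // Yield during the precleaning of discovered references: give up
     // the bit map lock and the CMS token so that the VM thread (or a
     // foreground collection) can make progress, sleep briefly while a
     // yield is still being requested, and then reacquire the lock and
     // token before resuming.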
8089 void CMSPrecleanRefsYieldClosure::do_yield_work() {
8090   Mutex* bml = _collector->bitMapLock();
8091   assert_lock_strong(bml);
8092   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8093          "CMS thread should hold CMS token");
8094 
8095   bml->unlock();
8096   ConcurrentMarkSweepThread::desynchronize(true);
8097 
8098   ConcurrentMarkSweepThread::acknowledge_yield_request();
8099 
8100   _collector->stopTimer();
8101   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
8102   if (PrintCMSStatistics != 0) {
8103     _collector->incrementYields();
8104   }
8105   _collector->icms_wait();
8106 
8107   // See the comment in coordinator_yield()
8108   for (unsigned i = 0; i < CMSYieldSleepCount &&
8109                        ConcurrentMarkSweepThread::should_yield() &&
8110                        !CMSCollector::foregroundGCIsActive(); ++i) {
8111     os::sleep(Thread::current(), 1, false);
8112     ConcurrentMarkSweepThread::acknowledge_yield_request();
8113   }
8114 
8115   ConcurrentMarkSweepThread::synchronize(true);
8116   bml->lock();
8117 
8118   _collector->startTimer();
8119 }
8120 
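     // Yield if requested; return true to ask the caller to cut its
     // work short when a foreground collection has become active.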
8121 bool CMSPrecleanRefsYieldClosure::should_return() {
8122   if (ConcurrentMarkSweepThread::should_yield()) {
8123     do_yield_work();
8124   }
8125   return _collector->foregroundGCIsActive();
8126 }
8127 
8128 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
8129   assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
8130          "mr should be aligned to start at a card boundary");
8131   // We'd like to assert:
8132   // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
8133   //        "mr should be a range of cards");
8134   // However, that would be too strong in one case -- the last
8135   // partition ends at _unallocated_block which, in general, can be
8136   // an arbitrary boundary, not necessarily card aligned.
8137   if (PrintCMSStatistics != 0) {
8138     _num_dirty_cards +=
8139          mr.word_size()/CardTableModRefBS::card_size_in_words;
8140   }
8141   _space->object_iterate_mem(mr, &_scan_cl);
8142 }
8143 
8144 SweepClosure::SweepClosure(CMSCollector* collector,
8145                            ConcurrentMarkSweepGeneration* g,
8146                            CMSBitMap* bitMap, bool should_yield) :
8147   _collector(collector),
8148   _g(g),
8149   _sp(g->cmsSpace()),
8150   _limit(_sp->sweep_limit()),
8151   _freelistLock(_sp->freelistLock()),
8152   _bitMap(bitMap),
8153   _yield(should_yield),
8154   _inFreeRange(false),           // No free range at beginning of sweep
8155   _freeRangeInFreeLists(false),  // No free range at beginning of sweep
8156   _lastFreeRangeCoalesced(false),
8157   _freeFinger(g->used_region().start())
8158 {
8159   NOT_PRODUCT(
8160     _numObjectsFreed = 0;
8161     _numWordsFreed   = 0;
8162     _numObjectsLive = 0;
8163     _numWordsLive = 0;
8164     _numObjectsAlreadyFree = 0;
8165     _numWordsAlreadyFree = 0;
8166     _last_fc = NULL;
8167 
8168     _sp->initializeIndexedFreeListArrayReturnedBytes();
8169     _sp->dictionary()->initialize_dict_returned_bytes();
8170   )
8171   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
8172          "sweep _limit out of bounds");
8173   if (CMSTraceSweeper) {
8174     gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT,
8175                         _limit);
8176   }
8177 }
8178 
8179 void SweepClosure::print_on(outputStream* st) const {
8180   tty->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
8181                 _sp->bottom(), _sp->end());
8182   tty->print_cr("_limit = " PTR_FORMAT, _limit);
8183   tty->print_cr("_freeFinger = " PTR_FORMAT, _freeFinger);
8184   NOT_PRODUCT(tty->print_cr("_last_fc = " PTR_FORMAT, _last_fc);)
8185   tty->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
8186                 _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
8187 }
8188 
8189 #ifndef PRODUCT
8190 // Assertion checking only:  no useful work in product mode --
8191 // however, if any of the flags below become product flags,
8192 // you may need to review this code to see if it needs to be
8193 // enabled in product mode.
8194 SweepClosure::~SweepClosure() {
8195   assert_lock_strong(_freelistLock);
8196   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
8197          "sweep _limit out of bounds");
8198   if (inFreeRange()) {
8199     warning("inFreeRange() should have been reset; dumping state of SweepClosure");
8200     print();
8201     ShouldNotReachHere();
8202   }
8203   if (Verbose && PrintGC) {
8204     gclog_or_tty->print("Collected "SIZE_FORMAT" objects, " SIZE_FORMAT " bytes",
8205                         _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
8206     gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects,  "
8207                            SIZE_FORMAT" bytes  "
8208       "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
8209       _numObjectsLive, _numWordsLive*sizeof(HeapWord),
8210       _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
8211     size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
8212                         * sizeof(HeapWord);
8213     gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
8214 
8215     if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
8216       size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
8217       size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
8218       size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
8219       gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returned_bytes);
8220       gclog_or_tty->print("   Indexed List Returned "SIZE_FORMAT" bytes",
8221         indexListReturnedBytes);
8222       gclog_or_tty->print_cr("        Dictionary Returned "SIZE_FORMAT" bytes",
8223         dict_returned_bytes);
8224     }
8225   }
8226   if (CMSTraceSweeper) {
8227     gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
8228                            _limit);
8229   }
8230 }
8231 #endif  // PRODUCT
8232 
8233 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
8234     bool freeRangeInFreeLists) {
8235   if (CMSTraceSweeper) {
8236     gclog_or_tty->print("---- Start free range at " PTR_FORMAT " with free block (%d)\n",
8237                freeFinger, freeRangeInFreeLists);
8238   }
8239   assert(!inFreeRange(), "Trampling existing free range");
8240   set_inFreeRange(true);
8241   set_lastFreeRangeCoalesced(false);
8242 
8243   set_freeFinger(freeFinger);
8244   set_freeRangeInFreeLists(freeRangeInFreeLists);
8245   if (CMSTestInFreeList) {
8246     if (freeRangeInFreeLists) {
8247       FreeChunk* fc = (FreeChunk*) freeFinger;
8248       assert(fc->is_free(), "A chunk on the free list should be free.");
8249       assert(fc->size() > 0, "Free range should have a size");
8250       assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
8251     }
8252   }
8253 }
8254 
8255 // Note that the sweeper runs concurrently with mutators. Thus,
8256 // it is possible for direct allocation in this generation to happen
8257 // in the middle of the sweep. Note that the sweeper also coalesces
8258 // contiguous free blocks. Thus, unless the sweeper and the allocator
8259 // synchronize appropriately, freshly allocated blocks may get swept up.
8260 // This is accomplished by the sweeper locking the free lists while
8261 // it is sweeping. Thus blocks that are determined to be free are
8262 // indeed free. There is however one additional complication:
8263 // blocks that have been allocated since the final checkpoint and
8264 // mark, will not have been marked and so would be treated as
8265 // unreachable and swept up. To prevent this, the allocator marks
8266 // the bit map when allocating during the sweep phase. This leads,
8267 // however, to a further complication -- objects may have been allocated
8268 // but not yet initialized -- in the sense that the header isn't yet
8269 // installed. The sweeper cannot then determine the size of the block
8270 // in order to skip over it. To deal with this case, we use a technique
8271 // (due to Printezis) to encode such uninitialized block sizes in the
8272 // bit map. Since the bit map uses a bit per every HeapWord, but the
8273 // CMS generation has a minimum object size of 3 HeapWords, it follows
8274 // that "normal marks" won't be adjacent in the bit map (there will
8275 // always be at least two 0 bits between successive 1 bits). We make use
8276 // of these "unused" bits to represent uninitialized blocks -- the bit
8277 // corresponding to the start of the uninitialized object and the next
8278 // bit are both set. Finally, a 1 bit marks the end of the object that
8279 // started with the two consecutive 1 bits to indicate its potentially
8280 // uninitialized state.
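     //
     // A small illustration (word addresses chosen purely for exposition):
     // a fully initialized 4-word object starting at word A has only the
     // bit for A set:
     //      A  A+1 A+2 A+3
     //      1   0   0   0
     // while an allocated-but-uninitialized block of the same size carries
     // Printezis marks at A, A+1 and at its last word A+3:
     //      A  A+1 A+2 A+3
     //      1   1   0   1
     // letting the sweeper (see do_live_chunk() below) recover the block
     // size as (last marked word + 1) - A without reading a header that
     // may not have been installed yet.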
8281 
8282 size_t SweepClosure::do_blk_careful(HeapWord* addr) {
8283   FreeChunk* fc = (FreeChunk*)addr;
8284   size_t res;
8285 
8286   // Check if we are done sweeping. Below we check "addr >= _limit" rather
8287   // than "addr == _limit" because although _limit was a block boundary when
8288   // we started the sweep, it may no longer be one because heap expansion
8289   // may have caused us to coalesce the block ending at the address _limit
8290   // with a newly expanded chunk (this happens when _limit was set to the
8291   // previous _end of the space), so we may have stepped past _limit:
8292   // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
8293   if (addr >= _limit) { // we have swept up to or past the limit: finish up
8294     assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
8295            "sweep _limit out of bounds");
8296     assert(addr < _sp->end(), "addr out of bounds");
8297     // Flush any free range we might be holding as a single
8298     // coalesced chunk to the appropriate free list.
8299     if (inFreeRange()) {
8300       assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
8301              err_msg("freeFinger() " PTR_FORMAT" is out-of-bounds", freeFinger()));
8302       flush_cur_free_chunk(freeFinger(),
8303                            pointer_delta(addr, freeFinger()));
8304       if (CMSTraceSweeper) {
8305         gclog_or_tty->print("Sweep: last chunk: ");
8306         gclog_or_tty->print("put_free_blk " PTR_FORMAT " ("SIZE_FORMAT") "
8307                    "[coalesced:%d]\n",
8308                    freeFinger(), pointer_delta(addr, freeFinger()),
8309                    lastFreeRangeCoalesced() ? 1 : 0);
8310       }
8311     }
8312 
8313     // help the iterator loop finish
8314     return pointer_delta(_sp->end(), addr);
8315   }
8316 
8317   assert(addr < _limit, "sweep invariant");
8318   // check if we should yield
8319   do_yield_check(addr);
8320   if (fc->is_free()) {
8321     // Chunk that is already free
8322     res = fc->size();
8323     do_already_free_chunk(fc);
8324     debug_only(_sp->verifyFreeLists());
8325     // If we flush the chunk at hand in lookahead_and_flush()
8326     // and it's coalesced with a preceding chunk, then the
8327     // process of "mangling" the payload of the coalesced block
8328     // will cause erasure of the size information from the
8329     // (erstwhile) header of all the coalesced blocks but the
8330     // first, so the first disjunct in the assert will not hold
8331     // in that specific case (in which case the second disjunct
8332     // will hold).
8333     assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
8334            "Otherwise the size info doesn't change at this step");
8335     NOT_PRODUCT(
8336       _numObjectsAlreadyFree++;
8337       _numWordsAlreadyFree += res;
8338     )
8339     NOT_PRODUCT(_last_fc = fc;)
8340   } else if (!_bitMap->isMarked(addr)) {
8341     // Chunk is fresh garbage
8342     res = do_garbage_chunk(fc);
8343     debug_only(_sp->verifyFreeLists());
8344     NOT_PRODUCT(
8345       _numObjectsFreed++;
8346       _numWordsFreed += res;
8347     )
8348   } else {
8349     // Chunk that is alive.
8350     res = do_live_chunk(fc);
8351     debug_only(_sp->verifyFreeLists());
8352     NOT_PRODUCT(
8353         _numObjectsLive++;
8354         _numWordsLive += res;
8355     )
8356   }
8357   return res;
8358 }
8359 
8360 // For the smart allocation, record the following:
8361 //  split deaths - a free chunk is removed from its free list because
8362 //      it is being split into two or more chunks.
8363 //  split birth - a free chunk is being added to its free list because
8364 //      a larger free chunk has been split and resulted in this free chunk.
8365 //  coal death - a free chunk is being removed from its free list because
8366 //      it is being coalesced into a large free chunk.
8367 //  coal birth - a free chunk is being added to its free list because
8368 //      it was created when two or more free chunks were coalesced into
8369 //      this free chunk.
8370 //
8371 // These statistics are used to determine the desired number of free
8372 // chunks of a given size.  The desired number is chosen to be relative
8373 // to the end of a CMS sweep.  The desired number at the end of a sweep
8374 // is the
8375 //      count-at-end-of-previous-sweep (an amount that was enough)
8376 //              - count-at-beginning-of-current-sweep  (the excess)
8377 //              + split-births  (gains in this size during interval)
8378 //              - split-deaths  (demands on this size during interval)
8379 // where the interval is from the end of one sweep to the end of the
8380 // next.
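     //
     // A purely illustrative example: if a given size had 500 free chunks
     // at the end of the previous sweep, 200 of them remain at the start
     // of the current sweep, and the interval saw 150 split births and
     // 80 split deaths, the desired count at the end of this sweep would
     // be 500 - 200 + 150 - 80 = 370.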
8381 //
8382 // When sweeping the sweeper maintains an accumulated chunk which is
8383 // the chunk that is made up of chunks that have been coalesced.  That
8384 // will be termed the left-hand chunk.  A new chunk of garbage that
8385 // is being considered for coalescing will be referred to as the
8386 // right-hand chunk.
8387 //
8388 // When making a decision on whether to coalesce a right-hand chunk with
8389 // the current left-hand chunk, the current count vs. the desired count
8390 // of the left-hand chunk is considered.  Also if the right-hand chunk
8391 // is near the large chunk at the end of the heap (see
8392 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
8393 // left-hand chunk is coalesced.
8394 //
8395 // When making a decision about whether to split a chunk, the desired count
8396 // vs. the current count of the candidate to be split is also considered.
8397 // If the candidate is underpopulated (currently fewer chunks than desired)
8398 // a chunk of an overpopulated (currently more chunks than desired) size may
8399 // be chosen.  The "hint" associated with a free list, if non-null, points
8400 // to a free list which may be overpopulated.
8401 //
8402 
8403 void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
8404   const size_t size = fc->size();
8405   // Chunks that cannot be coalesced are not in the
8406   // free lists.
8407   if (CMSTestInFreeList && !fc->cantCoalesce()) {
8408     assert(_sp->verify_chunk_in_free_list(fc),
8409       "free chunk should be in free lists");
8410   }
8411   // a chunk that is already free, should not have been
8412   // marked in the bit map
8413   HeapWord* const addr = (HeapWord*) fc;
8414   assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
8415   // Verify that the bit map has no bits marked between
8416   // addr and purported end of this block.
8417   _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8418 
8419   // Some chunks cannot be coalesced under any circumstances.
8420   // See the definition of cantCoalesce().
8421   if (!fc->cantCoalesce()) {
8422     // This chunk can potentially be coalesced.
8423     if (_sp->adaptive_freelists()) {
8424       // All the work is done in
8425       do_post_free_or_garbage_chunk(fc, size);
8426     } else {  // Not adaptive free lists
8427       // this is a free chunk that can potentially be coalesced by the sweeper;
8428       if (!inFreeRange()) {
8429         // if the next chunk is a free block that can't be coalesced
8430         // it doesn't make sense to remove this chunk from the free lists
8431         FreeChunk* nextChunk = (FreeChunk*)(addr + size);
8432         assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
8433         if ((HeapWord*)nextChunk < _sp->end() &&     // There is another free chunk to the right ...
8434             nextChunk->is_free()               &&     // ... which is free...
8435             nextChunk->cantCoalesce()) {             // ... but can't be coalesced
8436           // nothing to do
8437         } else {
8438           // Potentially the start of a new free range:
8439           // Don't eagerly remove it from the free lists.
8440           // No need to remove it if it will just be put
8441           // back again.  (Also, from a pragmatic point of view, if
8442           // it is a free block in a region that is beyond any
8443           // allocated blocks, an assertion would fail.)
8444           // Remember the start of a free run.
8445           initialize_free_range(addr, true);
8446           // end - can coalesce with next chunk
8447         }
8448       } else {
8449         // the midst of a free range, we are coalescing
8450         print_free_block_coalesced(fc);
8451         if (CMSTraceSweeper) {
8452           gclog_or_tty->print("  -- pick up free block " PTR_FORMAT " (" SIZE_FORMAT ")\n", fc, size);
8453         }
8454         // remove it from the free lists
8455         _sp->removeFreeChunkFromFreeLists(fc);
8456         set_lastFreeRangeCoalesced(true);
8457         // If the chunk is being coalesced and the current free range is
8458         // in the free lists, remove the current free range so that it
8459         // will be returned to the free lists in its entirety - all
8460         // the coalesced pieces included.
8461         if (freeRangeInFreeLists()) {
8462           FreeChunk* ffc = (FreeChunk*) freeFinger();
8463           assert(ffc->size() == pointer_delta(addr, freeFinger()),
8464             "Size of free range is inconsistent with chunk size.");
8465           if (CMSTestInFreeList) {
8466             assert(_sp->verify_chunk_in_free_list(ffc),
8467               "free range is not in free lists");
8468           }
8469           _sp->removeFreeChunkFromFreeLists(ffc);
8470           set_freeRangeInFreeLists(false);
8471         }
8472       }
8473     }
8474     // Note that if the chunk is not coalescable (the else arm
8475     // below), we unconditionally flush, without needing to do
8476     // a "lookahead," as we do below.
8477     if (inFreeRange()) lookahead_and_flush(fc, size);
8478   } else {
8479     // Code path common to both original and adaptive free lists.
8480 
8481     // can't coalesce with the previous block; this should be treated
8482     // as the end of a free run, if any
8483     if (inFreeRange()) {
8484       // we kicked some butt; time to pick up the garbage
8485       assert(freeFinger() < addr, "freeFinger points too high");
8486       flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8487     }
8488     // else, nothing to do, just continue
8489   }
8490 }
8491 
8492 size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
8493   // This is a chunk of garbage.  It is not in any free list.
8494   // Add it to a free list or let it possibly be coalesced into
8495   // a larger chunk.
8496   HeapWord* const addr = (HeapWord*) fc;
8497   const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8498 
8499   if (_sp->adaptive_freelists()) {
8500     // Verify that the bit map has no bits marked between
8501     // addr and purported end of just dead object.
8502     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8503 
8504     do_post_free_or_garbage_chunk(fc, size);
8505   } else {
8506     if (!inFreeRange()) {
8507       // start of a new free range
8508       assert(size > 0, "A free range should have a size");
8509       initialize_free_range(addr, false);
8510     } else {
8511       // this will be swept up when we hit the end of the
8512       // free range
8513       if (CMSTraceSweeper) {
8514         gclog_or_tty->print("  -- pick up garbage " PTR_FORMAT " (" SIZE_FORMAT ")\n", fc, size);
8515       }
8516       // If the chunk is being coalesced and the current free range is
8517       // in the free lists, remove the current free range so that it
8518       // will be returned to the free lists in its entirety - all
8519       // the coalesced pieces included.
8520       if (freeRangeInFreeLists()) {
8521         FreeChunk* ffc = (FreeChunk*)freeFinger();
8522         assert(ffc->size() == pointer_delta(addr, freeFinger()),
8523           "Size of free range is inconsistent with chunk size.");
8524         if (CMSTestInFreeList) {
8525           assert(_sp->verify_chunk_in_free_list(ffc),
8526             "free range is not in free lists");
8527         }
8528         _sp->removeFreeChunkFromFreeLists(ffc);
8529         set_freeRangeInFreeLists(false);
8530       }
8531       set_lastFreeRangeCoalesced(true);
8532     }
8533     // this will be swept up when we hit the end of the free range
8534 
8535     // Verify that the bit map has no bits marked between
8536     // addr and purported end of just dead object.
8537     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8538   }
8539   assert(_limit >= addr + size,
8540          "A freshly garbage chunk can't possibly straddle over _limit");
8541   if (inFreeRange()) lookahead_and_flush(fc, size);
8542   return size;
8543 }
8544 
8545 size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
8546   HeapWord* addr = (HeapWord*) fc;
8547   // The sweeper has just found a live object. Return any accumulated
8548   // left hand chunk to the free lists.
8549   if (inFreeRange()) {
8550     assert(freeFinger() < addr, "freeFinger points too high");
8551     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8552   }
8553 
8554   // This object is live: we'd normally expect it to be
8555   // an oop, and would like to assert the following:
8556   // assert(oop(addr)->is_oop(), "live block should be an oop");
8557   // However, as we commented above, this may be an object whose
8558   // header hasn't yet been initialized.
8559   size_t size;
8560   assert(_bitMap->isMarked(addr), "Tautology for this control point");
8561   if (_bitMap->isMarked(addr + 1)) {
8562     // Determine the size from the bit map, rather than trying to
8563     // compute it from the object header.
8564     HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
8565     size = pointer_delta(nextOneAddr + 1, addr);
8566     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
8567            "alignment problem");
8568 
8569 #ifdef ASSERT
8570       if (oop(addr)->klass_or_null() != NULL) {
8571         // Ignore mark word because we are running concurrent with mutators
8572         assert(oop(addr)->is_oop(true), "live block should be an oop");
8573         assert(size ==
8574                CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
8575                "P-mark and computed size do not agree");
8576       }
8577 #endif
8578 
8579   } else {
8580     // This should be an initialized object that's alive.
8581     assert(oop(addr)->klass_or_null() != NULL,
8582            "Should be an initialized object");
8583     // Ignore mark word because we are running concurrent with mutators
8584     assert(oop(addr)->is_oop(true), "live block should be an oop");
8585     // Verify that the bit map has no bits marked between
8586     // addr and purported end of this block.
8587     size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8588     assert(size >= 3, "Necessary for Printezis marks to work");
8589     assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
8590     DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
8591   }
8592   return size;
8593 }
8594 
8595 void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
8596                                                  size_t chunkSize) {
8597   // do_post_free_or_garbage_chunk() should only be called in the case
8598   // of the adaptive free list allocator.
8599   const bool fcInFreeLists = fc->is_free();
8600   assert(_sp->adaptive_freelists(), "Should only be used in this case.");
8601   assert((HeapWord*)fc <= _limit, "sweep invariant");
8602   if (CMSTestInFreeList && fcInFreeLists) {
8603     assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
8604   }
8605 
8606   if (CMSTraceSweeper) {
8607     gclog_or_tty->print_cr("  -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", fc, chunkSize);
8608   }
8609 
8610   HeapWord* const fc_addr = (HeapWord*) fc;
8611 
8612   bool coalesce;
8613   const size_t left  = pointer_delta(fc_addr, freeFinger());
8614   const size_t right = chunkSize;
8615   switch (FLSCoalescePolicy) {
8616     // numeric value forms a coalition aggressiveness metric
8617     case 0:  { // never coalesce
8618       coalesce = false;
8619       break;
8620     }
8621     case 1: { // coalesce if left & right chunks on overpopulated lists
8622       coalesce = _sp->coalOverPopulated(left) &&
8623                  _sp->coalOverPopulated(right);
8624       break;
8625     }
8626     case 2: { // coalesce if left chunk on overpopulated list (default)
8627       coalesce = _sp->coalOverPopulated(left);
8628       break;
8629     }
8630     case 3: { // coalesce if left OR right chunk on overpopulated list
8631       coalesce = _sp->coalOverPopulated(left) ||
8632                  _sp->coalOverPopulated(right);
8633       break;
8634     }
8635     case 4: { // always coalesce
8636       coalesce = true;
8637       break;
8638     }
8639     default:
8640      ShouldNotReachHere();
8641   }
8642 
8643   // Should the current free range be coalesced?
8644   // If the chunk is in a free range and either we decided to coalesce above
8645   // or the chunk is near the large block at the end of the heap
8646   // (isNearLargestChunk() returns true), then coalesce this chunk.
8647   const bool doCoalesce = inFreeRange()
8648                           && (coalesce || _g->isNearLargestChunk(fc_addr));
8649   if (doCoalesce) {
8650     // Coalesce the current free range on the left with the new
8651     // chunk on the right.  If either is on a free list,
8652     // it must be removed from the list and stashed in the closure.
8653     if (freeRangeInFreeLists()) {
8654       FreeChunk* const ffc = (FreeChunk*)freeFinger();
8655       assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
8656         "Size of free range is inconsistent with chunk size.");
8657       if (CMSTestInFreeList) {
8658         assert(_sp->verify_chunk_in_free_list(ffc),
8659           "Chunk is not in free lists");
8660       }
8661       _sp->coalDeath(ffc->size());
8662       _sp->removeFreeChunkFromFreeLists(ffc);
8663       set_freeRangeInFreeLists(false);
8664     }
8665     if (fcInFreeLists) {
8666       _sp->coalDeath(chunkSize);
8667       assert(fc->size() == chunkSize,
8668         "The chunk has the wrong size or is not in the free lists");
8669       _sp->removeFreeChunkFromFreeLists(fc);
8670     }
8671     set_lastFreeRangeCoalesced(true);
8672     print_free_block_coalesced(fc);
8673   } else {  // not in a free range and/or should not coalesce
8674     // Return the current free range and start a new one.
8675     if (inFreeRange()) {
8676       // In a free range but cannot coalesce with the right hand chunk.
8677       // Put the current free range into the free lists.
8678       flush_cur_free_chunk(freeFinger(),
8679                            pointer_delta(fc_addr, freeFinger()));
8680     }
8681     // Set up for new free range.  Pass along whether the right hand
8682     // chunk is in the free lists.
8683     initialize_free_range((HeapWord*)fc, fcInFreeLists);
8684   }
8685 }
8686 
8687 // Lookahead flush:
8688 // If we are tracking a free range, and this is the last chunk that
8689 // we'll look at because its end crosses past _limit, we'll preemptively
8690 // flush it along with any free range we may be holding on to. Note that
8691 // this can be the case only for an already free or freshly garbage
8692 // chunk. If this block is an object, it can never straddle
8693 // over _limit. The "straddling" occurs when _limit is set at
8694 // the previous end of the space when this cycle started, and
8695 // a subsequent heap expansion caused the previously co-terminal
8696 // free block to be coalesced with the newly expanded portion,
8697 // thus rendering _limit a non-block-boundary making it dangerous
8698 // for the sweeper to step over and examine.
8699 void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
8700   assert(inFreeRange(), "Should only be called if currently in a free range.");
8701   HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
8702   assert(_sp->used_region().contains(eob - 1),
8703          err_msg("eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
8704                  " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
8705                  " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
8706                  eob, eob-1, _limit, _sp->bottom(), _sp->end(), fc, chunk_size));
8707   if (eob >= _limit) {
8708     assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
8709     if (CMSTraceSweeper) {
8710       gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
8711                              "[" PTR_FORMAT "," PTR_FORMAT ") in space "
8712                              "[" PTR_FORMAT "," PTR_FORMAT ")",
8713                              _limit, fc, eob, _sp->bottom(), _sp->end());
8714     }
8715     // Return the storage we are tracking back into the free lists.
8716     if (CMSTraceSweeper) {
8717       gclog_or_tty->print_cr("Flushing ... ");
8718     }
8719     assert(freeFinger() < eob, "Error");
8720     flush_cur_free_chunk( freeFinger(), pointer_delta(eob, freeFinger()));
8721   }
8722 }
8723 
8724 void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
8725   assert(inFreeRange(), "Should only be called if currently in a free range.");
8726   assert(size > 0,
8727     "A zero sized chunk cannot be added to the free lists.");
8728   if (!freeRangeInFreeLists()) {
8729     if (CMSTestInFreeList) {
8730       FreeChunk* fc = (FreeChunk*) chunk;
8731       fc->set_size(size);
8732       assert(!_sp->verify_chunk_in_free_list(fc),
8733         "chunk should not be in free lists yet");
8734     }
8735     if (CMSTraceSweeper) {
8736       gclog_or_tty->print_cr(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists",
8737                     chunk, size);
8738     }
8739     // A new free range is going to be starting.  The current
8740     // free range has not been added to the free lists yet or
8741     // was removed, so add it back.
8742     // If the current free range was coalesced, then the death
8743     // of the free range was recorded.  Record a birth now.
8744     if (lastFreeRangeCoalesced()) {
8745       _sp->coalBirth(size);
8746     }
8747     _sp->addChunkAndRepairOffsetTable(chunk, size,
8748             lastFreeRangeCoalesced());
8749   } else if (CMSTraceSweeper) {
8750     gclog_or_tty->print_cr("Already in free list: nothing to flush");
8751   }
8752   set_inFreeRange(false);
8753   set_freeRangeInFreeLists(false);
8754 }
8755 
8756 // We take a break if we've been at this for a while,
8757 // so as to avoid monopolizing the locks involved.
8758 void SweepClosure::do_yield_work(HeapWord* addr) {
8759   // Return current free chunk being used for coalescing (if any)
8760   // to the appropriate freelist.  After yielding, the next
8761   // free block encountered will start a coalescing range of
8762   // free blocks.  If the next free block is adjacent to the
8763   // chunk just flushed, they will need to wait for the next
8764   // sweep to be coalesced.
8765   if (inFreeRange()) {
8766     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8767   }
8768 
8769   // First give up the locks, then yield, then re-lock.
8770   // We should probably use a constructor/destructor idiom to
8771   // do this unlock/lock or modify the MutexUnlocker class to
8772   // serve our purpose. XXX
8773   assert_lock_strong(_bitMap->lock());
8774   assert_lock_strong(_freelistLock);
8775   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8776          "CMS thread should hold CMS token");
8777   _bitMap->lock()->unlock();
8778   _freelistLock->unlock();
8779   ConcurrentMarkSweepThread::desynchronize(true);
8780   ConcurrentMarkSweepThread::acknowledge_yield_request();
8781   _collector->stopTimer();
8782   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
8783   if (PrintCMSStatistics != 0) {
8784     _collector->incrementYields();
8785   }
8786   _collector->icms_wait();
8787 
8788   // See the comment in coordinator_yield()
8789   for (unsigned i = 0; i < CMSYieldSleepCount &&
8790                        ConcurrentMarkSweepThread::should_yield() &&
8791                        !CMSCollector::foregroundGCIsActive(); ++i) {
8792     os::sleep(Thread::current(), 1, false);
8793     ConcurrentMarkSweepThread::acknowledge_yield_request();
8794   }
8795 
8796   ConcurrentMarkSweepThread::synchronize(true);
8797   _freelistLock->lock();
8798   _bitMap->lock()->lock_without_safepoint_check();
8799   _collector->startTimer();
8800 }
8801 
8802 #ifndef PRODUCT
8803 // This is actually very useful in a product build if it can
8804 // be called from the debugger.  Compile it into the product
8805 // as needed.
8806 bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
8807   return debug_cms_space->verify_chunk_in_free_list(fc);
8808 }
8809 #endif
8810 
8811 void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
8812   if (CMSTraceSweeper) {
8813     gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
8814                            fc, fc->size());
8815   }
8816 }
8817 
8818 // CMSIsAliveClosure
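     // An object is considered live if it lies outside the span being
     // collected (and so is assumed live), or if it lies within the span
     // and has been marked in the bit map.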
8819 bool CMSIsAliveClosure::do_object_b(oop obj) {
8820   HeapWord* addr = (HeapWord*)obj;
8821   return addr != NULL &&
8822          (!_span.contains(addr) || _bit_map->isMarked(addr));
8823 }
8824 
8825 
8826 CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
8827                       MemRegion span,
8828                       CMSBitMap* bit_map, CMSMarkStack* mark_stack,
8829                       bool cpc):
8830   _collector(collector),
8831   _span(span),
8832   _bit_map(bit_map),
8833   _mark_stack(mark_stack),
8834   _concurrent_precleaning(cpc) {
8835   assert(!_span.is_empty(), "Empty span could spell trouble");
8836 }
8837 
8838 
8839 // CMSKeepAliveClosure: the serial version
8840 void CMSKeepAliveClosure::do_oop(oop obj) {
8841   HeapWord* addr = (HeapWord*)obj;
8842   if (_span.contains(addr) &&
8843       !_bit_map->isMarked(addr)) {
8844     _bit_map->mark(addr);
8845     bool simulate_overflow = false;
8846     NOT_PRODUCT(
8847       if (CMSMarkStackOverflowALot &&
8848           _collector->simulate_overflow()) {
8849         // simulate a stack overflow
8850         simulate_overflow = true;
8851       }
8852     )
8853     if (simulate_overflow || !_mark_stack->push(obj)) {
8854       if (_concurrent_precleaning) {
8855         // We dirty the overflowed object and let the remark
8856         // phase deal with it.
8857         assert(_collector->overflow_list_is_empty(), "Error");
8858         // In the case of object arrays, we need to dirty all of
8859         // the cards that the object spans. No locking or atomics
8860         // are needed since no one else can be mutating the mod union
8861         // table. (A worked sketch of this card computation follows this closure.)
8862         if (obj->is_objArray()) {
8863           size_t sz = obj->size();
8864           HeapWord* end_card_addr =
8865             (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
8866           MemRegion redirty_range = MemRegion(addr, end_card_addr);
8867           assert(!redirty_range.is_empty(), "Arithmetical tautology");
8868           _collector->_modUnionTable.mark_range(redirty_range);
8869         } else {
8870           _collector->_modUnionTable.mark(addr);
8871         }
8872         _collector->_ser_kac_preclean_ovflw++;
8873       } else {
8874         _collector->push_on_overflow_list(obj);
8875         _collector->_ser_kac_ovflw++;
8876       }
8877     }
8878   }
8879 }
8880 
8881 void CMSKeepAliveClosure::do_oop(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
8882 void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
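
// [Illustrative sketch, not part of the original sources] The card computation
// used above for overflowed object arrays, in isolation: every card spanned by
// [addr, addr + size) is dirtied by rounding the end address up to a card
// boundary. This sketch works in plain bytes (the code above works in
// HeapWords); card_size corresponds to CardTableModRefBS::card_size, which is
// 512 bytes by default. The helper names are ours.
#if 0  // sketch only; not compiled
#include <cstddef>
#include <cstdint>

static uintptr_t round_up(uintptr_t x, size_t alignment) {
  return (x + alignment - 1) & ~(uintptr_t)(alignment - 1);  // power-of-two alignment
}

// Computes the half-open byte range [*range_start, *range_end) whose cards
// must be dirtied for an object at obj_start spanning obj_size_bytes bytes.
static void cards_to_dirty(uintptr_t obj_start, size_t obj_size_bytes,
                           size_t card_size,
                           uintptr_t* range_start, uintptr_t* range_end) {
  *range_start = obj_start;
  *range_end   = round_up(obj_start + obj_size_bytes, card_size);
  // e.g. an array at 0x1040 of 0x300 bytes with 512-byte cards yields
  // [0x1040, 0x1400), covering the two cards that start at 0x1000 and 0x1200.
}
#endif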
8883 
8884 // CMSParKeepAliveClosure: a parallel version of the above.
8885 // The work queues are private to each closure (thread),
8886 // but (may be) available for stealing by other threads.
8887 void CMSParKeepAliveClosure::do_oop(oop obj) {
8888   HeapWord* addr = (HeapWord*)obj;
8889   if (_span.contains(addr) &&
8890       !_bit_map->isMarked(addr)) {
8891     // In general, during recursive tracing, several threads
8892     // may be concurrently getting here; the first one to
8893     // "tag" it claims it.
8894     if (_bit_map->par_mark(addr)) {
8895       bool res = _work_queue->push(obj);
8896       assert(res, "Low water mark should be much less than capacity");
8897       // Do a recursive trim in the hope that this will keep
8898       // stack usage lower, but leave some oops for potential stealers
8899       trim_queue(_low_water_mark);
8900     } // Else, another thread got there first
8901   }
8902 }
8903 
8904 void CMSParKeepAliveClosure::do_oop(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
8905 void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8906 
8907 void CMSParKeepAliveClosure::trim_queue(uint max) {
8908   while (_work_queue->size() > max) {
8909     oop new_oop;
8910     if (_work_queue->pop_local(new_oop)) {
8911       assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
8912       assert(_bit_map->isMarked((HeapWord*)new_oop),
8913              "no white objects on this stack!");
8914       assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8915       // iterate over the oops in this oop, marking and pushing
8916       // the ones in CMS heap (i.e. in _span).
8917       new_oop->oop_iterate(&_mark_and_push);
8918     }
8919   }
8920 }
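
// [Illustrative sketch, not part of the original sources] The trim-to-low-water-mark
// policy used by trim_queue() above, reduced to a toy: drain our own queue only
// while it holds more than low_water_mark entries, so some work remains available
// for other threads to steal. std::deque stands in for the OopTaskQueue; the
// names, and the choice of which end stealers would take, are ours.
#if 0  // sketch only; not compiled
#include <deque>
#include <cstddef>

template <typename T, typename ProcessFn>
void trim_to(std::deque<T>& local_q, size_t low_water_mark, ProcessFn process) {
  while (local_q.size() > low_water_mark) {
    T item = local_q.back();   // owner works one end; stealers would take the other
    local_q.pop_back();
    process(item);             // processing may push new work back onto local_q
  }
}
#endif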
8921 
8922 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
8923                                 CMSCollector* collector,
8924                                 MemRegion span, CMSBitMap* bit_map,
8925                                 OopTaskQueue* work_queue):
8926   _collector(collector),
8927   _span(span),
8928   _bit_map(bit_map),
8929   _work_queue(work_queue) { }
8930 
8931 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
8932   HeapWord* addr = (HeapWord*)obj;
8933   if (_span.contains(addr) &&
8934       !_bit_map->isMarked(addr)) {
8935     if (_bit_map->par_mark(addr)) {
8936       bool simulate_overflow = false;
8937       NOT_PRODUCT(
8938         if (CMSMarkStackOverflowALot &&
8939             _collector->par_simulate_overflow()) {
8940           // simulate a stack overflow
8941           simulate_overflow = true;
8942         }
8943       )
8944       if (simulate_overflow || !_work_queue->push(obj)) {
8945         _collector->par_push_on_overflow_list(obj);
8946         _collector->_par_kac_ovflw++;
8947       }
8948     } // Else another thread got there already
8949   }
8950 }
8951 
8952 void CMSInnerParMarkAndPushClosure::do_oop(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8953 void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8954 
8955 //////////////////////////////////////////////////////////////////
8956 //  CMSExpansionCause                /////////////////////////////
8957 //////////////////////////////////////////////////////////////////
8958 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
8959   switch (cause) {
8960     case _no_expansion:
8961       return "No expansion";
8962     case _satisfy_free_ratio:
8963       return "Free ratio";
8964     case _satisfy_promotion:
8965       return "Satisfy promotion";
8966     case _satisfy_allocation:
8967       return "Allocation";
8968     case _allocate_par_lab:
8969       return "Par LAB";
8970     case _allocate_par_spooling_space:
8971       return "Par Spooling Space";
8972     case _adaptive_size_policy:
8973       return "Ergonomics";
8974     default:
8975       return "unknown";
8976   }
8977 }
8978 
8979 void CMSDrainMarkingStackClosure::do_void() {
8980   // the max number to take from overflow list at a time
8981   const size_t num = _mark_stack->capacity()/4;
8982   assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
8983          "Overflow list should be NULL during concurrent phases");
8984   while (!_mark_stack->isEmpty() ||
8985          // if stack is empty, check the overflow list
8986          _collector->take_from_overflow_list(num, _mark_stack)) {
8987     oop obj = _mark_stack->pop();
8988     HeapWord* addr = (HeapWord*)obj;
8989     assert(_span.contains(addr), "Should be within span");
8990     assert(_bit_map->isMarked(addr), "Should be marked");
8991     assert(obj->is_oop(), "Should be an oop");
8992     obj->oop_iterate(_keep_alive);
8993   }
8994 }
8995 
8996 void CMSParDrainMarkingStackClosure::do_void() {
8997   // drain queue
8998   trim_queue(0);
8999 }
9000 
9001 // Trim our work_queue so its length is below max at return
9002 void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
9003   while (_work_queue->size() > max) {
9004     oop new_oop;
9005     if (_work_queue->pop_local(new_oop)) {
9006       assert(new_oop->is_oop(), "Expected an oop");
9007       assert(_bit_map->isMarked((HeapWord*)new_oop),
9008              "no white objects on this stack!");
9009       assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
9010       // iterate over the oops in this oop, marking and pushing
9011       // the ones in CMS heap (i.e. in _span).
9012       new_oop->oop_iterate(&_mark_and_push);
9013     }
9014   }
9015 }
9016 
9017 ////////////////////////////////////////////////////////////////////
9018 // Support for Marking Stack Overflow list handling and related code
9019 ////////////////////////////////////////////////////////////////////
9020 // Much of the following code is similar in shape and spirit to the
9021 // code used in ParNewGC. We should try and share that code
9022 // as much as possible in the future.
9023 
9024 #ifndef PRODUCT
9025 // Debugging support for CMSStackOverflowALot
9026 
9027 // It's OK to call this multi-threaded;  the worst thing
9028 // that can happen is that we'll get a bunch of closely
9029 // spaced simulated overflows, but that's OK, in fact
9030 // probably good as it would exercise the overflow code
9031 // under contention.
9032 bool CMSCollector::simulate_overflow() {
9033   if (_overflow_counter-- <= 0) { // just being defensive
9034     _overflow_counter = CMSMarkStackOverflowInterval;
9035     return true;
9036   } else {
9037     return false;
9038   }
9039 }
9040 
9041 bool CMSCollector::par_simulate_overflow() {
9042   return simulate_overflow();
9043 }
9044 #endif
9045 
9046 // Single-threaded
9047 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
9048   assert(stack->isEmpty(), "Expected precondition");
9049   assert(stack->capacity() > num, "Shouldn't bite more than can chew");
9050   size_t i = num;
9051   oop  cur = _overflow_list;
9052   const markOop proto = markOopDesc::prototype();
9053   NOT_PRODUCT(ssize_t n = 0;)
9054   for (oop next; i > 0 && cur != NULL; cur = next, i--) {
9055     next = oop(cur->mark());
9056     cur->set_mark(proto);   // until proven otherwise
9057     assert(cur->is_oop(), "Should be an oop");
9058     bool res = stack->push(cur);
9059     assert(res, "Bit off more than can chew?");
9060     NOT_PRODUCT(n++;)
9061   }
9062   _overflow_list = cur;
9063 #ifndef PRODUCT
9064   assert(_num_par_pushes >= n, "Too many pops?");
9065   _num_par_pushes -= n;
9066 #endif
9067   return !stack->isEmpty();
9068 }
9069 
9070 #define BUSY  (cast_to_oop<intptr_t>(0x1aff1aff))
9071 // (MT-safe) Get a prefix of at most "num" from the list.
9072 // The overflow list is chained through the mark word of
9073 // each object in the list. We fetch the entire list,
9074 // break off a prefix of the right size and return the
9075 // remainder. If other threads try to take objects from
9076 // the overflow list at that time, they will wait for
9077 // some time to see if data becomes available. If (and
9078 // only if) another thread places one or more object(s)
9079 // on the global list before we have returned the suffix
9080 // to the global list, we will walk down our local list
9081 // to find its end and append the global list to
9082 // our suffix before returning it. This suffix walk can
9083 // prove to be expensive (quadratic in the amount of traffic)
9084 // when there are many objects in the overflow list and
9085 // there is much producer-consumer contention on the list.
9086 // *NOTE*: The overflow list manipulation code here and
9087 // in ParNewGeneration:: are very similar in shape,
9088 // except that in the ParNew case we use the old (from/eden)
9089 // copy of the object to thread the list via its klass word.
9090 // Because of the common code, if you make any changes in
9091 // the code below, please check the ParNew version to see if
9092 // similar changes might be needed.
9093 // CR 6797058 has been filed to consolidate the common code.
// (A self-contained sketch of this take-prefix/return-suffix protocol follows
// par_push_on_overflow_list() below.)
9094 bool CMSCollector::par_take_from_overflow_list(size_t num,
9095                                                OopTaskQueue* work_q,
9096                                                int no_of_gc_threads) {
9097   assert(work_q->size() == 0, "First empty local work queue");
9098   assert(num < work_q->max_elems(), "Can't bite more than we can chew");
9099   if (_overflow_list == NULL) {
9100     return false;
9101   }
9102   // Grab the entire list; we'll put back a suffix
9103   oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
9104   Thread* tid = Thread::current();
9105   // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
9106   // set to ParallelGCThreads.
9107   size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
9108   size_t sleep_time_millis = MAX2((size_t)1, num/100);
9109   // If the list is busy, we spin for a short while,
9110   // sleeping between attempts to get the list.
9111   for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
9112     os::sleep(tid, sleep_time_millis, false);
9113     if (_overflow_list == NULL) {
9114       // Nothing left to take
9115       return false;
9116     } else if (_overflow_list != BUSY) {
9117       // Try and grab the prefix
9118       prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
9119     }
9120   }
9121   // If the list was found to be empty, or we spun long
9122   // enough, we give up and return empty-handed. If we leave
9123   // the list in the BUSY state below, it must be the case that
9124   // some other thread holds the overflow list and will set it
9125   // to a non-BUSY state in the future.
9126   if (prefix == NULL || prefix == BUSY) {
9127      // Nothing to take or waited long enough
9128      if (prefix == NULL) {
9129        // Write back the NULL in case we overwrote it with BUSY above
9130        // and it is still the same value.
9131        (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
9132      }
9133      return false;
9134   }
9135   assert(prefix != NULL && prefix != BUSY, "Error");
9136   size_t i = num;
9137   oop cur = prefix;
9138   // Walk down the first "num" objects, unless we reach the end.
9139   for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
9140   if (cur->mark() == NULL) {
9141     // We have "num" or fewer elements in the list, so there
9142     // is nothing to return to the global list.
9143     // Write back the NULL in lieu of the BUSY we wrote
9144     // above, if it is still the same value.
9145     if (_overflow_list == BUSY) {
9146       (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
9147     }
9148   } else {
9149     // Chop off the suffix and return it to the global list.
9150     assert(cur->mark() != BUSY, "Error");
9151     oop suffix_head = cur->mark(); // suffix will be put back on global list
9152     cur->set_mark(NULL);           // break off suffix
9153     // It's possible that the list is still in the BUSY (i.e. empty) state
9154     // we left it in a short while ago; in that case we may be
9155     // able to place back the suffix without incurring the cost
9156     // of a walk down the list.
9157     oop observed_overflow_list = _overflow_list;
9158     oop cur_overflow_list = observed_overflow_list;
9159     bool attached = false;
9160     while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
9161       observed_overflow_list =
9162         (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
9163       if (cur_overflow_list == observed_overflow_list) {
9164         attached = true;
9165         break;
9166       } else cur_overflow_list = observed_overflow_list;
9167     }
9168     if (!attached) {
9169       // Too bad, someone else sneaked in (at least) an element; we'll need
9170       // to do a splice. Find tail of suffix so we can prepend suffix to global
9171       // list.
9172       for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
9173       oop suffix_tail = cur;
9174       assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
9175              "Tautology");
9176       observed_overflow_list = _overflow_list;
9177       do {
9178         cur_overflow_list = observed_overflow_list;
9179         if (cur_overflow_list != BUSY) {
9180           // Do the splice ...
9181           suffix_tail->set_mark(markOop(cur_overflow_list));
9182         } else { // cur_overflow_list == BUSY
9183           suffix_tail->set_mark(NULL);
9184         }
9185         // ... and try to place spliced list back on overflow_list ...
9186         observed_overflow_list =
9187           (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
9188       } while (cur_overflow_list != observed_overflow_list);
9189       // ... until we have succeeded in doing so.
9190     }
9191   }
9192 
9193   // Push the prefix elements on work_q
9194   assert(prefix != NULL, "control point invariant");
9195   const markOop proto = markOopDesc::prototype();
9196   oop next;
9197   NOT_PRODUCT(ssize_t n = 0;)
9198   for (cur = prefix; cur != NULL; cur = next) {
9199     next = oop(cur->mark());
9200     cur->set_mark(proto);   // until proven otherwise
9201     assert(cur->is_oop(), "Should be an oop");
9202     bool res = work_q->push(cur);
9203     assert(res, "Bit off more than we can chew?");
9204     NOT_PRODUCT(n++;)
9205   }
9206 #ifndef PRODUCT
9207   assert(_num_par_pushes >= n, "Too many pops?");
9208   Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
9209 #endif
9210   return true;
9211 }
9212 
9213 // Single-threaded
9214 void CMSCollector::push_on_overflow_list(oop p) {
9215   NOT_PRODUCT(_num_par_pushes++;)
9216   assert(p->is_oop(), "Not an oop");
9217   preserve_mark_if_necessary(p);
9218   p->set_mark((markOop)_overflow_list);
9219   _overflow_list = p;
9220 }
9221 
9222 // Multi-threaded; use CAS to prepend to overflow list
9223 void CMSCollector::par_push_on_overflow_list(oop p) {
9224   NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
9225   assert(p->is_oop(), "Not an oop");
9226   par_preserve_mark_if_necessary(p);
9227   oop observed_overflow_list = _overflow_list;
9228   oop cur_overflow_list;
9229   do {
9230     cur_overflow_list = observed_overflow_list;
9231     if (cur_overflow_list != BUSY) {
9232       p->set_mark(markOop(cur_overflow_list));
9233     } else {
9234       p->set_mark(NULL);
9235     }
9236     observed_overflow_list =
9237       (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
9238   } while (cur_overflow_list != observed_overflow_list);
9239 }
9240 #undef BUSY
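
// [Illustrative sketch, not part of the original sources] The block comment
// above par_take_from_overflow_list() describes the take-prefix/return-suffix
// protocol used for the shared overflow list. The self-contained toy below
// (the names OverflowStack, busy(), push() and take_prefix() are ours) shows
// the same shape using std::atomic and an explicit 'next' field in place of
// the mark word; it omits the spin/sleep backoff, the NOT_PRODUCT bookkeeping,
// and any treatment of ABA or node reclamation.
#if 0  // sketch only; not compiled
#include <atomic>
#include <cstddef>
#include <cstdint>

struct Node { Node* next; };

class OverflowStack {
  std::atomic<Node*> _head;
  // Sentinel marking the list as "claimed by a taker", like BUSY above.
  static Node* busy() { return reinterpret_cast<Node*>(uintptr_t(0x1)); }

 public:
  OverflowStack() : _head(nullptr) {}

  // Multi-producer push: CAS the node onto the head; a BUSY head is treated
  // as empty, exactly as in par_push_on_overflow_list() above.
  void push(Node* n) {
    Node* expected = _head.load();
    do {
      n->next = (expected == busy()) ? nullptr : expected;
    } while (!_head.compare_exchange_strong(expected, n));
  }

  // Multi-consumer take: grab the whole list by exchanging in BUSY, detach a
  // prefix of at most 'num' nodes, and publish the suffix back, splicing in
  // anything pushed concurrently. Returns the prefix head, or nullptr.
  Node* take_prefix(size_t num) {
    Node* prefix = _head.exchange(busy());
    if (prefix == nullptr || prefix == busy()) {
      if (prefix == nullptr) {
        // We overwrote an empty list with BUSY; restore nullptr if unchanged.
        Node* expected = busy();
        _head.compare_exchange_strong(expected, nullptr);
      }
      return nullptr;                      // nothing to take, or list is held
    }
    Node* cur = prefix;
    for (size_t i = num; i > 1 && cur->next != nullptr; i--) cur = cur->next;
    Node* suffix = cur->next;
    cur->next = nullptr;                   // terminate the prefix
    if (suffix == nullptr) {
      // The whole list fit in the prefix; put nullptr back in place of BUSY.
      Node* expected = busy();
      _head.compare_exchange_strong(expected, nullptr);
      return prefix;
    }
    // Fast path: if the head is still BUSY (or became empty), just install
    // the suffix; otherwise find its tail and splice the global list behind.
    Node* expected = busy();
    if (_head.compare_exchange_strong(expected, suffix)) return prefix;
    expected = nullptr;
    if (_head.compare_exchange_strong(expected, suffix)) return prefix;
    Node* tail = suffix;
    while (tail->next != nullptr) tail = tail->next;
    expected = _head.load();
    do {
      tail->next = (expected == busy()) ? nullptr : expected;
    } while (!_head.compare_exchange_strong(expected, suffix));
    return prefix;
  }
};
#endif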
9241 
9242 // Single threaded
9243 // General Note on GrowableArray: pushes may silently fail
9244 // because we are (temporarily) out of C-heap for expanding
9245 // the stack. The problem is quite ubiquitous and affects
9246 // a lot of code in the JVM. The prudent thing for GrowableArray
9247 // to do (for now) is to exit with an error. However, that may
9248 // be too draconian in some cases because the caller may be
9249 // able to recover without much harm. For such cases, we
9250 // should probably introduce a "soft_push" method which returns
9251 // an indication of success or failure with the assumption that
9252 // the caller may be able to recover from a failure; code in
9253 // the VM can then be changed, incrementally, to deal with such
9254 // failures where possible, thus, incrementally hardening the VM
9255 // in such low resource situations. (A toy soft_push sketch follows the function below.)
9256 void CMSCollector::preserve_mark_work(oop p, markOop m) {
9257   _preserved_oop_stack.push(p);
9258   _preserved_mark_stack.push(m);
9259   assert(m == p->mark(), "Mark word changed");
9260   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
9261          "bijection");
9262 }
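
// [Illustrative sketch, not part of the original sources] A 'soft_push' of the
// kind proposed in the note above would report C-heap exhaustion to the caller
// instead of exiting the VM. A toy stand-in (not GrowableArray itself; the
// class and method names are ours):
#if 0  // sketch only; not compiled
#include <cstdlib>
#include <cstddef>

class SoftStack {
  void** _data;
  size_t _len;
  size_t _cap;
 public:
  SoftStack() : _data(NULL), _len(0), _cap(0) {}
  ~SoftStack() { free(_data); }
  // Returns false instead of aborting when the C-heap expansion fails,
  // so the caller can attempt recovery (e.g. fall back to an overflow list).
  bool soft_push(void* p) {
    if (_len == _cap) {
      size_t new_cap = (_cap == 0) ? 16 : _cap * 2;
      void** d = (void**) realloc(_data, new_cap * sizeof(void*));
      if (d == NULL) return false;
      _data = d;
      _cap  = new_cap;
    }
    _data[_len++] = p;
    return true;
  }
};
#endif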
9263 
9264 // Single threaded
9265 void CMSCollector::preserve_mark_if_necessary(oop p) {
9266   markOop m = p->mark();
9267   if (m->must_be_preserved(p)) {
9268     preserve_mark_work(p, m);
9269   }
9270 }
9271 
9272 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
9273   markOop m = p->mark();
9274   if (m->must_be_preserved(p)) {
9275     MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
9276     // Even though we read the mark word without holding
9277     // the lock, we are assured that it will not change
9278     // because we "own" this oop, so no other thread can
9279     // be trying to push it on the overflow list; see
9280     // the assertion in preserve_mark_work() that checks
9281     // that m == p->mark().
9282     preserve_mark_work(p, m);
9283   }
9284 }
9285 
9286 // We should be able to do this multi-threaded,
9287 // a chunk of stack being a task (this is
9288 // correct because each oop only ever appears
9289 // once in the overflow list). However, it's
9290 // not very easy to completely overlap this with
9291 // other operations, so will generally not be done
9292 // until all work's been completed. Because we
9293 // expect the preserved oop stack (set) to be small,
9294 // it's probably fine to do this single-threaded.
9295 // We can explore cleverer concurrent/overlapped/parallel
9296 // processing of preserved marks if we feel the
9297 // need for this in the future. Stack overflow should
9298 // be so rare in practice, and its effect on performance
9299 // when it does happen so great, that this cost will
9300 // likely just be in the noise anyway.
9301 void CMSCollector::restore_preserved_marks_if_any() {
9302   assert(SafepointSynchronize::is_at_safepoint(),
9303          "world should be stopped");
9304   assert(Thread::current()->is_ConcurrentGC_thread() ||
9305          Thread::current()->is_VM_thread(),
9306          "should be single-threaded");
9307   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
9308          "bijection");
9309 
9310   while (!_preserved_oop_stack.is_empty()) {
9311     oop p = _preserved_oop_stack.pop();
9312     assert(p->is_oop(), "Should be an oop");
9313     assert(_span.contains(p), "oop should be in _span");
9314     assert(p->mark() == markOopDesc::prototype(),
9315            "Set when taken from overflow list");
9316     markOop m = _preserved_mark_stack.pop();
9317     p->set_mark(m);
9318   }
9319   assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
9320          "stacks were cleared above");
9321 }
9322 
9323 #ifndef PRODUCT
9324 bool CMSCollector::no_preserved_marks() const {
9325   return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
9326 }
9327 #endif
9328 
9329 CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const
9330 {
9331   GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
9332   CMSAdaptiveSizePolicy* size_policy =
9333     (CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
9334   assert(size_policy->is_gc_cms_adaptive_size_policy(),
9335     "Wrong type for size policy");
9336   return size_policy;
9337 }
9338 
9339 void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
9340                                            size_t desired_promo_size) {
9341   if (cur_promo_size < desired_promo_size) {
9342     size_t expand_bytes = desired_promo_size - cur_promo_size;
9343     if (PrintAdaptiveSizePolicy && Verbose) {
9344       gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
9345         "Expanding tenured generation by " SIZE_FORMAT " (bytes)",
9346         expand_bytes);
9347     }
9348     expand(expand_bytes,
9349            MinHeapDeltaBytes,
9350            CMSExpansionCause::_adaptive_size_policy);
9351   } else if (desired_promo_size < cur_promo_size) {
9352     size_t shrink_bytes = cur_promo_size - desired_promo_size;
9353     if (PrintAdaptiveSizePolicy && Verbose) {
9354       gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
9355         "Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
9356         shrink_bytes);
9357     }
9358     shrink(shrink_bytes);
9359   }
9360 }
9361 
9362 CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
9363   GenCollectedHeap* gch = GenCollectedHeap::heap();
9364   CMSGCAdaptivePolicyCounters* counters =
9365     (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
9366   assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
9367     "Wrong kind of counters");
9368   return counters;
9369 }
9370 
9371 
9372 void ASConcurrentMarkSweepGeneration::update_counters() {
9373   if (UsePerfData) {
9374     _space_counters->update_all();
9375     _gen_counters->update_all();
9376     CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
9377     GenCollectedHeap* gch = GenCollectedHeap::heap();
9378     CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
9379     assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
9380       "Wrong gc statistics type");
9381     counters->update_counters(gc_stats_l);
9382   }
9383 }
9384 
9385 void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
9386   if (UsePerfData) {
9387     _space_counters->update_used(used);
9388     _space_counters->update_capacity();
9389     _gen_counters->update_all();
9390 
9391     CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
9392     GenCollectedHeap* gch = GenCollectedHeap::heap();
9393     CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
9394     assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
9395       "Wrong gc statistics type");
9396     counters->update_counters(gc_stats_l);
9397   }
9398 }
9399 
9400 void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
9401   assert_locked_or_safepoint(Heap_lock);
9402   assert_lock_strong(freelistLock());
9403   HeapWord* old_end = _cmsSpace->end();
9404   HeapWord* unallocated_start = _cmsSpace->unallocated_block();
9405   assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
9406   FreeChunk* chunk_at_end = find_chunk_at_end();
9407   if (chunk_at_end == NULL) {
9408     // No room to shrink
9409     if (PrintGCDetails && Verbose) {
9410       gclog_or_tty->print_cr("No room to shrink: old_end  "
9411         PTR_FORMAT "  unallocated_start  " PTR_FORMAT
9412         " chunk_at_end  " PTR_FORMAT,
9413         old_end, unallocated_start, chunk_at_end);
9414     }
9415     return;
9416   } else {
9417 
9418     // Find the chunk at the end of the space and determine
9419     // how much it can be shrunk.
9420     size_t shrinkable_size_in_bytes = chunk_at_end->size();
9421     size_t aligned_shrinkable_size_in_bytes =
9422       align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
9423     assert(unallocated_start <= (HeapWord*) chunk_at_end->end(),
9424       "Inconsistent chunk at end of space");
9425     size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
9426     size_t word_size_before = heap_word_size(_virtual_space.committed_size());
9427 
9428     // Shrink the underlying space
9429     _virtual_space.shrink_by(bytes);
9430     if (PrintGCDetails && Verbose) {
9431       gclog_or_tty->print_cr("ASConcurrentMarkSweepGeneration::shrink_by:"
9432         " desired_bytes " SIZE_FORMAT
9433         " shrinkable_size_in_bytes " SIZE_FORMAT
9434         " aligned_shrinkable_size_in_bytes " SIZE_FORMAT
9435         "  bytes  " SIZE_FORMAT,
9436         desired_bytes, shrinkable_size_in_bytes,
9437         aligned_shrinkable_size_in_bytes, bytes);
9438       gclog_or_tty->print_cr("          old_end  " PTR_FORMAT
9439         "  unallocated_start  " PTR_FORMAT,
9440         old_end, unallocated_start);
9441     }
9442 
9443     // If the space did shrink (shrinking is not guaranteed),
9444     // shrink the chunk at the end by the appropriate amount.
9445     if (((HeapWord*)_virtual_space.high()) < old_end) {
9446       size_t new_word_size =
9447         heap_word_size(_virtual_space.committed_size());
9448 
9449       // Have to remove the chunk from the dictionary because it is changing
9450       // size and might belong somewhere else in the dictionary.
9451 
9452       // Get the chunk at end, shrink it, and put it
9453       // back.
9454       _cmsSpace->removeChunkFromDictionary(chunk_at_end);
9455       size_t word_size_change = word_size_before - new_word_size;
9456       size_t chunk_at_end_old_size = chunk_at_end->size();
9457       assert(chunk_at_end_old_size >= word_size_change,
9458         "Shrink is too large");
9459       chunk_at_end->set_size(chunk_at_end_old_size -
9460                           word_size_change);
9461       _cmsSpace->freed((HeapWord*) chunk_at_end->end(),
9462         word_size_change);
9463 
9464       _cmsSpace->returnChunkToDictionary(chunk_at_end);
9465 
9466       MemRegion mr(_cmsSpace->bottom(), new_word_size);
9467       _bts->resize(new_word_size);  // resize the block offset shared array
9468       Universe::heap()->barrier_set()->resize_covered_region(mr);
9469       _cmsSpace->assert_locked();
9470       _cmsSpace->set_end((HeapWord*)_virtual_space.high());
9471 
9472       NOT_PRODUCT(_cmsSpace->dictionary()->verify());
9473 
9474       // update the space and generation capacity counters
9475       if (UsePerfData) {
9476         _space_counters->update_capacity();
9477         _gen_counters->update_all();
9478       }
9479 
9480       if (Verbose && PrintGCDetails) {
9481         size_t new_mem_size = _virtual_space.committed_size();
9482         size_t old_mem_size = new_mem_size + bytes;
9483         gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
9484                       name(), old_mem_size/K, bytes/K, new_mem_size/K);
9485       }
9486     }
9487 
9488     assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
9489       "Inconsistency at end of space");
9490     assert(chunk_at_end->end() == (uintptr_t*) _cmsSpace->end(),
9491       "Shrinking is inconsistent");
9492     return;
9493   }
9494 }
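
// [Illustrative sketch, not part of the original sources] The size arithmetic
// used by shrink_by() above, in isolation: the shrink request is capped by the
// size of the final free chunk, aligned down to the OS page size. align_down
// and shrinkable_bytes are our names; align_down mirrors align_size_down for
// power-of-two alignments.
#if 0  // sketch only; not compiled
#include <cstddef>

static size_t align_down(size_t sz, size_t alignment) {
  return sz & ~(alignment - 1);              // alignment must be a power of two
}

static size_t shrinkable_bytes(size_t desired_bytes,
                               size_t chunk_at_end_bytes,
                               size_t page_size) {
  size_t aligned = align_down(chunk_at_end_bytes, page_size);
  return (desired_bytes < aligned) ? desired_bytes : aligned;   // MIN2 above
}
// e.g. a request to shrink by 1 MB when the final free chunk is 1.5 MB and the
// page size is 4 KB yields the full 1 MB; a 97 KB end chunk would cap the
// shrink at 96 KB (98304 bytes), the page-aligned part of that chunk.
#endif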
9495 // Transfer some number of overflowed objects to the usual marking
9496 // stack. Return true if some objects were transferred.
9497 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
9498   size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
9499                     (size_t)ParGCDesiredObjsFromOverflowList);
9500 
9501   bool res = _collector->take_from_overflow_list(num, _mark_stack);
9502   assert(_collector->overflow_list_is_empty() || res,
9503          "If list is not empty, we should have taken something");
9504   assert(!res || !_mark_stack->isEmpty(),
9505          "If we took something, it should now be on our stack");
9506   return res;
9507 }
9508 
9509 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
9510   size_t res = _sp->block_size_no_stall(addr, _collector);
9511   if (_sp->block_is_obj(addr)) {
9512     if (_live_bit_map->isMarked(addr)) {
9513       // It can't have been dead in a previous cycle
9514       guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
9515     } else {
9516       _dead_bit_map->mark(addr);      // mark the dead object
9517     }
9518   }
9519   // Could be 0, if the block size could not be computed without stalling.
9520   return res;
9521 }
9522 
9523 TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {
9524 
9525   switch (phase) {
9526     case CMSCollector::InitialMarking:
9527       initialize(true  /* fullGC */ ,
9528                  cause /* cause of the GC */,
9529                  true  /* recordGCBeginTime */,
9530                  true  /* recordPreGCUsage */,
9531                  false /* recordPeakUsage */,
9532                  false /* recordPostGCusage */,
9533                  true  /* recordAccumulatedGCTime */,
9534                  false /* recordGCEndTime */,
9535                  false /* countCollection */  );
9536       break;
9537 
9538     case CMSCollector::FinalMarking:
9539       initialize(true  /* fullGC */ ,
9540                  cause /* cause of the GC */,
9541                  false /* recordGCBeginTime */,
9542                  false /* recordPreGCUsage */,
9543                  false /* recordPeakUsage */,
9544                  false /* recordPostGCusage */,
9545                  true  /* recordAccumulatedGCTime */,
9546                  false /* recordGCEndTime */,
9547                  false /* countCollection */  );
9548       break;
9549 
9550     case CMSCollector::Sweeping:
9551       initialize(true  /* fullGC */ ,
9552                  cause /* cause of the GC */,
9553                  false /* recordGCBeginTime */,
9554                  false /* recordPreGCUsage */,
9555                  true  /* recordPeakUsage */,
9556                  true  /* recordPostGCusage */,
9557                  false /* recordAccumulatedGCTime */,
9558                  true  /* recordGCEndTime */,
9559                  true  /* countCollection */  );
9560       break;
9561 
9562     default:
9563       ShouldNotReachHere();
9564   }
9565 }