changeset 6542:935f879e4eb0

8016302: Change type of the number of GC workers to unsigned int (2)
Reviewed-by: tschatzl, jwilhelm
author vkempik
date Wed, 19 Apr 2017 04:15:53 +0100
parents e3c151303b10
children bb6a7cb5339b
files
  src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp
  src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp
  src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp
  src/share/vm/gc_implementation/g1/concurrentG1RefineThread.hpp
  src/share/vm/gc_implementation/g1/concurrentMark.cpp
  src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp
  src/share/vm/gc_implementation/g1/dirtyCardQueue.hpp
  src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
  src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
  src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
  src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
  src/share/vm/gc_implementation/g1/g1HotCardCache.cpp
  src/share/vm/gc_implementation/g1/g1HotCardCache.hpp
  src/share/vm/gc_implementation/g1/g1OopClosures.hpp
  src/share/vm/gc_implementation/g1/g1RemSet.cpp
  src/share/vm/gc_implementation/g1/g1RemSet.hpp
  src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp
  src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp
  src/share/vm/gc_implementation/g1/satbQueue.cpp
  src/share/vm/gc_implementation/g1/satbQueue.hpp
  src/share/vm/runtime/thread.cpp
  src/share/vm/runtime/thread.hpp
diffstat 22 files changed, 104 insertions(+), 106 deletions(-)
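The entire patch is one mechanical transformation: worker ids and worker counts move from int (or size_t) to uint, the "no id" sentinel moves from -1 to UINT_MAX, and every loop over workers is adjusted so it still terminates with an unsigned counter. A minimal standalone sketch of the two loop shapes involved (illustrative only, not code from the patch):

```cpp
#include <climits>  // UINT_MAX

typedef unsigned int uint;  // HotSpot's convention

void forward(uint n) {
  // Forward loops are unaffected by the retype: `i < n` is fine unsigned.
  for (uint i = 0; i < n; i++) { /* visit worker i */ }
}

void backward(uint n) {
  // The old signed guard `i >= 0` is always true for an unsigned counter,
  // so the patch tests for wrap-around instead: decrementing 0 yields
  // UINT_MAX. This is also correct for n == 0, since n - 1 wraps straight
  // to UINT_MAX and the body never executes.
  for (uint i = n - 1; i != UINT_MAX; i--) { /* visit worker i */ }
}
```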
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Thu Jul 28 16:09:31 2016 +0300
+++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Wed Apr 19 04:15:53 2017 +0100
@@ -56,10 +56,10 @@
 
   _threads = NEW_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _n_threads, mtGC);
 
-  int worker_id_offset = (int)DirtyCardQueueSet::num_par_ids();
+  uint worker_id_offset = DirtyCardQueueSet::num_par_ids();
 
   ConcurrentG1RefineThread *next = NULL;
-  for (int i = _n_threads - 1; i >= 0; i--) {
+  for (uint i = _n_threads - 1; i != UINT_MAX; i--) {
     ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(this, next, worker_id_offset, i);
     assert(t != NULL, "Conc refine should have been created");
     assert(t->cg1r() == this, "Conc refine thread should refer to this");
@@ -82,7 +82,7 @@
 
 void ConcurrentG1Refine::stop() {
   if (_threads != NULL) {
-    for (int i = 0; i < _n_threads; i++) {
+    for (uint i = 0; i < _n_threads; i++) {
       _threads[i]->stop();
     }
   }
@@ -91,7 +91,7 @@
 void ConcurrentG1Refine::reinitialize_threads() {
   reset_threshold_step();
   if (_threads != NULL) {
-    for (int i = 0; i < _n_threads; i++) {
+    for (uint i = 0; i < _n_threads; i++) {
       _threads[i]->initialize();
     }
   }
@@ -99,7 +99,7 @@
 
 ConcurrentG1Refine::~ConcurrentG1Refine() {
   if (_threads != NULL) {
-    for (int i = 0; i < _n_threads; i++) {
+    for (uint i = 0; i < _n_threads; i++) {
       delete _threads[i];
     }
     FREE_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _threads, mtGC);
@@ -108,7 +108,7 @@
 
 void ConcurrentG1Refine::threads_do(ThreadClosure *tc) {
   if (_threads != NULL) {
-    for (int i = 0; i < _n_threads; i++) {
+    for (uint i = 0; i < _n_threads; i++) {
       tc->do_thread(_threads[i]);
     }
   }
@@ -116,20 +116,20 @@
 
 void ConcurrentG1Refine::worker_threads_do(ThreadClosure * tc) {
   if (_threads != NULL) {
-    for (int i = 0; i < worker_thread_num(); i++) {
+    for (uint i = 0; i < worker_thread_num(); i++) {
       tc->do_thread(_threads[i]);
     }
   }
 }
 
-int ConcurrentG1Refine::thread_num() {
-  int n_threads = (G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads
+uint ConcurrentG1Refine::thread_num() {
+  uint n_threads = (G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads
                                                 : ParallelGCThreads;
-  return MAX2<int>(n_threads, 1);
+  return MAX2<uint>(n_threads, 1);
 }
 
 void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const {
-  for (int i = 0; i < _n_threads; ++i) {
+  for (uint i = 0; i < _n_threads; ++i) {
     _threads[i]->print_on(st);
     st->cr();
   }
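A detail worth noting in thread_num() above: HotSpot's MAX2 is a plain template, roughly template <class T> T MAX2(T a, T b), so calling it with a uint variable and the int literal 1 would make deduction of T ambiguous. That is why the change is MAX2<int> to MAX2<uint> rather than just a variable retype. A reduced illustration, assuming that simplified signature:

```cpp
// Assumed simplified form of HotSpot's MAX2 template.
template <class T> T MAX2(T a, T b) { return (a > b) ? a : b; }

typedef unsigned int uint;

uint at_least_one(uint n_threads) {
  // MAX2(n_threads, 1) would not compile: T deduces to uint from the
  // first argument and to int from the literal. Pinning T resolves it.
  return MAX2<uint>(n_threads, 1);
}
```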
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Thu Jul 28 16:09:31 2016 +0300
+++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Wed Apr 19 04:15:53 2017 +0100
@@ -39,8 +39,8 @@
 
 class ConcurrentG1Refine: public CHeapObj<mtGC> {
   ConcurrentG1RefineThread** _threads;
-  int _n_threads;
-  int _n_worker_threads;
+  uint _n_threads;
+  uint _n_worker_threads;
  /*
   * The value of the update buffer queue length falls into one of 3 zones:
   * green, yellow, red. If the value is in [0, green) nothing is
@@ -88,7 +88,7 @@
   // The RS sampling thread
   ConcurrentG1RefineThread * sampling_thread() const;
 
-  static int thread_num();
+  static uint thread_num();
 
   void print_worker_threads_on(outputStream* st) const;
 
@@ -100,8 +100,8 @@
   int yellow_zone() const     { return _yellow_zone; }
   int red_zone() const        { return _red_zone;    }
 
-  int total_thread_num() const  { return _n_threads;        }
-  int worker_thread_num() const { return _n_worker_threads; }
+  uint total_thread_num() const  { return _n_threads;        }
+  uint worker_thread_num() const { return _n_worker_threads; }
 
   int thread_threshold_step() const { return _thread_threshold_step; }
 
--- a/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Thu Jul 28 16:09:31 2016 +0300
+++ b/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Wed Apr 19 04:15:53 2017 +0100
@@ -33,7 +33,7 @@
 
 ConcurrentG1RefineThread::
 ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread *next,
-                         int worker_id_offset, int worker_id) :
+                         uint worker_id_offset, uint worker_id) :
   ConcurrentGCThread(),
   _worker_id_offset(worker_id_offset),
   _worker_id(worker_id),
--- a/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.hpp	Thu Jul 28 16:09:31 2016 +0300
+++ b/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.hpp	Wed Apr 19 04:15:53 2017 +0100
@@ -38,8 +38,8 @@
 
   double _vtime_start;  // Initial virtual time.
  double _vtime_accum;  // Accumulated virtual time.
-  int _worker_id;
-  int _worker_id_offset;
+  uint _worker_id;
+  uint _worker_id_offset;
 
  // The refinement threads collection is a linked list. A predecessor can activate a successor
  // when the number of rset update buffers crosses a certain threshold. A successor
@@ -71,7 +71,7 @@
   virtual void run();
   // Constructor
   ConcurrentG1RefineThread(ConcurrentG1Refine* cg1r, ConcurrentG1RefineThread* next,
-                           int worker_id_offset, int worker_id);
+                           uint worker_id_offset, uint worker_id);
 
   void initialize();
 
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Jul 28 16:09:31 2016 +0300
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Apr 19 04:15:53 2017 +0100
@@ -1680,7 +1680,6 @@
 
 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
   G1CollectedHeap* _g1;
-  int _worker_num;
   size_t _max_live_bytes;
   uint _regions_claimed;
   size_t _freed_bytes;
@@ -1693,12 +1692,11 @@
 
 public:
   G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
-                             int worker_num,
                              FreeRegionList* local_cleanup_list,
                              OldRegionSet* old_proxy_set,
                              HumongousRegionSet* humongous_proxy_set,
                              HRRSCleanupTask* hrrs_cleanup_task) :
-    _g1(g1), _worker_num(worker_num),
+    _g1(g1),
     _max_live_bytes(0), _regions_claimed(0),
     _freed_bytes(0),
     _claimed_region_time(0.0), _max_region_time(0.0),
@@ -1762,7 +1760,7 @@
     OldRegionSet old_proxy_set("Local Cleanup Old Proxy Set");
     HumongousRegionSet humongous_proxy_set("Local Cleanup Humongous Proxy Set");
     HRRSCleanupTask hrrs_cleanup_task;
-    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, worker_id, &local_cleanup_list,
+    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
                                            &old_proxy_set,
                                            &humongous_proxy_set,
                                            &hrrs_cleanup_task);
--- a/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp	Thu Jul 28 16:09:31 2016 +0300
+++ b/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp	Wed Apr 19 04:15:53 2017 +0100
@@ -48,12 +48,12 @@
 
 bool DirtyCardQueue::apply_closure(CardTableEntryClosure* cl,
                                    bool consume,
-                                   size_t worker_i) {
+                                   uint worker_i) {
   bool res = true;
   if (_buf != NULL) {
     res = apply_closure_to_buffer(cl, _buf, _index, _sz,
                                   consume,
-                                  (int) worker_i);
+                                  worker_i);
     if (res && consume) _index = _sz;
   }
   return res;
@@ -63,7 +63,7 @@
                                              void** buf,
                                              size_t index, size_t sz,
                                              bool consume,
-                                             int worker_i) {
+                                             uint worker_i) {
   if (cl == NULL) return true;
   for (size_t i = index; i < sz; i += oopSize) {
     int ind = byte_index_to_index((int)i);
@@ -93,8 +93,8 @@
 }
 
 // Determines how many mutator threads can process the buffers in parallel.
-size_t DirtyCardQueueSet::num_par_ids() {
-  return os::processor_count();
+uint DirtyCardQueueSet::num_par_ids() {
+  return (uint)os::processor_count();
 }
 
 void DirtyCardQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock,
@@ -117,7 +117,7 @@
 }
 
 void DirtyCardQueueSet::iterate_closure_all_threads(bool consume,
-                                                    size_t worker_i) {
+                                                    uint worker_i) {
   assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
   for(JavaThread* t = Threads::first(); t; t = t->next()) {
     bool b = t->dirty_card_queue().apply_closure(_closure, consume);
@@ -140,11 +140,11 @@
 
  // We get the number of any par_id that this thread
   // might have already claimed.
-  int worker_i = thread->get_claimed_par_id();
+  uint worker_i = thread->get_claimed_par_id();
 
-  // If worker_i is not -1 then the thread has already claimed
+  // If worker_i is not UINT_MAX then the thread has already claimed
   // a par_id. We make note of it using the already_claimed value
-  if (worker_i != -1) {
+  if (worker_i != UINT_MAX) {
     already_claimed = true;
   } else {
 
@@ -156,7 +156,7 @@
   }
 
   bool b = false;
-  if (worker_i != -1) {
+  if (worker_i != UINT_MAX) {
     b = DirtyCardQueue::apply_closure_to_buffer(_closure, buf, 0,
                                                 _sz, true, worker_i);
     if (b) Atomic::inc(&_processed_buffers_mut);
@@ -168,8 +168,8 @@
       // we release the id
       _free_ids->release_par_id(worker_i);
 
-      // and set the claimed_id in the thread to -1
-      thread->set_claimed_par_id(-1);
+      // and set the claimed_id in the thread to UINT_MAX
+      thread->set_claimed_par_id(UINT_MAX);
     }
   }
   return b;
@@ -200,7 +200,7 @@
 
 bool DirtyCardQueueSet::
 apply_closure_to_completed_buffer_helper(CardTableEntryClosure* cl,
-                                         int worker_i,
+                                         uint worker_i,
                                          BufferNode* nd) {
   if (nd != NULL) {
     void **buf = BufferNode::make_buffer_from_node(nd);
@@ -222,7 +222,7 @@
 }
 
 bool DirtyCardQueueSet::apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
-                                                          int worker_i,
+                                                          uint worker_i,
                                                           int stop_at,
                                                           bool during_pause) {
   assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
@@ -232,7 +232,7 @@
   return res;
 }
 
-bool DirtyCardQueueSet::apply_closure_to_completed_buffer(int worker_i,
+bool DirtyCardQueueSet::apply_closure_to_completed_buffer(uint worker_i,
                                                           int stop_at,
                                                           bool during_pause) {
   return apply_closure_to_completed_buffer(_closure, worker_i,
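Because a uint worker id can never be -1, the mutator-side "no par_id claimed" state in the hunks above becomes UINT_MAX. A hedged sketch of the claim/reuse/release protocol this code implements (names simplified, and the FreeIdSet calls elided as comments, so this is not the actual API):

```cpp
#include <climits>

typedef unsigned int uint;

struct MutatorSlot {
  uint _claimed_par_id;
  MutatorSlot() : _claimed_par_id(UINT_MAX) {}  // nothing claimed yet
};

void process_buffer(MutatorSlot* t) {
  uint worker_i = t->_claimed_par_id;
  // UINT_MAX plays the role the old -1 did: "this thread holds no id".
  bool already_claimed = (worker_i != UINT_MAX);
  if (!already_claimed) {
    // worker_i = _free_ids->claim_par_id();  // claim a fresh id
    // t->_claimed_par_id = worker_i;
  }
  if (worker_i != UINT_MAX) {
    // ... apply the closure to the buffer under this worker id ...
    if (!already_claimed) {
      // _free_ids->release_par_id(worker_i);
      t->_claimed_par_id = UINT_MAX;  // back to the sentinel
    }
  }
}
```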
--- a/src/share/vm/gc_implementation/g1/dirtyCardQueue.hpp	Thu Jul 28 16:09:31 2016 +0300
+++ b/src/share/vm/gc_implementation/g1/dirtyCardQueue.hpp	Wed Apr 19 04:15:53 2017 +0100
@@ -36,7 +36,7 @@
 public:
   // Process the card whose card table entry is "card_ptr".  If returns
   // "false", terminate the iteration early.
-  virtual bool do_card_ptr(jbyte* card_ptr, int worker_i = 0) = 0;
+  virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i = 0) = 0;
 };
 
 // A ptrQueue whose elements are "oops", pointers to object heads.
@@ -53,7 +53,7 @@
   // deletes processed entries from logs.
   bool apply_closure(CardTableEntryClosure* cl,
                      bool consume = true,
-                     size_t worker_i = 0);
+                     uint worker_i = 0);
 
   // Apply the closure to all elements of "buf", down to "index"
   // (inclusive.)  If returns "false", then a closure application returned
@@ -63,7 +63,7 @@
   static bool apply_closure_to_buffer(CardTableEntryClosure* cl,
                                       void** buf, size_t index, size_t sz,
                                       bool consume = true,
-                                      int worker_i = 0);
+                                      uint worker_i = 0);
   void **get_buf() { return _buf;}
   void set_buf(void **buf) {_buf = buf;}
   size_t get_index() { return _index;}
@@ -98,7 +98,7 @@
 
   // The number of parallel ids that can be claimed to allow collector or
   // mutator threads to do card-processing work.
-  static size_t num_par_ids();
+  static uint num_par_ids();
 
   static void handle_zero_index_for_thread(JavaThread* t);
 
@@ -115,7 +115,7 @@
   // change in the future.)  If "consume" is true, processed entries are
   // discarded.
   void iterate_closure_all_threads(bool consume = true,
-                                   size_t worker_i = 0);
+                                   uint worker_i = 0);
 
   // If there exists some completed buffer, pop it, then apply the
   // registered closure to all its elements, nulling out those elements
@@ -124,7 +124,7 @@
   // but is only partially completed before a "yield" happens, the
   // partially completed buffer (with its processed elements set to NULL)
   // is returned to the completed buffer set, and this call returns false.
-  bool apply_closure_to_completed_buffer(int worker_i = 0,
+  bool apply_closure_to_completed_buffer(uint worker_i = 0,
                                          int stop_at = 0,
                                          bool during_pause = false);
 
@@ -136,13 +136,13 @@
   // partially completed buffer (with its processed elements set to NULL)
   // is returned to the completed buffer set, and this call returns false.
   bool apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
-                                         int worker_i = 0,
+                                         uint worker_i = 0,
                                          int stop_at = 0,
                                          bool during_pause = false);
 
   // Helper routine for the above.
   bool apply_closure_to_completed_buffer_helper(CardTableEntryClosure* cl,
-                                                int worker_i,
+                                                uint worker_i,
                                                 BufferNode* nd);
 
   BufferNode* get_completed_buffer(int stop_at);
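Retyping the pure virtual do_card_ptr above is why so many files move in a single commit: this codebase predates the C++11 override keyword, so a subclass that kept int worker_i would silently declare an unrelated overload and leave the class abstract, failing only where it is instantiated. A minimal illustration with a hypothetical closure name:

```cpp
typedef unsigned int uint;
typedef signed char jbyte;  // as in JNI

struct CardTableEntryClosure {
  virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i = 0) = 0;
};

struct ExampleClosure : public CardTableEntryClosure {
  // Had this parameter stayed `int worker_i`, it would be a new overload
  // rather than an override, ExampleClosure would remain abstract, and
  // the error would surface only at `new ExampleClosure()`. Hence every
  // subclass in the patch is updated in lockstep.
  virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
    *card_ptr = 0;  // illustrative body only
    return true;
  }
};
```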
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Jul 28 16:09:31 2016 +0300
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Apr 19 04:15:53 2017 +0100
@@ -102,7 +102,7 @@
                               ConcurrentG1Refine* cg1r) :
     _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
   {}
-  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
+  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
     bool oops_into_cset = _g1rs->refine_card(card_ptr, worker_i, false);
     // This path is executed by the concurrent refine or mutator threads,
     // concurrently, and so we do not care if card_ptr contains references
@@ -131,7 +131,7 @@
   {
     for (int i = 0; i < 256; i++) _histo[i] = 0;
   }
-  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
+  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
       _calls++;
       unsigned char* ujb = (unsigned char*)card_ptr;
@@ -160,7 +160,7 @@
   RedirtyLoggedCardTableEntryClosure() :
     _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set()) {}
 
-  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
+  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
       _calls++;
       *card_ptr = 0;
@@ -172,7 +172,7 @@
 
 class RedirtyLoggedCardTableEntryFastClosure : public CardTableEntryClosure {
 public:
-  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
+  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
     *card_ptr = CardTableModRefBS::dirty_card_val();
     return true;
   }
@@ -2335,7 +2335,7 @@
 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
                                                  DirtyCardQueue* into_cset_dcq,
                                                  bool concurrent,
-                                                 int worker_i) {
+                                                 uint worker_i) {
   // Clean cards in the hot card cache
   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
   hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);
@@ -2908,7 +2908,7 @@
 
 // Given the id of a worker, obtain or calculate a suitable
 // starting region for iterating over the current collection set.
-HeapRegion* G1CollectedHeap::start_cset_region_for_worker(int worker_i) {
+HeapRegion* G1CollectedHeap::start_cset_region_for_worker(uint worker_i) {
   assert(get_gc_time_stamp() > 0, "should have been updated by now");
 
   HeapRegion* result = NULL;
@@ -5106,7 +5106,7 @@
                         OopClosure* scan_non_heap_roots,
                         OopsInHeapRegionClosure* scan_rs,
                         OopsInGenClosure* scan_perm,
-                        int worker_i,
+                        uint worker_i,
                         bool manages_code_roots) {
 
   // First scan the strong roots, including the perm gen.
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Jul 28 16:09:31 2016 +0300
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Apr 19 04:15:53 2017 +0100
@@ -844,7 +844,7 @@
                                OopClosure* scan_non_heap_roots,
                                OopsInHeapRegionClosure* scan_rs,
                                OopsInGenClosure* scan_perm,
-                               int worker_i,
+                               uint worker_i,
                                bool manages_code_roots = false);
 
   // Apply "blk" to all the weak roots of the system.  These include
@@ -1176,7 +1176,7 @@
 
   void iterate_dirty_card_closure(CardTableEntryClosure* cl,
                                   DirtyCardQueue* into_cset_dcq,
-                                  bool concurrent, int worker_i);
+                                  bool concurrent, uint worker_i);
 
   // The shared block offset table array.
   G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }
@@ -1459,7 +1459,7 @@
 
   // Given the id of a worker, obtain or calculate a suitable
   // starting region for iterating over the current collection set.
-  HeapRegion* start_cset_region_for_worker(int worker_i);
+  HeapRegion* start_cset_region_for_worker(uint worker_i);
 
   // This is a convenience method that is used by the
   // HeapRegionIterator classes to calculate the starting region for
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp	Thu Jul 28 16:09:31 2016 +0300
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp	Wed Apr 19 04:15:53 2017 +0100
@@ -233,8 +233,8 @@
   LineBuffer(level).append_and_print_cr("[%s: %.1lf ms]", str, value);
 }
 
-void G1GCPhaseTimes::print_stats(int level, const char* str, double value, int workers) {
-  LineBuffer(level).append_and_print_cr("[%s: %.1lf ms, GC Workers: %d]", str, value, workers);
+void G1GCPhaseTimes::print_stats(int level, const char* str, double value, uint workers) {
+  LineBuffer(level).append_and_print_cr("[%s: %.1lf ms, GC Workers: " UINT32_FORMAT "]", str, value, workers);
 }
 
 double G1GCPhaseTimes::accounted_time_ms() {
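The retyped workers argument also forces the format-string change above: %d is the wrong conversion for an unsigned value, so the patch switches to HotSpot's UINT32_FORMAT macro. A standalone equivalent, assuming the macro expands as it does in globalDefinitions.hpp:

```cpp
#include <stdio.h>

// Assumption: HotSpot defines UINT32_FORMAT via PRIu32; "%u" is the
// equivalent expansion on platforms where uint32_t is unsigned int.
#define UINT32_FORMAT "%u"

typedef unsigned int uint;

void print_stats(int level, const char* str, double value, uint workers) {
  // Two spaces of indent per level stand in for the real LineBuffer.
  printf("%*s[%s: %.1lf ms, GC Workers: " UINT32_FORMAT "]\n",
         level * 2, "", str, value, workers);
}
```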
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp	Thu Jul 28 16:09:31 2016 +0300
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp	Wed Apr 19 04:15:53 2017 +0100
@@ -150,7 +150,7 @@
 
   // Helper methods for detailed logging
   void print_stats(int level, const char* str, double value);
-  void print_stats(int level, const char* str, double value, int workers);
+  void print_stats(int level, const char* str, double value, uint workers);
 
  public:
   G1GCPhaseTimes(uint max_gc_threads);
--- a/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp	Thu Jul 28 16:09:31 2016 +0300
+++ b/src/share/vm/gc_implementation/g1/g1HotCardCache.cpp	Wed Apr 19 04:15:53 2017 +0100
@@ -44,9 +44,9 @@
     _hot_cache_idx = 0;
 
     // For refining the cards in the hot cache in parallel
-    int n_workers = (ParallelGCThreads > 0 ?
+    uint n_workers = (ParallelGCThreads > 0 ?
                         _g1h->workers()->total_workers() : 1);
-    _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / n_workers);
+    _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / (int)n_workers);
     _hot_cache_par_claimed_idx = 0;
 
     _card_counts.initialize();
@@ -89,7 +89,7 @@
   return res;
 }
 
-void G1HotCardCache::drain(int worker_i,
+void G1HotCardCache::drain(uint worker_i,
                            G1RemSet* g1rs,
                            DirtyCardQueue* into_cset_dcq) {
   if (!default_use_cache()) {
@@ -122,8 +122,8 @@
             // RSet updating while within an evacuation pause.
             // In this case worker_i should be the id of a GC worker thread
             assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
-            assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads),
-                   err_msg("incorrect worker id: "INT32_FORMAT, worker_i));
+            assert(worker_i < (ParallelGCThreads == 0 ? 1 : ParallelGCThreads),
+                   err_msg("incorrect worker id: "UINT32_FORMAT, worker_i));
 
             into_cset_dcq->enqueue(card_ptr);
           }
--- a/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp	Thu Jul 28 16:09:31 2016 +0300
+++ b/src/share/vm/gc_implementation/g1/g1HotCardCache.hpp	Wed Apr 19 04:15:53 2017 +0100
@@ -99,7 +99,7 @@
 
  // Refine the cards that have been delayed as a result of
   // being in the cache.
-  void drain(int worker_i, G1RemSet* g1rs, DirtyCardQueue* into_cset_dcq);
+  void drain(uint worker_i, G1RemSet* g1rs, DirtyCardQueue* into_cset_dcq);
 
   // Set up for parallel processing of the cards in the hot cache
   void reset_hot_cache_claimed_index() {
--- a/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Thu Jul 28 16:09:31 2016 +0300
+++ b/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Wed Apr 19 04:15:53 2017 +0100
@@ -281,14 +281,14 @@
   HeapRegion* _from;
   OopsInHeapRegionClosure* _push_ref_cl;
   bool _record_refs_into_cset;
-  int _worker_i;
+  uint _worker_i;
 
 public:
   G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
                                 G1RemSet* rs,
                                 OopsInHeapRegionClosure* push_ref_cl,
                                 bool record_refs_into_cset,
-                                int worker_i = 0);
+                                uint worker_i = 0);
 
   void set_from(HeapRegion* from) {
     assert(from != NULL, "from region must be non-NULL");
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu Jul 28 16:09:31 2016 +0300
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Wed Apr 19 04:15:53 2017 +0100
@@ -114,14 +114,14 @@
   G1SATBCardTableModRefBS *_ct_bs;
 
   double _strong_code_root_scan_time_sec;
-  int    _worker_i;
+  uint   _worker_i;
   int    _block_size;
   bool   _try_claimed;
 
 public:
   ScanRSClosure(OopsInHeapRegionClosure* oc,
                 CodeBlobToOopClosure* code_root_cl,
-                int worker_i) :
+                uint worker_i) :
     _oc(oc),
     _code_root_cl(code_root_cl),
     _strong_code_root_scan_time_sec(0.0),
@@ -163,7 +163,7 @@
 
   void printCard(HeapRegion* card_region, size_t card_index,
                  HeapWord* card_start) {
-    gclog_or_tty->print_cr("T %d Region [" PTR_FORMAT ", " PTR_FORMAT ") "
+    gclog_or_tty->print_cr("T " UINT32_FORMAT " Region [" PTR_FORMAT ", " PTR_FORMAT ") "
                            "RS names card %p: "
                            "[" PTR_FORMAT ", " PTR_FORMAT ")",
                            _worker_i,
@@ -243,7 +243,7 @@
 
 void G1RemSet::scanRS(OopsInHeapRegionClosure* oc,
                       CodeBlobToOopClosure* code_root_cl,
-                      int worker_i) {
+                      uint worker_i) {
   double rs_time_start = os::elapsedTime();
   HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i);
 
@@ -276,13 +276,13 @@
                                               DirtyCardQueue* into_cset_dcq) :
     _g1rs(g1h->g1_rem_set()), _into_cset_dcq(into_cset_dcq)
   {}
-  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
+  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
     // The only time we care about recording cards that
     // contain references that point into the collection set
     // is during RSet updating within an evacuation pause.
     // In this case worker_i should be the id of a GC worker thread.
     assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
-    assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "should be a GC worker");
+    assert(worker_i < (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "should be a GC worker");
 
     if (_g1rs->refine_card(card_ptr, worker_i, true)) {
       // 'card_ptr' contains references that point into the collection
@@ -297,7 +297,7 @@
   }
 };
 
-void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i) {
+void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i) {
   double start = os::elapsedTime();
   // Apply the given closure to all remaining log entries.
   RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);
@@ -322,14 +322,14 @@
 
 void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
                                            CodeBlobToOopClosure* code_root_cl,
-                                           int worker_i) {
+                                           uint worker_i) {
 #if CARD_REPEAT_HISTO
   ct_freq_update_histo_and_reset();
 #endif
 
   // We cache the value of 'oc' closure into the appropriate slot in the
   // _cset_rs_update_cl for this worker
-  assert(worker_i < (int)n_workers(), "sanity");
+  assert(worker_i < n_workers(), "sanity");
   _cset_rs_update_cl[worker_i] = oc;
 
   // A DirtyCardQueue that is used to hold cards containing references
@@ -406,7 +406,7 @@
     _g1(g1), _ct_bs(bs)
   { }
 
-  bool do_card_ptr(jbyte* card_ptr, int worker_i) {
+  bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
     // Construct the region representing the card.
     HeapWord* start = _ct_bs->addr_for(card_ptr);
     // And find the region containing it.
@@ -547,7 +547,7 @@
                               G1RemSet* rs,
                               OopsInHeapRegionClosure* push_ref_cl,
                               bool record_refs_into_cset,
-                              int worker_i) :
+                              uint worker_i) :
   _g1(g1h), _g1_rem_set(rs), _from(NULL),
   _record_refs_into_cset(record_refs_into_cset),
   _push_ref_cl(push_ref_cl), _worker_i(worker_i) { }
@@ -556,7 +556,7 @@
 // into the collection set, if we're checking for such references;
 // false otherwise.
 
-bool G1RemSet::refine_card(jbyte* card_ptr, int worker_i,
+bool G1RemSet::refine_card(jbyte* card_ptr, uint worker_i,
                            bool check_for_refs_into_cset) {
 
   // If the card is no longer dirty, nothing to do.
--- a/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Thu Jul 28 16:09:31 2016 +0300
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Wed Apr 19 04:15:53 2017 +0100
@@ -97,7 +97,7 @@
   // In the sequential case this param will be ignored.
   void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
                                    CodeBlobToOopClosure* code_root_cl,
-                                   int worker_i);
+                                   uint worker_i);
 
   // Prepare for and cleanup after an oops_into_collection_set_do
   // call.  Must call each of these once before and after (in sequential
@@ -109,9 +109,9 @@
 
   void scanRS(OopsInHeapRegionClosure* oc,
               CodeBlobToOopClosure* code_root_cl,
-              int worker_i);
+              uint worker_i);
 
-  void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i);
+  void updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i);
 
   CardTableModRefBS* ct_bs() { return _ct_bs; }
   size_t cardsScanned() { return _total_cards_scanned; }
@@ -138,7 +138,7 @@
   // if the given card contains oops that have references into the
   // current collection set.
   virtual bool refine_card(jbyte* card_ptr,
-                           int worker_i,
+                           uint worker_i,
                            bool check_for_refs_into_cset);
 
   // Print accumulated summary info from the start of the VM.
@@ -171,12 +171,12 @@
 class UpdateRSOopClosure: public OopClosure {
   HeapRegion* _from;
   G1RemSet* _rs;
-  int _worker_i;
+  uint _worker_i;
 
   template <class T> void do_oop_work(T* p);
 
 public:
-  UpdateRSOopClosure(G1RemSet* rs, int worker_i = 0) :
+  UpdateRSOopClosure(G1RemSet* rs, uint worker_i = 0) :
     _from(NULL), _rs(rs), _worker_i(worker_i)
   {}
 
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Thu Jul 28 16:09:31 2016 +0300
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Wed Apr 19 04:15:53 2017 +0100
@@ -362,9 +362,9 @@
 void OtherRegionsTable::init_from_card_cache(size_t max_regions) {
   _from_card_cache_max_regions = max_regions;
 
-  int n_par_rs = HeapRegionRemSet::num_par_rem_sets();
+  uint n_par_rs = HeapRegionRemSet::num_par_rem_sets();
   _from_card_cache = NEW_C_HEAP_ARRAY(int*, n_par_rs, mtGC);
-  for (int i = 0; i < n_par_rs; i++) {
+  for (uint i = 0; i < n_par_rs; i++) {
     _from_card_cache[i] = NEW_C_HEAP_ARRAY(int, max_regions, mtGC);
     for (size_t j = 0; j < max_regions; j++) {
       _from_card_cache[i][j] = -1;  // An invalid value.
@@ -374,7 +374,7 @@
 }
 
 void OtherRegionsTable::shrink_from_card_cache(size_t new_n_regs) {
-  for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
+  for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
     assert(new_n_regs <= _from_card_cache_max_regions, "Must be within max.");
     for (size_t j = new_n_regs; j < _from_card_cache_max_regions; j++) {
       _from_card_cache[i][j] = -1;  // An invalid value.
@@ -384,7 +384,7 @@
 
 #ifndef PRODUCT
 void OtherRegionsTable::print_from_card_cache() {
-  for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
+  for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
     for (size_t j = 0; j < _from_card_cache_max_regions; j++) {
       gclog_or_tty->print_cr("_from_card_cache[%d][%d] = %d.",
                     i, j, _from_card_cache[i][j]);
@@ -730,7 +730,7 @@
 
 void OtherRegionsTable::clear_fcc() {
   size_t hrs_idx = hr()->hrs_index();
-  for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
+  for (uint i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
     _from_card_cache[i][hrs_idx] = -1;
   }
 }
@@ -766,7 +766,7 @@
   }
   // Check to see if any of the fcc entries come from here.
   size_t hr_ind = (size_t) hr()->hrs_index();
-  for (int tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) {
+  for (uint tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) {
     int fcc_ent = _from_card_cache[tid][hr_ind];
     if (fcc_ent != -1) {
       HeapWord* card_addr = (HeapWord*)
@@ -841,8 +841,8 @@
 // Determines how many threads can add records to an rset in parallel.
 // This can be done by either mutator threads together with the
 // concurrent refinement threads or GC threads.
-int HeapRegionRemSet::num_par_rem_sets() {
-  return (int)MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), ParallelGCThreads);
+uint HeapRegionRemSet::num_par_rem_sets() {
+  return MAX2(DirtyCardQueueSet::num_par_ids() + ConcurrentG1Refine::thread_num(), (uint)ParallelGCThreads);
 }
 
 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
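In num_par_rem_sets() above, the cast moves from the result onto ParallelGCThreads: the flag is a uintx (pointer-width unsigned), the other operand is now a plain uint, and MAX2 requires both arguments to have the same type. A reduced illustration, assuming uintx is uintptr_t as in HotSpot:

```cpp
#include <stdint.h>

typedef unsigned int uint;
typedef uintptr_t uintx;  // assumption: matches HotSpot's typedef

// Assumed simplified form of HotSpot's MAX2 template.
template <class T> T MAX2(T a, T b) { return (a > b) ? a : b; }

uintx ParallelGCThreads = 4;  // stand-in for the real VM flag

uint num_par_rem_sets(uint num_par_ids, uint refine_threads) {
  // On a 64-bit build, omitting the cast gives MAX2 a uint and a uintx,
  // so T cannot be deduced; narrowing the flag keeps everything in uint.
  return MAX2(num_par_ids + refine_threads, (uint)ParallelGCThreads);
}
```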
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Thu Jul 28 16:09:31 2016 +0300
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Wed Apr 19 04:15:53 2017 +0100
@@ -221,7 +221,7 @@
   HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
                    HeapRegion* hr);
 
-  static int num_par_rem_sets();
+  static uint num_par_rem_sets();
   static void setup_remset_size();
 
   HeapRegion* hr() const {
--- a/src/share/vm/gc_implementation/g1/satbQueue.cpp	Thu Jul 28 16:09:31 2016 +0300
+++ b/src/share/vm/gc_implementation/g1/satbQueue.cpp	Wed Apr 19 04:15:53 2017 +0100
@@ -295,7 +295,7 @@
   shared_satb_queue()->apply_closure_and_empty(_closure);
 }
 
-void SATBMarkQueueSet::par_iterate_closure_all_threads(int worker) {
+void SATBMarkQueueSet::par_iterate_closure_all_threads(uint worker) {
   SharedHeap* sh = SharedHeap::heap();
   int parity = sh->strong_roots_parity();
 
@@ -320,7 +320,7 @@
 }
 
 bool SATBMarkQueueSet::apply_closure_to_completed_buffer_work(bool par,
-                                                              int worker) {
+                                                              uint worker) {
   BufferNode* nd = NULL;
   {
     MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
--- a/src/share/vm/gc_implementation/g1/satbQueue.hpp	Thu Jul 28 16:09:31 2016 +0300
+++ b/src/share/vm/gc_implementation/g1/satbQueue.hpp	Wed Apr 19 04:15:53 2017 +0100
@@ -84,7 +84,7 @@
   // Utility function to support sequential and parallel versions.  If
   // "par" is true, then "worker" is the par thread id; if "false", worker
   // is ignored.
-  bool apply_closure_to_completed_buffer_work(bool par, int worker);
+  bool apply_closure_to_completed_buffer_work(bool par, uint worker);
 
 #ifdef ASSERT
   void dump_active_values(JavaThread* first, bool expected_active);
@@ -123,7 +123,7 @@
   // be called serially and at a safepoint.
   void iterate_closure_all_threads();
   // Parallel version of the above.
-  void par_iterate_closure_all_threads(int worker);
+  void par_iterate_closure_all_threads(uint worker);
 
   // If there exists some completed buffer, pop it, then apply the
   // registered closure to all its elements, and return true.  If no
@@ -132,7 +132,7 @@
     return apply_closure_to_completed_buffer_work(false, 0);
   }
   // Parallel version of the above.
-  bool par_apply_closure_to_completed_buffer(int worker) {
+  bool par_apply_closure_to_completed_buffer(uint worker) {
     return apply_closure_to_completed_buffer_work(true, worker);
   }
 
--- a/src/share/vm/runtime/thread.cpp	Thu Jul 28 16:09:31 2016 +0300
+++ b/src/share/vm/runtime/thread.cpp	Wed Apr 19 04:15:53 2017 +0100
@@ -1403,8 +1403,8 @@
 void JavaThread::initialize() {
   // Initialize fields
 
-  // Set the claimed par_id to -1 (ie not claiming any par_ids)
-  set_claimed_par_id(-1);
+  // Set the claimed par_id to UINT_MAX (ie not claiming any par_ids)
+  set_claimed_par_id(UINT_MAX);
 
   set_saved_exception_pc(NULL);
   set_threadObj(NULL);
--- a/src/share/vm/runtime/thread.hpp	Thu Jul 28 16:09:31 2016 +0300
+++ b/src/share/vm/runtime/thread.hpp	Wed Apr 19 04:15:53 2017 +0100
@@ -1762,12 +1762,12 @@
   void set_done_attaching_via_jni() { _jni_attach_state = _attached_via_jni; OrderAccess::fence(); }
 private:
   // This field is used to determine if a thread has claimed
-  // a par_id: it is -1 if the thread has not claimed a par_id;
+  // a par_id: it is UINT_MAX if the thread has not claimed a par_id;
   // otherwise its value is the par_id that has been claimed.
-  int _claimed_par_id;
+  uint _claimed_par_id;
 public:
-  int get_claimed_par_id() { return _claimed_par_id; }
-  void set_claimed_par_id(int id) { _claimed_par_id = id;}
+  uint get_claimed_par_id() { return _claimed_par_id; }
+  void set_claimed_par_id(uint id) { _claimed_par_id = id;}
 };
 
 // Inline implementation of JavaThread::current