SOURCES/java-1.8.0-openjdk-size_t.patch

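This patch adds explicit size_t casts (and a few related type changes) where HotSpot passes uintx/intx JVM flags to the MIN2/MAX2/MIN3 templates together with size_t values. A minimal sketch of the underlying problem, assuming HotSpot's single-type template definition of MIN2 (the sketch is illustrative and not part of the patch itself):

    // HotSpot declares MIN2/MAX2 as single-type templates, roughly:
    template<class T> inline T MIN2(T a, T b) { return (a < b) ? a : b; }

    // On a platform where size_t and uintx are distinct types, a mixed
    // call cannot deduce a single T and fails to compile:
    //   size_t remaining = ...;
    //   MIN2(CMSBitMapYieldQuantum, remaining);          // error: conflicting types for 'T'
    //   MIN2((size_t)CMSBitMapYieldQuantum, remaining);  // OK: both arguments are size_t

The hunks below apply that cast at each affected call site.
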
diff -ruN jdk8/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp jdk8/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
--- jdk8/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	2014-07-30 06:51:43.000000000 -0400
+++ jdk8/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	2014-09-04 22:47:14.059845388 -0400
@@ -2686,7 +2686,7 @@
   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
     size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
-    n_blks = MIN2(n_blks, CMSOldPLABMax);
+    n_blks = MIN2(n_blks, (size_t)CMSOldPLABMax);
   }
   assert(n_blks > 0, "Error");
   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
diff -ruN jdk8/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp jdk8/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
--- jdk8/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	2014-07-30 06:51:43.000000000 -0400
+++ jdk8/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	2014-09-04 22:47:14.061845394 -0400
@@ -950,7 +950,7 @@
   if (free_percentage < desired_free_percentage) {
     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
     assert(desired_capacity >= capacity(), "invalid expansion size");
-    size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
+    size_t expand_bytes = MAX2(desired_capacity - capacity(), (size_t)MinHeapDeltaBytes);
     if (PrintGCDetails && Verbose) {
       size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
       gclog_or_tty->print_cr("\nFrom compute_new_size: ");
@@ -6559,7 +6559,7 @@
     HeapWord* curAddr = _markBitMap.startWord();
     while (curAddr < _markBitMap.endWord()) {
       size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
-      MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
+      MemRegion chunk(curAddr, MIN2((size_t)CMSBitMapYieldQuantum, remaining));
       _markBitMap.clear_large_range(chunk);
       if (ConcurrentMarkSweepThread::should_yield() &&
           !foregroundGCIsActive() &&
@@ -6858,7 +6858,7 @@
     return;
   }
   // Double capacity if possible
-  size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
+  size_t new_capacity = MIN2(_capacity*2, (size_t)MarkStackSizeMax);
   // Do not give up existing stack until we have managed to
   // get the double capacity that we desired.
   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
diff -ruN jdk8/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp jdk8/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp
--- jdk8/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	2014-07-30 06:51:43.000000000 -0400
+++ jdk8/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	2014-09-04 22:47:14.063845410 -0400
@@ -3767,7 +3767,7 @@
   // of things to do) or totally (at the very end).
   size_t target_size;
   if (partially) {
-    target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
+    target_size = MIN2((size_t)(_task_queue->max_elems()/3), (size_t) GCDrainStackTargetSize);
   } else {
     target_size = 0;
   }
@@ -4605,7 +4605,7 @@
   // The > 0 check is to deal with the prev and next live bytes which
   // could be 0.
   if (*hum_bytes > 0) {
-    bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
+    bytes = MIN2(HeapRegion::GrainBytes, (size_t)*hum_bytes);
     *hum_bytes -= bytes;
   }
   return bytes;
diff -ruN jdk8/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp jdk8/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
--- jdk8/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	2014-07-30 06:51:43.000000000 -0400
+++ jdk8/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	2014-09-04 22:47:14.065845427 -0400
@@ -1730,7 +1730,7 @@
 
   verify_region_sets_optional();
 
-  size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
+  size_t expand_bytes = MAX2(word_size * HeapWordSize, (size_t)MinHeapDeltaBytes);
   ergo_verbose1(ErgoHeapSizing,
                 "attempt heap expansion",
                 ergo_format_reason("allocation request failed")
diff -ruN jdk8/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp jdk8/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp
--- jdk8/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	2014-07-30 06:51:43.000000000 -0400
+++ jdk8/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	2014-09-04 22:47:14.065845427 -0400
@@ -160,7 +160,7 @@
   if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
     size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
     region_size = MAX2(average_heap_size / TARGET_REGION_NUMBER,
-                       (uintx) MIN_REGION_SIZE);
+                       (size_t) MIN_REGION_SIZE);
   }
 
   int region_size_log = log2_long((jlong) region_size);
diff -ruN jdk8/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp jdk8/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp
--- jdk8/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp	2014-07-30 06:51:43.000000000 -0400
+++ jdk8/hotspot/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp	2014-09-04 22:47:14.067845443 -0400
@@ -71,7 +71,7 @@
   if (_virtual_space != NULL && _virtual_space->expand_by(_reserved_byte_size)) {
     _region_start = covered_region.start();
     _region_size = covered_region.word_size();
-    idx_t* map = (idx_t*)_virtual_space->reserved_low_addr();
+    BitMap::bm_word_t* map = (BitMap::bm_word_t*)_virtual_space->reserved_low_addr();
     _beg_bits.set_map(map);
     _beg_bits.set_size(bits / 2);
     _end_bits.set_map(map + words / 2);
diff -ruN jdk8/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp jdk8/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
--- jdk8/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	2014-07-30 06:51:43.000000000 -0400
+++ jdk8/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	2014-09-04 22:47:14.068845451 -0400
@@ -910,8 +910,8 @@
 void PSParallelCompact::initialize_dead_wood_limiter()
 {
   const size_t max = 100;
-  _dwl_mean = double(MIN2(ParallelOldDeadWoodLimiterMean, max)) / 100.0;
-  _dwl_std_dev = double(MIN2(ParallelOldDeadWoodLimiterStdDev, max)) / 100.0;
+  _dwl_mean = double(MIN2((size_t)ParallelOldDeadWoodLimiterMean, max)) / 100.0;
+  _dwl_std_dev = double(MIN2((size_t)ParallelOldDeadWoodLimiterStdDev, max)) / 100.0;
   _dwl_first_term = 1.0 / (sqrt(2.0 * M_PI) * _dwl_std_dev);
   DEBUG_ONLY(_dwl_initialized = true;)
   _dwl_adjustment = normal_distribution(1.0);
diff -ruN jdk8/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp jdk8/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
--- jdk8/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	2014-07-30 06:51:43.000000000 -0400
+++ jdk8/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	2014-09-04 22:47:14.068845451 -0400
@@ -194,7 +194,7 @@
   const size_t num_overflow_elems = of_stack->size();
   const size_t space_available = queue->max_elems() - queue->size();
   const size_t num_take_elems = MIN3(space_available / 4,
-                                     ParGCDesiredObjsFromOverflowList,
+                                     (size_t)ParGCDesiredObjsFromOverflowList,
                                      num_overflow_elems);
   // Transfer the most recent num_take_elems from the overflow
   // stack to our work queue.
diff -ruN jdk8/hotspot/src/share/vm/memory/collectorPolicy.cpp jdk8/hotspot/src/share/vm/memory/collectorPolicy.cpp
--- jdk8/hotspot/src/share/vm/memory/collectorPolicy.cpp	2014-07-30 06:51:43.000000000 -0400
+++ jdk8/hotspot/src/share/vm/memory/collectorPolicy.cpp	2014-09-04 22:55:49.271922585 -0400
@@ -389,7 +389,7 @@
       uintx calculated_size = NewSize + OldSize;
       double shrink_factor = (double) MaxHeapSize / calculated_size;
       uintx smaller_new_size = align_size_down((uintx)(NewSize * shrink_factor), _gen_alignment);
-      FLAG_SET_ERGO(uintx, NewSize, MAX2(young_gen_size_lower_bound(), smaller_new_size));
+      FLAG_SET_ERGO(uintx, NewSize, MAX2(young_gen_size_lower_bound(), (size_t)smaller_new_size));
       _initial_gen0_size = NewSize;
 
       // OldSize is already aligned because above we aligned MaxHeapSize to
@@ -437,7 +437,7 @@
     // yield a size that is too small) and bound it by MaxNewSize above.
     // Ergonomics plays here by previously calculating the desired
     // NewSize and MaxNewSize.
-    max_new_size = MIN2(MAX2(max_new_size, NewSize), MaxNewSize);
+    max_new_size = MIN2(MAX2(max_new_size, (size_t)NewSize), (size_t)MaxNewSize);
   }
   assert(max_new_size > 0, "All paths should set max_new_size");
 
@@ -459,23 +459,23 @@
       // lower limit.
       _min_gen0_size = NewSize;
       desired_new_size = NewSize;
-      max_new_size = MAX2(max_new_size, NewSize);
+      max_new_size = MAX2(max_new_size, (size_t)NewSize);
     } else if (FLAG_IS_ERGO(NewSize)) {
       // If NewSize is set ergonomically, we should use it as a lower
       // limit, but use NewRatio to calculate the initial size.
       _min_gen0_size = NewSize;
       desired_new_size =
-        MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize);
-      max_new_size = MAX2(max_new_size, NewSize);
+        MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), (size_t)NewSize);
+      max_new_size = MAX2(max_new_size, (size_t)NewSize);
     } else {
       // For the case where NewSize is the default, use NewRatio
       // to size the minimum and initial generation sizes.
       // Use the default NewSize as the floor for these values.  If
       // NewRatio is overly large, the resulting sizes can be too
       // small.
-      _min_gen0_size = MAX2(scale_by_NewRatio_aligned(_min_heap_byte_size), NewSize);
+      _min_gen0_size = MAX2(scale_by_NewRatio_aligned(_min_heap_byte_size), (size_t)NewSize);
       desired_new_size =
-        MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize);
+        MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), (size_t)NewSize);
     }
 
     assert(_min_gen0_size > 0, "Sanity check");
@@ -577,7 +577,7 @@
   } else {
     // It's been explicitly set on the command line.  Use the
     // OldSize and then determine the consequences.
-    _min_gen1_size = MIN2(OldSize, _min_heap_byte_size - _min_gen0_size);
+    _min_gen1_size = MIN2((size_t)OldSize, _min_heap_byte_size - _min_gen0_size);
     _initial_gen1_size = OldSize;
 
     // If the user has explicitly set an OldSize that is inconsistent
diff -ruN jdk8/hotspot/src/share/vm/memory/metaspace.cpp jdk8/hotspot/src/share/vm/memory/metaspace.cpp
--- jdk8/hotspot/src/share/vm/memory/metaspace.cpp	2014-07-30 06:51:43.000000000 -0400
+++ jdk8/hotspot/src/share/vm/memory/metaspace.cpp	2014-09-04 22:47:14.071845475 -0400
@@ -1431,7 +1431,7 @@
 
 void MetaspaceGC::post_initialize() {
   // Reset the high-water mark once the VM initialization is done.
-  _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
+  _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), (size_t)MetaspaceSize);
 }
 
 bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
@@ -1491,7 +1491,7 @@
     (size_t)MIN2(min_tmp, double(max_uintx));
   // Don't shrink less than the initial generation size
   minimum_desired_capacity = MAX2(minimum_desired_capacity,
-                                  MetaspaceSize);
+                                  (size_t)MetaspaceSize);
 
   if (PrintGCDetails && Verbose) {
     gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
@@ -1546,7 +1546,7 @@
     const double max_tmp = used_after_gc / minimum_used_percentage;
     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
     maximum_desired_capacity = MAX2(maximum_desired_capacity,
-                                    MetaspaceSize);
+                                    (size_t)MetaspaceSize);
     if (PrintGCDetails && Verbose) {
       gclog_or_tty->print_cr("  "
                              "  maximum_free_percentage: %6.2f"
@@ -3197,7 +3197,7 @@
     // on the medium chunk list.   The next chunk will be small and progress
     // from there.  This size calculated by -version.
     _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
-                                       (CompressedClassSpaceSize/BytesPerWord)*2);
+                                       (size_t)(CompressedClassSpaceSize/BytesPerWord)*2);
     _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
     // Arbitrarily set the initial virtual space to a multiple
     // of the boot class loader size.
diff -ruN jdk8/hotspot/src/share/vm/oops/objArrayKlass.inline.hpp jdk8/hotspot/src/share/vm/oops/objArrayKlass.inline.hpp
--- jdk8/hotspot/src/share/vm/oops/objArrayKlass.inline.hpp	2014-07-30 06:51:43.000000000 -0400
+++ jdk8/hotspot/src/share/vm/oops/objArrayKlass.inline.hpp	2014-09-04 22:47:14.071845475 -0400
@@ -48,7 +48,7 @@
   const size_t beg_index = size_t(index);
   assert(beg_index < len || len == 0, "index too large");
 
-  const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride);
+  const size_t stride = MIN2(len - beg_index, (size_t)ObjArrayMarkingStride);
   const size_t end_index = beg_index + stride;
   T* const base = (T*)a->base();
   T* const beg = base + beg_index;
@@ -82,7 +82,7 @@
   const size_t beg_index = size_t(index);
   assert(beg_index < len || len == 0, "index too large");
 
-  const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride);
+  const size_t stride = MIN2(len - beg_index, (size_t)ObjArrayMarkingStride);
   const size_t end_index = beg_index + stride;
   T* const base = (T*)a->base();
   T* const beg = base + beg_index;
diff -ruN jdk8/hotspot/src/share/vm/runtime/arguments.cpp jdk8/hotspot/src/share/vm/runtime/arguments.cpp
--- jdk8/hotspot/src/share/vm/runtime/arguments.cpp	2014-07-30 06:51:43.000000000 -0400
+++ jdk8/hotspot/src/share/vm/runtime/arguments.cpp	2014-09-04 22:47:14.072845483 -0400
@@ -1244,7 +1244,7 @@
     // NewSize was set on the command line and it is larger than
     // preferred_max_new_size.
     if (!FLAG_IS_DEFAULT(NewSize)) {   // NewSize explicitly set at command-line
-      FLAG_SET_ERGO(uintx, MaxNewSize, MAX2(NewSize, preferred_max_new_size));
+      FLAG_SET_ERGO(uintx, MaxNewSize, MAX2((size_t)NewSize, preferred_max_new_size));
     } else {
       FLAG_SET_ERGO(uintx, MaxNewSize, preferred_max_new_size);
     }
@@ -1269,8 +1269,8 @@
       // Unless explicitly requested otherwise, make young gen
       // at least min_new, and at most preferred_max_new_size.
       if (FLAG_IS_DEFAULT(NewSize)) {
-        FLAG_SET_ERGO(uintx, NewSize, MAX2(NewSize, min_new));
-        FLAG_SET_ERGO(uintx, NewSize, MIN2(preferred_max_new_size, NewSize));
+        FLAG_SET_ERGO(uintx, NewSize, MAX2((size_t)NewSize, min_new));
+        FLAG_SET_ERGO(uintx, NewSize, MIN2(preferred_max_new_size, (size_t)NewSize));
         if (PrintGCDetails && Verbose) {
           // Too early to use gclog_or_tty
           tty->print_cr("CMS ergo set NewSize: " SIZE_FORMAT, NewSize);
@@ -1280,7 +1280,7 @@
       // so it's NewRatio x of NewSize.
       if (FLAG_IS_DEFAULT(OldSize)) {
         if (max_heap > NewSize) {
-          FLAG_SET_ERGO(uintx, OldSize, MIN2(NewRatio*NewSize, max_heap - NewSize));
+          FLAG_SET_ERGO(uintx, OldSize, MIN2((size_t)(NewRatio*NewSize), max_heap - NewSize));
           if (PrintGCDetails && Verbose) {
             // Too early to use gclog_or_tty
             tty->print_cr("CMS ergo set OldSize: " SIZE_FORMAT, OldSize);
@@ -1401,7 +1401,7 @@
   return true;
 }
 
-uintx Arguments::max_heap_for_compressed_oops() {
+size_t Arguments::max_heap_for_compressed_oops() {
   // Avoid sign flip.
   assert(OopEncodingHeapMax > (uint64_t)os::vm_page_size(), "Unusual page size");
   // We need to fit both the NULL page and the heap into the memory budget, while
--- jdk8/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupQueue.cpp      2014-06-12 03:58:35.000000000 -0400
+++ jdk8/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupQueue.cpp      2014-06-12 03:58:35.000000000 -0400
@@ -38,7 +38,7 @@
   _cancel(false),
   _empty(true),
   _dropped(0) {
-  _nqueues = MAX2(ParallelGCThreads, (size_t)1);
+  _nqueues = MAX2(ParallelGCThreads, (uintx)1);
   _queues = NEW_C_HEAP_ARRAY(G1StringDedupWorkerQueue, _nqueues, mtGC);
   for (size_t i = 0; i < _nqueues; i++) {
     new (_queues + i) G1StringDedupWorkerQueue(G1StringDedupWorkerQueue::default_segment_size(), _max_cache_size, _max_size);
--- jdk8/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupTable.cpp      2014-06-12 03:58:35.000000000 -0400
+++ jdk8/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupTable.cpp      2014-06-12 03:58:35.000000000 -0400
@@ -110,7 +110,7 @@
 };
 
 G1StringDedupEntryCache::G1StringDedupEntryCache() {
-  _nlists = MAX2(ParallelGCThreads, (size_t)1);
+  _nlists = MAX2(ParallelGCThreads, (uintx)1);
   _lists = PaddedArray<G1StringDedupEntryFreeList, mtGC>::create_unfreeable((uint)_nlists);
 }