Blame SOURCES/jdk8203030-zero_s390_31_bit_size_t_type_conflicts_in_shared_code.patch
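
Every hunk below addresses the same root cause: HotSpot's MIN2/MAX2/MIN3 helpers are single-type templates, so a call that mixes a size_t expression with a VM flag declared as a different integer type (uintx or intx) only compiles when the compiler can deduce one common argument type. On Zero builds for 31-bit s390, size_t and uintx are distinct types of the same width, deduction fails, and each call site gains an explicit cast to whichever type the surrounding code expects (size_t in most places, uintx where the result feeds a uintx value). The remaining hunks (the log2_intptr calls and addr_to_page_index's return type) resolve the same size_t/uintptr_t mismatch. The standalone sketch below reproduces the failure mode; the MIN2/MAX2 definitions, the uintx typedef, and the MinHeapDeltaBytes value are simplified stand-ins, not HotSpot's actual declarations.

// Minimal sketch (not HotSpot source): single-type MIN2/MAX2 templates
// modeled on HotSpot's, showing why a mixed size_t/uintx call fails to
// deduce on targets where the two are distinct types, and how a cast at
// the call site (the pattern used throughout this patch) fixes it.
#include <cstddef>
#include <cstdint>
#include <iostream>

template <class T> T MIN2(T a, T b) { return (a < b) ? a : b; }
template <class T> T MAX2(T a, T b) { return (a > b) ? a : b; }

// Simplified stand-ins for HotSpot's uintx typedef and a uintx -XX flag;
// the real declarations and default value differ.
typedef uintptr_t uintx;
static const uintx MinHeapDeltaBytes = 128 * 1024;

int main() {
  size_t desired_capacity = 4u * 1024 * 1024;
  size_t capacity         = 3u * 1024 * 1024;

  // Fails to compile where size_t and uintx are different types:
  // template parameter 'T' deduces to both size_t and uintx.
  // size_t expand_bytes = MAX2(desired_capacity - capacity, MinHeapDeltaBytes);

  // The patch's fix: cast the flag so both arguments share one type.
  size_t expand_bytes = MAX2(desired_capacity - capacity, (size_t)MinHeapDeltaBytes);

  std::cout << expand_bytes << std::endl;
  return 0;
}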

53732a diff -r 4689eaf1a5c9 src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
53732a --- openjdk.orig/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Mon Aug 31 07:09:56 2020 +0100
53732a +++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Tue Sep 08 22:20:44 2020 -0400
53732a @@ -2689,7 +2689,7 @@
4ca1da    if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
4ca1da      size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
4ca1da      n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
4ca1da -    n_blks = MIN2(n_blks, CMSOldPLABMax);
4ca1da +    n_blks = MIN2(n_blks, (size_t)CMSOldPLABMax);
4ca1da    }
4ca1da    assert(n_blks > 0, "Error");
4ca1da    _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
53732a diff -r 4689eaf1a5c9 src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
53732a --- openjdk.orig/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Mon Aug 31 07:09:56 2020 +0100
53732a +++ openjdk/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Sep 08 22:20:44 2020 -0400
53732a @@ -961,7 +961,7 @@
4ca1da    if (free_percentage < desired_free_percentage) {
4ca1da      size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
4ca1da      assert(desired_capacity >= capacity(), "invalid expansion size");
4ca1da -    size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
4ca1da +    size_t expand_bytes = MAX2(desired_capacity - capacity(), (size_t)MinHeapDeltaBytes);
4ca1da      if (PrintGCDetails && Verbose) {
4ca1da        size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
4ca1da        gclog_or_tty->print_cr("\nFrom compute_new_size: ");
53732a @@ -6591,7 +6591,7 @@
4ca1da      HeapWord* curAddr = _markBitMap.startWord();
4ca1da      while (curAddr < _markBitMap.endWord()) {
4ca1da        size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
4ca1da -      MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
4ca1da +      MemRegion chunk(curAddr, MIN2((size_t)CMSBitMapYieldQuantum, remaining));
4ca1da        _markBitMap.clear_large_range(chunk);
4ca1da        if (ConcurrentMarkSweepThread::should_yield() &&
4ca1da            !foregroundGCIsActive() &&
53732a @@ -6889,7 +6889,7 @@
4ca1da      return;
4ca1da    }
4ca1da    // Double capacity if possible
4ca1da -  size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
4ca1da +  size_t new_capacity = MIN2(_capacity*2, (size_t)MarkStackSizeMax);
4ca1da    // Do not give up existing stack until we have managed to
4ca1da    // get the double capacity that we desired.
4ca1da    ReservedSpace rs(ReservedSpace::allocation_align_size_up(
53732a diff -r 4689eaf1a5c9 src/share/vm/gc_implementation/g1/concurrentMark.cpp
53732a --- openjdk.orig/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Mon Aug 31 07:09:56 2020 +0100
53732a +++ openjdk/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Sep 08 22:20:44 2020 -0400
53732a @@ -3916,7 +3916,7 @@
4ca1da    // of things to do) or totally (at the very end).
4ca1da    size_t target_size;
4ca1da    if (partially) {
4ca1da -    target_size = MIN2((size_t)_task_queue->max_elems()/3, GCDrainStackTargetSize);
53732a +    target_size = MIN2((size_t)_task_queue->max_elems()/3, (size_t)GCDrainStackTargetSize);
4ca1da    } else {
4ca1da      target_size = 0;
4ca1da    }
53732a diff -r 4689eaf1a5c9 src/share/vm/gc_implementation/g1/g1BiasedArray.hpp
53732a --- openjdk.orig/hotspot/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp	Mon Aug 31 07:09:56 2020 +0100
53732a +++ openjdk/hotspot/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp	Tue Sep 08 22:20:44 2020 -0400
53732a @@ -78,7 +78,8 @@
53732a      size_t num_target_elems = pointer_delta(end, bottom, mapping_granularity_in_bytes);
53732a      idx_t bias = (uintptr_t)bottom / mapping_granularity_in_bytes;
53732a      address base = create_new_base_array(num_target_elems, target_elem_size_in_bytes);
53732a -    initialize_base(base, num_target_elems, bias, target_elem_size_in_bytes, log2_intptr(mapping_granularity_in_bytes));
53732a +    initialize_base(base, num_target_elems, bias, target_elem_size_in_bytes,
53732a +		    log2_intptr((uintptr_t)mapping_granularity_in_bytes));
4ca1da    }
53732a  
53732a    size_t bias() const { return _bias; }
53732a diff -r 4689eaf1a5c9 src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
53732a --- openjdk.orig/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Aug 31 07:09:56 2020 +0100
53732a +++ openjdk/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Sep 08 22:20:44 2020 -0400
7a39b1 @@ -1729,7 +1729,7 @@
4ca1da  
4ca1da    verify_region_sets_optional();
4ca1da  
4ca1da -  size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
4ca1da +  size_t expand_bytes = MAX2(word_size * HeapWordSize, (size_t)MinHeapDeltaBytes);
4ca1da    ergo_verbose1(ErgoHeapSizing,
4ca1da                  "attempt heap expansion",
4ca1da                  ergo_format_reason("allocation request failed")
53732a diff -r 4689eaf1a5c9 src/share/vm/gc_implementation/g1/g1ConcurrentMarkObjArrayProcessor.cpp
53732a --- openjdk.orig/hotspot/src/share/vm/gc_implementation/g1/g1ConcurrentMarkObjArrayProcessor.cpp	Mon Aug 31 07:09:56 2020 +0100
53732a +++ openjdk/hotspot/src/share/vm/gc_implementation/g1/g1ConcurrentMarkObjArrayProcessor.cpp	Tue Sep 08 22:20:44 2020 -0400
53732a @@ -41,7 +41,7 @@
4ca1da  }
4ca1da  
53732a  size_t G1CMObjArrayProcessor::process_array_slice(objArrayOop obj, HeapWord* start_from, size_t remaining) {
53732a -  size_t words_to_scan = MIN2(remaining, ObjArrayMarkingStride);
53732a +  size_t words_to_scan = MIN2(remaining, (size_t)ObjArrayMarkingStride);
53732a  
53732a    if (remaining > ObjArrayMarkingStride) {
53732a      push_array_slice(start_from + ObjArrayMarkingStride);
53732a diff -r 4689eaf1a5c9 src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.hpp
53732a --- openjdk.orig/hotspot/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.hpp	Mon Aug 31 07:09:56 2020 +0100
53732a +++ openjdk/hotspot/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.hpp	Tue Sep 08 22:20:44 2020 -0400
53732a @@ -89,7 +89,7 @@
53732a    void pretouch_internal(size_t start_page, size_t end_page);
53732a  
53732a    // Returns the index of the page which contains the given address.
53732a -  uintptr_t  addr_to_page_index(char* addr) const;
53732a +  size_t  addr_to_page_index(char* addr) const;
53732a    // Returns the address of the given page index.
53732a    char*  page_start(size_t index) const;
4ca1da  
53732a diff -r 4689eaf1a5c9 src/share/vm/gc_implementation/g1/g1StringDedupQueue.cpp
53732a --- openjdk.orig/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupQueue.cpp	Mon Aug 31 07:09:56 2020 +0100
53732a +++ openjdk/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupQueue.cpp	Tue Sep 08 22:20:44 2020 -0400
4ca1da @@ -38,7 +38,7 @@
4ca1da    _cancel(false),
4ca1da    _empty(true),
4ca1da    _dropped(0) {
4ca1da -  _nqueues = MAX2(ParallelGCThreads, (size_t)1);
4ca1da +  _nqueues = MAX2(ParallelGCThreads, (uintx)1);
4ca1da    _queues = NEW_C_HEAP_ARRAY(G1StringDedupWorkerQueue, _nqueues, mtGC);
4ca1da    for (size_t i = 0; i < _nqueues; i++) {
4ca1da      new (_queues + i) G1StringDedupWorkerQueue(G1StringDedupWorkerQueue::default_segment_size(), _max_cache_size, _max_size);
53732a diff -r 4689eaf1a5c9 src/share/vm/gc_implementation/g1/g1StringDedupTable.cpp
53732a --- openjdk.orig/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupTable.cpp	Mon Aug 31 07:09:56 2020 +0100
53732a +++ openjdk/hotspot/src/share/vm/gc_implementation/g1/g1StringDedupTable.cpp	Tue Sep 08 22:20:44 2020 -0400
4ca1da @@ -120,7 +120,7 @@
4ca1da  };
4ca1da  
4ca1da  G1StringDedupEntryCache::G1StringDedupEntryCache(size_t max_size) :
4ca1da -  _nlists(MAX2(ParallelGCThreads, (size_t)1)),
4ca1da +  _nlists(MAX2(ParallelGCThreads, (uintx)1)),
4ca1da    _max_list_length(0),
4ca1da    _cached(PaddedArray<G1StringDedupEntryList, mtGC>::create_unfreeable((uint)_nlists)),
4ca1da    _overflowed(PaddedArray<G1StringDedupEntryList, mtGC>::create_unfreeable((uint)_nlists)) {
53732a diff -r 4689eaf1a5c9 src/share/vm/gc_implementation/g1/heapRegion.cpp
53732a --- openjdk.orig/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Mon Aug 31 07:09:56 2020 +0100
53732a +++ openjdk/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Tue Sep 08 22:20:44 2020 -0400
53732a @@ -110,7 +110,7 @@
4ca1da    if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
4ca1da      size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
4ca1da      region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(),
4ca1da -                       (uintx) HeapRegionBounds::min_size());
4ca1da +                       HeapRegionBounds::min_size());
4ca1da    }
4ca1da  
4ca1da    int region_size_log = log2_long((jlong) region_size);
53732a diff -r 4689eaf1a5c9 src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
53732a --- openjdk.orig/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Mon Aug 31 07:09:56 2020 +0100
53732a +++ openjdk/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Tue Sep 08 22:20:44 2020 -0400
4ca1da @@ -194,7 +194,7 @@
4ca1da    const size_t num_overflow_elems = of_stack->size();
4ca1da    const size_t space_available = queue->max_elems() - queue->size();
4ca1da    const size_t num_take_elems = MIN3(space_available / 4,
4ca1da -                                     ParGCDesiredObjsFromOverflowList,
4ca1da +                                     (size_t)ParGCDesiredObjsFromOverflowList,
4ca1da                                       num_overflow_elems);
4ca1da    // Transfer the most recent num_take_elems from the overflow
4ca1da    // stack to our work queue.
53732a diff -r 4689eaf1a5c9 src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
53732a --- openjdk.orig/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Mon Aug 31 07:09:56 2020 +0100
53732a +++ openjdk/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Tue Sep 08 22:20:44 2020 -0400
53732a @@ -912,7 +912,7 @@
53732a  
4ca1da  void PSParallelCompact::initialize_dead_wood_limiter()
4ca1da  {
53732a -  const size_t max = 100;
53732a +  const uintx max = 100;
53732a    _dwl_mean = double(MIN2(ParallelOldDeadWoodLimiterMean, max)) / 100.0;
53732a    _dwl_std_dev = double(MIN2(ParallelOldDeadWoodLimiterStdDev, max)) / 100.0;
4ca1da    _dwl_first_term = 1.0 / (sqrt(2.0 * M_PI) * _dwl_std_dev);
53732a diff -r 4689eaf1a5c9 src/share/vm/memory/collectorPolicy.cpp
53732a --- openjdk.orig/hotspot/src/share/vm/memory/collectorPolicy.cpp	Mon Aug 31 07:09:56 2020 +0100
53732a +++ openjdk/hotspot/src/share/vm/memory/collectorPolicy.cpp	Tue Sep 08 22:20:44 2020 -0400
4ca1da @@ -385,7 +385,7 @@
4ca1da        uintx calculated_size = NewSize + OldSize;
4ca1da        double shrink_factor = (double) MaxHeapSize / calculated_size;
4ca1da        uintx smaller_new_size = align_size_down((uintx)(NewSize * shrink_factor), _gen_alignment);
4ca1da -      FLAG_SET_ERGO(uintx, NewSize, MAX2(young_gen_size_lower_bound(), smaller_new_size));
53732a +      FLAG_SET_ERGO(uintx, NewSize, MAX2((uintx)young_gen_size_lower_bound(), smaller_new_size));
4ca1da        _initial_gen0_size = NewSize;
4ca1da  
4ca1da        // OldSize is already aligned because above we aligned MaxHeapSize to
4ca1da @@ -433,7 +433,7 @@
4ca1da      // yield a size that is too small) and bound it by MaxNewSize above.
4ca1da      // Ergonomics plays here by previously calculating the desired
4ca1da      // NewSize and MaxNewSize.
4ca1da -    max_new_size = MIN2(MAX2(max_new_size, NewSize), MaxNewSize);
4ca1da +    max_new_size = MIN2(MAX2(max_new_size, (size_t)NewSize), (size_t)MaxNewSize);
4ca1da    }
4ca1da    assert(max_new_size > 0, "All paths should set max_new_size");
4ca1da  
53732a @@ -455,23 +455,25 @@
4ca1da        // lower limit.
4ca1da        _min_gen0_size = NewSize;
4ca1da        desired_new_size = NewSize;
4ca1da -      max_new_size = MAX2(max_new_size, NewSize);
4ca1da +      max_new_size = MAX2(max_new_size, (size_t)NewSize);
4ca1da      } else if (FLAG_IS_ERGO(NewSize)) {
4ca1da        // If NewSize is set ergonomically, we should use it as a lower
4ca1da        // limit, but use NewRatio to calculate the initial size.
4ca1da        _min_gen0_size = NewSize;
4ca1da        desired_new_size =
4ca1da -        MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize);
4ca1da -      max_new_size = MAX2(max_new_size, NewSize);
4ca1da +        MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), (size_t)NewSize);
4ca1da +      max_new_size = MAX2(max_new_size, (size_t)NewSize);
4ca1da      } else {
4ca1da        // For the case where NewSize is the default, use NewRatio
4ca1da        // to size the minimum and initial generation sizes.
4ca1da        // Use the default NewSize as the floor for these values.  If
4ca1da        // NewRatio is overly large, the resulting sizes can be too
4ca1da        // small.
4ca1da -      _min_gen0_size = MAX2(scale_by_NewRatio_aligned(_min_heap_byte_size), NewSize);
53732a +      _min_gen0_size = MAX2(scale_by_NewRatio_aligned(_min_heap_byte_size),
53732a +			    (size_t)NewSize);
4ca1da        desired_new_size =
4ca1da -        MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize);
53732a +        MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size),
53732a +	     (size_t)NewSize);
53732a      }
4ca1da  
4ca1da      assert(_min_gen0_size > 0, "Sanity check");
53732a @@ -573,7 +575,7 @@
4ca1da    } else {
4ca1da      // It's been explicitly set on the command line.  Use the
4ca1da      // OldSize and then determine the consequences.
4ca1da -    _min_gen1_size = MIN2(OldSize, _min_heap_byte_size - _min_gen0_size);
4ca1da +    _min_gen1_size = MIN2((size_t)OldSize, _min_heap_byte_size - _min_gen0_size);
4ca1da      _initial_gen1_size = OldSize;
4ca1da  
4ca1da      // If the user has explicitly set an OldSize that is inconsistent
53732a diff -r 4689eaf1a5c9 src/share/vm/memory/metaspace.cpp
53732a --- openjdk.orig/hotspot/src/share/vm/memory/metaspace.cpp	Mon Aug 31 07:09:56 2020 +0100
53732a +++ openjdk/hotspot/src/share/vm/memory/metaspace.cpp	Tue Sep 08 22:20:44 2020 -0400
7a39b1 @@ -1482,7 +1482,7 @@
4ca1da  
4ca1da  void MetaspaceGC::post_initialize() {
4ca1da    // Reset the high-water mark once the VM initialization is done.
4ca1da -  _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), MetaspaceSize);
4ca1da +  _capacity_until_GC = MAX2(MetaspaceAux::committed_bytes(), (size_t)MetaspaceSize);
4ca1da  }
4ca1da  
4ca1da  bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
7a39b1 @@ -1542,7 +1542,7 @@
7a39b1      (size_t)MIN2(min_tmp, double(MaxMetaspaceSize));
4ca1da    // Don't shrink less than the initial generation size
4ca1da    minimum_desired_capacity = MAX2(minimum_desired_capacity,
4ca1da -                                  MetaspaceSize);
4ca1da +                                  (size_t)MetaspaceSize);
4ca1da  
4ca1da    if (PrintGCDetails && Verbose) {
4ca1da      gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
7a39b1 @@ -1600,7 +1600,7 @@
4ca1da      const double max_tmp = used_after_gc / minimum_used_percentage;
7a39b1      size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(MaxMetaspaceSize));
4ca1da      maximum_desired_capacity = MAX2(maximum_desired_capacity,
4ca1da -                                    MetaspaceSize);
4ca1da +                                    (size_t)MetaspaceSize);
4ca1da      if (PrintGCDetails && Verbose) {
4ca1da        gclog_or_tty->print_cr("  "
4ca1da                               "  maximum_free_percentage: %6.2f"
53732a @@ -3320,7 +3320,7 @@
53732a      // Make the first class chunk bigger than a medium chunk so it's not put
4ca1da      // on the medium chunk list.   The next chunk will be small and progress
4ca1da      // from there.  This size calculated by -version.
53732a -    _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
53732a +    _first_class_chunk_word_size = MIN2((uintx)MediumChunk*6,
53732a                                         (CompressedClassSpaceSize/BytesPerWord)*2);
4ca1da      _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
4ca1da      // Arbitrarily set the initial virtual space to a multiple
53732a diff -r 4689eaf1a5c9 src/share/vm/oops/objArrayKlass.inline.hpp
53732a --- openjdk.orig/hotspot/src/share/vm/oops/objArrayKlass.inline.hpp	Mon Aug 31 07:09:56 2020 +0100
53732a +++ openjdk/hotspot/src/share/vm/oops/objArrayKlass.inline.hpp	Tue Sep 08 22:20:44 2020 -0400
4ca1da @@ -48,7 +48,7 @@
4ca1da    const size_t beg_index = size_t(index);
4ca1da    assert(beg_index < len || len == 0, "index too large");
4ca1da  
4ca1da -  const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride);
4ca1da +  const size_t stride = MIN2(len - beg_index, (size_t)ObjArrayMarkingStride);
4ca1da    const size_t end_index = beg_index + stride;
4ca1da    T* const base = (T*)a->base();
4ca1da    T* const beg = base + beg_index;
4ca1da @@ -82,7 +82,7 @@
4ca1da    const size_t beg_index = size_t(index);
4ca1da    assert(beg_index < len || len == 0, "index too large");
4ca1da  
4ca1da -  const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride);
4ca1da +  const size_t stride = MIN2(len - beg_index, (size_t)ObjArrayMarkingStride);
4ca1da    const size_t end_index = beg_index + stride;
4ca1da    T* const base = (T*)a->base();
4ca1da    T* const beg = base + beg_index;
53732a diff -r 4689eaf1a5c9 src/share/vm/runtime/arguments.cpp
53732a --- openjdk.orig/hotspot/src/share/vm/runtime/arguments.cpp	Mon Aug 31 07:09:56 2020 +0100
53732a +++ openjdk/hotspot/src/share/vm/runtime/arguments.cpp	Tue Sep 08 22:20:44 2020 -0400
53732a @@ -1301,7 +1301,7 @@
4ca1da      // NewSize was set on the command line and it is larger than
4ca1da      // preferred_max_new_size.
4ca1da      if (!FLAG_IS_DEFAULT(NewSize)) {   // NewSize explicitly set at command-line
4ca1da -      FLAG_SET_ERGO(uintx, MaxNewSize, MAX2(NewSize, preferred_max_new_size));
4ca1da +      FLAG_SET_ERGO(uintx, MaxNewSize, MAX2((size_t)NewSize, preferred_max_new_size));
4ca1da      } else {
4ca1da        FLAG_SET_ERGO(uintx, MaxNewSize, preferred_max_new_size);
4ca1da      }
53732a @@ -1326,8 +1326,8 @@
4ca1da        // Unless explicitly requested otherwise, make young gen
4ca1da        // at least min_new, and at most preferred_max_new_size.
4ca1da        if (FLAG_IS_DEFAULT(NewSize)) {
4ca1da -        FLAG_SET_ERGO(uintx, NewSize, MAX2(NewSize, min_new));
4ca1da -        FLAG_SET_ERGO(uintx, NewSize, MIN2(preferred_max_new_size, NewSize));
4ca1da +        FLAG_SET_ERGO(uintx, NewSize, MAX2((size_t)NewSize, min_new));
4ca1da +        FLAG_SET_ERGO(uintx, NewSize, MIN2(preferred_max_new_size, (size_t)NewSize));
4ca1da          if (PrintGCDetails && Verbose) {
4ca1da            // Too early to use gclog_or_tty
4ca1da            tty->print_cr("CMS ergo set NewSize: " SIZE_FORMAT, NewSize);
53732a @@ -1337,7 +1337,7 @@
4ca1da        // so it's NewRatio x of NewSize.
4ca1da        if (FLAG_IS_DEFAULT(OldSize)) {
4ca1da          if (max_heap > NewSize) {
4ca1da -          FLAG_SET_ERGO(uintx, OldSize, MIN2(NewRatio*NewSize, max_heap - NewSize));
4ca1da +          FLAG_SET_ERGO(uintx, OldSize, MIN2((size_t)(NewRatio*NewSize), max_heap - NewSize));
4ca1da            if (PrintGCDetails && Verbose) {
4ca1da              // Too early to use gclog_or_tty
4ca1da              tty->print_cr("CMS ergo set OldSize: " SIZE_FORMAT, OldSize);
53732a diff -r 4689eaf1a5c9 src/share/vm/runtime/os.cpp
53732a --- openjdk.orig/hotspot/src/share/vm/runtime/os.cpp	Mon Aug 31 07:09:56 2020 +0100
53732a +++ openjdk/hotspot/src/share/vm/runtime/os.cpp	Tue Sep 08 22:20:44 2020 -0400
53732a @@ -1272,7 +1272,7 @@
fad0a1  }
fad0a1  
53732a  void os::set_memory_serialize_page(address page) {
53732a -  int count = log2_intptr(sizeof(class JavaThread)) - log2_int(64);
53732a +  int count = log2_intptr((uintptr_t)sizeof(class JavaThread)) - log2_int(64);
53732a    _mem_serialize_page = (volatile int32_t *)page;
53732a    // We initialize the serialization page shift count here
53732a    // We assume a cache line size of 64 bytes