@@ -44,24 +44,33 @@ narrowKlass CompressedKlassPointers::_lowest_valid_narrow_klass_id = (narrowKlas
 narrowKlass CompressedKlassPointers::_highest_valid_narrow_klass_id = (narrowKlass)-1;
 size_t CompressedKlassPointers::_protection_zone_size = 0;
 
-#ifdef _LP64
-
 size_t CompressedKlassPointers::max_klass_range_size() {
-  // We disallow klass range sizes larger than 4GB even if the encoding
-  // range would allow for a larger Klass range (e.g. Base=zero, shift=3 -> 32GB).
-  // That is because many CPU-specific compiler decodings do not want the
-  // shifted narrow Klass to spill over into the third quadrant of the 64-bit target
-  // address, e.g. to use a 16-bit move for a simplified base addition.
-  return MIN2(4 * G, max_encoding_range_size());
+#ifdef _LP64
+  const size_t encoding_allows = nth_bit(narrow_klass_pointer_bits() + max_shift());
+  constexpr size_t cap = 4 * G;
+  return MIN2(encoding_allows, cap);
+#else
+  // 32-bit: only 32-bit "narrow" Klass pointers allowed. If we ever support smaller narrow
+  // Klass pointers here, coding needs to be revised.
+  // We keep one page safety zone free to guard against size_t overflows on 32-bit. In practice
+  // this is irrelevant because these upper address space parts are not user-addressable on
+  // any of our 32-bit platforms.
+  return align_down(UINT_MAX, os::vm_page_size());
+#endif
 }
 
 void CompressedKlassPointers::pre_initialize() {
   if (UseCompactObjectHeaders) {
     _narrow_klass_pointer_bits = narrow_klass_pointer_bits_coh;
     _max_shift = max_shift_coh;
   } else {
+#ifdef _LP64
     _narrow_klass_pointer_bits = narrow_klass_pointer_bits_noncoh;
     _max_shift = max_shift_noncoh;
+#else
+    _narrow_klass_pointer_bits = 32;
+    _max_shift = 0;
+#endif
   }
 }
 
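For context on the 64-bit branch above: with the traditional (non-compact-header) constants of 32 narrow-Klass bits and a maximum shift of 3, the encoding could cover 32 GB, and the 4 GB cap is what actually limits the Klass range. A minimal standalone sketch of that arithmetic, with illustrative constants rather than the patch's own code:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Illustrative constants for the non-compact-header case (assumed values).
      const int narrow_klass_bits = 32;
      const int max_shift = 3;
      const uint64_t G = 1024ull * 1024 * 1024;

      const uint64_t encoding_allows = 1ull << (narrow_klass_bits + max_shift); // 32 GB
      const uint64_t cap = 4 * G;                                               // 4 GB
      const uint64_t max_range = encoding_allows < cap ? encoding_allows : cap;

      printf("encoding allows %llu GB, capped klass range %llu GB\n",
             (unsigned long long)(encoding_allows / G),
             (unsigned long long)(max_range / G));
      return 0;
    }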
@@ -84,6 +93,10 @@ void CompressedKlassPointers::sanity_check_after_initialization() {
   ASSERT_HERE(_base != (address)-1);
   ASSERT_HERE(_shift != -1);
 
+  // We should need a class space if address space is larger than what narrowKlass can address
+  const bool should_need_class_space = (BytesPerWord * BitsPerByte) > narrow_klass_pointer_bits();
+  ASSERT_HERE(should_need_class_space == needs_class_space());
+
   const size_t klass_align = klass_alignment_in_bytes();
 
   // must be aligned enough hold 64-bit data
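The new assert encodes a simple rule: a separate class space is only needed when the address space is wider than what a narrowKlass value can address directly. A hedged sketch of that predicate, using a hypothetical helper with illustrative inputs:

    #include <cstdio>

    // Hypothetical stand-in for the predicate asserted above.
    static bool should_need_class_space(int bits_per_word, int narrow_klass_bits) {
      return bits_per_word > narrow_klass_bits;
    }

    int main() {
      printf("64-bit VM, 32-bit narrowKlass: %d\n", should_need_class_space(64, 32)); // 1 -> needed
      printf("32-bit VM, 32-bit narrowKlass: %d\n", should_need_class_space(32, 32)); // 0 -> not needed
      return 0;
    }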
@@ -96,7 +109,9 @@ void CompressedKlassPointers::sanity_check_after_initialization() {
 
   // Check that Klass range is fully engulfed in the encoding range
   const address encoding_start = _base;
-  const address encoding_end = (address)(p2u(_base) + (uintptr_t)nth_bit(narrow_klass_pointer_bits() + _shift));
+  const address encoding_end = (address)
+      LP64_ONLY(p2u(_base) + (uintptr_t)nth_bit(narrow_klass_pointer_bits() + _shift))
+      NOT_LP64(max_klass_range_size());
   ASSERT_HERE_2(_klass_range_start >= _base && _klass_range_end <= encoding_end,
                 "Resulting encoding range does not fully cover the class range");
 
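For readers not fluent in HotSpot's platform macros: LP64_ONLY(x) expands to x on 64-bit builds and to nothing otherwise, and NOT_LP64(x) is the mirror image, so exactly one of the two expressions above survives preprocessing. A minimal re-statement of that selection, with renamed macros to mark it as an illustration:

    // Illustrative equivalents of the selection performed by LP64_ONLY/NOT_LP64.
    #ifdef _LP64
      #define MY_LP64_ONLY(x) x
      #define MY_NOT_LP64(x)
    #else
      #define MY_LP64_ONLY(x)
      #define MY_NOT_LP64(x) x
    #endif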
@@ -239,6 +254,7 @@ void CompressedKlassPointers::initialize(address addr, size_t len) {
 
   } else {
 
+#ifdef _LP64
     // Traditional (non-compact) header mode
     const uintptr_t unscaled_max = nth_bit(narrow_klass_pointer_bits());
     const uintptr_t zerobased_max = nth_bit(narrow_klass_pointer_bits() + max_shift());
@@ -250,6 +266,7 @@ void CompressedKlassPointers::initialize(address addr, size_t len) {
     address const end = addr + len;
     _base = (end <= (address)unscaled_max) ? nullptr : addr;
 #else
+
     // We try, in order of preference:
     // -unscaled (base=0 shift=0)
     // -zero-based (base=0 shift>0)
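As background for the preference list in the comment above: all of these modes are instances of one decode shape, and the preference simply favors the cheapest special cases. A hedged sketch with a hypothetical helper, not the patch's code:

    #include <cstdint>

    // One decode shape covers the modes; the cheapest special cases are preferred:
    //   unscaled:     base == 0, shift == 0  (narrowKlass is the address)
    //   zero-based:   base == 0, shift  > 0  (just a shift)
    //   nonzero-base: base != 0, shift == 0  (just an add)
    static inline uintptr_t decode_narrow_klass(uint32_t nk, uintptr_t base, int shift) {
      return base + ((uintptr_t)nk << shift);
    }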
@@ -270,11 +287,19 @@ void CompressedKlassPointers::initialize(address addr, size_t len) {
       }
     }
 #endif // AARCH64
+#else
+    // 32-bit "compressed class pointer" mode
+    _base = nullptr;
+    _shift = 0;
+    // as our "protection zone", we just assume the lowest protected parts of
+    // the user address space.
+    _protection_zone_size = os::vm_min_address();
+#endif // LP64
   }
 
   calc_lowest_highest_narrow_klass_id();
 
-  // Initialize klass decode mode and check compability with decode instructions
+  // Initialize JIT-specific decoding settings
   if (!set_klass_decode_mode()) {
 
     // Give fatal error if this is a specified address
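On 32-bit, the setup chosen above (base = 0, shift = 0) means encoding and decoding degenerate to plain casts, and protection against dereferencing small narrowKlass values comes for free from the never-mapped low pages below os::vm_min_address(). A hedged sketch of that identity mapping, with illustrative names:

    #include <cstdint>

    struct Klass;  // opaque placeholder

    // With base == 0 and shift == 0, the mapping is the identity.
    static inline uint32_t encode_klass(const Klass* k) { return (uint32_t)(uintptr_t)k; }
    static inline Klass*   decode_klass(uint32_t nk)    { return (Klass*)(uintptr_t)nk; }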
@@ -288,9 +313,8 @@ void CompressedKlassPointers::initialize(address addr, size_t len) {
             p2i(_base), _shift);
     }
   }
-#ifdef ASSERT
-  sanity_check_after_initialization();
-#endif
+
+  DEBUG_ONLY(sanity_check_after_initialization();)
 }
 
 void CompressedKlassPointers::print_mode(outputStream* st) {
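The DEBUG_ONLY replacement is behavior-preserving: in HotSpot, DEBUG_ONLY(code) expands to code when ASSERT is defined (debug builds) and to nothing in product builds, which is exactly what the removed #ifdef block did. Sketched with a renamed macro to avoid clashing with the real one:

    // Illustrative equivalent of HotSpot's DEBUG_ONLY macro.
    #ifdef ASSERT
      #define MY_DEBUG_ONLY(code) code
    #else
      #define MY_DEBUG_ONLY(code)
    #endif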
@@ -341,4 +365,3 @@ bool CompressedKlassPointers::is_in_protection_zone(address addr) {
       (addr >= base() && addr < base() + _protection_zone_size) : false;
 }
 
-#endif // _LP64