@@ -560,7 +560,7 @@ int Profiler::getJavaTraceAsync(void *ucontext, ASGCT_CallFrame *frames,
     return 0;
   }
 
-  atomicInc(_failures[-trace.num_frames]);
+  atomicIncRelaxed(_failures[-trace.num_frames]);
   trace.frames->bci = BCI_ERROR;
   trace.frames->method_id = (jmethodID)err_string;
   return trace.frames - frames + 1;
@@ -608,14 +608,14 @@ void Profiler::fillFrameTypes(ASGCT_CallFrame *frames, int num_frames,
 }
 
 u64 Profiler::recordJVMTISample(u64 counter, int tid, jthread thread, jint event_type, Event *event, bool deferred) {
-  atomicInc(_total_samples);
+  atomicIncRelaxed(_total_samples);
 
   u32 lock_index = getLockIndex(tid);
   if (!_locks[lock_index].tryLock() &&
       !_locks[lock_index = (lock_index + 1) % CONCURRENCY_LEVEL].tryLock() &&
       !_locks[lock_index = (lock_index + 2) % CONCURRENCY_LEVEL].tryLock()) {
     // Too many concurrent signals already
-    atomicInc(_failures[-ticks_skipped]);
+    atomicIncRelaxed(_failures[-ticks_skipped]);
 
     return 0;
   }
@@ -655,14 +655,14 @@ u64 Profiler::recordJVMTISample(u64 counter, int tid, jthread thread, jint event
 }
 
 void Profiler::recordDeferredSample(int tid, u64 call_trace_id, jint event_type, Event *event) {
-  atomicInc(_total_samples);
+  atomicIncRelaxed(_total_samples);
 
   u32 lock_index = getLockIndex(tid);
   if (!_locks[lock_index].tryLock() &&
       !_locks[lock_index = (lock_index + 1) % CONCURRENCY_LEVEL].tryLock() &&
       !_locks[lock_index = (lock_index + 2) % CONCURRENCY_LEVEL].tryLock()) {
     // Too many concurrent signals already
-    atomicInc(_failures[-ticks_skipped]);
+    atomicIncRelaxed(_failures[-ticks_skipped]);
     return;
   }
 
@@ -673,14 +673,14 @@ void Profiler::recordDeferredSample(int tid, u64 call_trace_id, jint event_type,
 
 void Profiler::recordSample(void *ucontext, u64 counter, int tid,
                             jint event_type, u64 call_trace_id, Event *event) {
-  atomicInc(_total_samples);
+  atomicIncRelaxed(_total_samples);
 
   u32 lock_index = getLockIndex(tid);
   if (!_locks[lock_index].tryLock() &&
       !_locks[lock_index = (lock_index + 1) % CONCURRENCY_LEVEL].tryLock() &&
       !_locks[lock_index = (lock_index + 2) % CONCURRENCY_LEVEL].tryLock()) {
     // Too many concurrent signals already
-    atomicInc(_failures[-ticks_skipped]);
+    atomicIncRelaxed(_failures[-ticks_skipped]);
 
     if (event_type == BCI_CPU && _cpu_engine == &perf_events) {
       // Need to reset PerfEvents ring buffer, even though we discard the
@@ -789,7 +789,7 @@ void Profiler::recordQueueTime(int tid, QueueTimeEvent *event) {
 void Profiler::recordExternalSample(u64 weight, int tid, int num_frames,
                                     ASGCT_CallFrame *frames, bool truncated,
                                     jint event_type, Event *event) {
-  atomicInc(_total_samples);
+  atomicIncRelaxed(_total_samples);
 
   u64 call_trace_id =
       _call_trace_storage.put(num_frames, frames, truncated, weight);
@@ -799,7 +799,7 @@ void Profiler::recordExternalSample(u64 weight, int tid, int num_frames,
       !_locks[lock_index = (lock_index + 1) % CONCURRENCY_LEVEL].tryLock() &&
       !_locks[lock_index = (lock_index + 2) % CONCURRENCY_LEVEL].tryLock()) {
     // Too many concurrent signals already
-    atomicInc(_failures[-ticks_skipped]);
+    atomicIncRelaxed(_failures[-ticks_skipped]);
     return;
   }
 
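The definition of `atomicIncRelaxed` is not part of these hunks. A minimal sketch of the intended difference, assuming the existing `atomicInc` is a full-barrier `__sync_fetch_and_add` as is common in async-profiler-derived code, and using GCC/Clang builtins (names and signatures here are illustrative, not necessarily the project's actual helpers):

```cpp
#include <stdint.h>

typedef uint64_t u64;

// Assumed existing helper: __sync_fetch_and_add implies a full memory
// barrier around the increment.
static inline u64 atomicInc(volatile u64 &var, u64 increment = 1) {
  return __sync_fetch_and_add(&var, increment);
}

// Assumed relaxed variant: still an atomic read-modify-write (no lost
// updates), but with no ordering constraints on surrounding memory
// accesses. Enough for counters that are only read for reporting.
static inline u64 atomicIncRelaxed(volatile u64 &var, u64 increment = 1) {
  return __atomic_fetch_add(&var, increment, __ATOMIC_RELAXED);
}
```

On x86-64 both variants typically lower to the same `lock xadd`; the payoff is on weakly ordered architectures such as arm64, where a relaxed fetch-add can drop the fencing that a full-barrier increment implies. That is safe here because `_total_samples` and `_failures[]` are plain statistics counters: nothing else is published under them, so atomicity alone is all that is required.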
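For context on the surrounding `tryLock()` chains: these record functions run in signal handlers, which must never block, so the code probes the thread's home lock bucket plus two fallbacks and drops the sample (counted under `ticks_skipped`) if all three are busy. A self-contained sketch of that pattern, with `SpinLock`, `CONCURRENCY_LEVEL`, and `getLockIndex` as assumed stand-ins for the real declarations:

```cpp
#include <atomic>
#include <cstdint>

typedef uint32_t u32;

// Assumed stand-ins; only the probing logic below is taken from the diff.
static const u32 CONCURRENCY_LEVEL = 16;

struct SpinLock {
  std::atomic_flag _flag = ATOMIC_FLAG_INIT;
  bool tryLock() { return !_flag.test_and_set(std::memory_order_acquire); }
  void unlock() { _flag.clear(std::memory_order_release); }
};

static SpinLock _locks[CONCURRENCY_LEVEL];

// Hypothetical hash; the real getLockIndex may differ.
static u32 getLockIndex(int tid) { return (u32)tid % CONCURRENCY_LEVEL; }

// Returns the index of an acquired lock, or -1 if every probe failed,
// in which case the caller records the sample as skipped rather than
// blocking inside a signal handler.
static int tryLockAnyBucket(int tid) {
  u32 lock_index = getLockIndex(tid);
  if (_locks[lock_index].tryLock() ||
      _locks[lock_index = (lock_index + 1) % CONCURRENCY_LEVEL].tryLock() ||
      _locks[lock_index = (lock_index + 2) % CONCURRENCY_LEVEL].tryLock()) {
    return (int)lock_index;
  }
  return -1; // too many concurrent signals: drop the sample
}
```

Note that because the second fallback reuses the already-updated `lock_index`, the probes land at offsets 0, +1, and +3 from the home bucket, exactly as in the chained expressions in the diff.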