
Commit 23dd823

Add parca_target_samples_total metric to track total scraped samples (#5988)
* Add `parca_target_samples_total` metric to track total scraped samples
* Suppress staticcheck warning for Prometheus `model.LabelName` usage
1 parent 96b7f6e commit 23dd823

File tree

3 files changed: +32 -10 lines changed


pkg/normalizer/normalizer.go

Lines changed: 1 addition & 0 deletions
```diff
@@ -512,6 +512,7 @@ func NormalizeWriteRawRequest(ctx context.Context, req *profilestorepb.WriteRawR
 			continue
 		}
 
+		//nolint:staticcheck // SA1019: Update when we actually use the latest Prometheus
 		if valid := model.LabelName(l.Name).IsValid(); !valid {
 			return NormalizedWriteRawRequest{}, status.Errorf(codes.InvalidArgument, "invalid label name: %v", l.Name)
 		}
```
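For context, the call whose deprecation warning is suppressed here validates profile label names against Prometheus' naming rules. A minimal, standalone sketch of that check (assuming `github.com/prometheus/common/model`; the example names are made up for illustration):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Under the legacy (pre-UTF-8) label-name rules, names must match
	// [a-zA-Z_][a-zA-Z0-9_]* — "instance" passes, the other two do not.
	// Newer prometheus/common versions may relax this depending on the
	// configured validation scheme.
	for _, name := range []string{"instance", "9starts_with_digit", "has.dots"} {
		//nolint:staticcheck // SA1019: same suppression as in normalizer.go
		fmt.Printf("%q valid: %v\n", name, model.LabelName(name).IsValid())
	}
}
```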

pkg/scrape/manager.go

Lines changed: 24 additions & 10 deletions
```diff
@@ -94,6 +94,11 @@ func NewManager(
 			Name: "parca_target_scrapes_sample_out_of_bounds_total",
 			Help: "Total number of samples rejected due to timestamp falling outside of the time bounds",
 		}),
+		sampleCount: prometheus.NewCounter(
+			prometheus.CounterOpts{
+				Name: "parca_target_samples_total",
+				Help: "Total number of samples that were scraped",
+			}),
 	}
 
 	reg.MustRegister(
@@ -105,6 +110,7 @@ func NewManager(
 		m.targetScrapeSampleDuplicate,
 		m.targetScrapeSampleOutOfOrder,
 		m.targetScrapeSampleOutOfBounds,
+		m.sampleCount,
 	)
 
 	c := make(map[string]*config.ScrapeConfig)
@@ -142,6 +148,7 @@ type Manager struct {
 	targetScrapeSampleDuplicate prometheus.Counter
 	targetScrapeSampleOutOfOrder prometheus.Counter
 	targetScrapeSampleOutOfBounds prometheus.Counter
+	sampleCount prometheus.Counter
 }
 
 // Run starts the manager with a set of scrape configs.
@@ -196,16 +203,23 @@ func (m *Manager) reload() {
 			level.Error(m.logger).Log("msg", "error reloading target set", "err", "invalid config id:"+setName)
 			return
 		}
-		sp = newScrapePool(scrapeConfig, m.store, log.With(m.logger, "scrape_pool", setName), m.externalLabels, &scrapePoolMetrics{
-			targetIntervalLength: m.targetIntervalLength,
-			targetReloadIntervalLength: m.targetReloadIntervalLength,
-			targetSyncIntervalLength: m.targetSyncIntervalLength,
-			targetScrapePoolSyncsCounter: m.targetScrapePoolSyncsCounter,
-			targetScrapeSampleLimit: m.targetScrapeSampleLimit,
-			targetScrapeSampleDuplicate: m.targetScrapeSampleDuplicate,
-			targetScrapeSampleOutOfOrder: m.targetScrapeSampleOutOfOrder,
-			targetScrapeSampleOutOfBounds: m.targetScrapeSampleOutOfBounds,
-		})
+		sp = newScrapePool(
+			scrapeConfig,
+			m.store,
+			log.With(m.logger, "scrape_pool", setName),
+			m.externalLabels,
+			m.sampleCount,
+			&scrapePoolMetrics{
+				targetIntervalLength: m.targetIntervalLength,
+				targetReloadIntervalLength: m.targetReloadIntervalLength,
+				targetSyncIntervalLength: m.targetSyncIntervalLength,
+				targetScrapePoolSyncsCounter: m.targetScrapePoolSyncsCounter,
+				targetScrapeSampleLimit: m.targetScrapeSampleLimit,
+				targetScrapeSampleDuplicate: m.targetScrapeSampleDuplicate,
+				targetScrapeSampleOutOfOrder: m.targetScrapeSampleOutOfOrder,
+				targetScrapeSampleOutOfBounds: m.targetScrapeSampleOutOfBounds,
+			},
+		)
 		m.scrapePools[setName] = sp
 	} else {
 		sp = existing
```
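Design-wise, the counter is created and registered once in `NewManager`, and the same instance is handed to every scrape pool, so `parca_target_samples_total` aggregates samples across all targets. A minimal sketch of that register-once, share-everywhere pattern (the registry setup and the `Add(42)` call are illustrative, not from the commit):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Create the counter once; every scrape loop shares this instance.
	sampleCount := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "parca_target_samples_total",
		Help: "Total number of samples that were scraped",
	})

	reg := prometheus.NewRegistry()
	reg.MustRegister(sampleCount)

	// Each scrape loop adds the number of samples it parsed.
	sampleCount.Add(42)

	// Gather and print the aggregated value.
	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range mfs {
		fmt.Println(mf.GetName(), mf.GetMetric()[0].GetCounter().GetValue())
	}
}
```

Once the metric is exposed, a query along the lines of `rate(parca_target_samples_total[5m])` would show overall scraped-sample throughput.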

pkg/scrape/scrape.go

Lines changed: 7 additions & 0 deletions
```diff
@@ -76,6 +76,7 @@ func newScrapePool(
 	store profilepb.ProfileStoreServiceServer,
 	logger log.Logger,
 	externalLabels labels.Labels,
+	sampleCount prometheus.Counter,
 	metrics *scrapePoolMetrics,
 ) *scrapePool {
 	if logger == nil {
@@ -109,6 +110,7 @@ func newScrapePool(
 			log.With(logger, "target", t),
 			externalLabels,
 			sp.metrics.targetIntervalLength,
+			sampleCount,
 			buffers,
 			store,
 			cfg.NormalizedAddresses,
@@ -370,6 +372,7 @@ type scrapeLoop struct {
 	scraper scraper
 	l log.Logger
 	intervalLength *prometheus.SummaryVec
+	sampleCount prometheus.Counter
 	lastScrapeSize int
 	externalLabels labels.Labels
 
@@ -390,6 +393,7 @@ func newScrapeLoop(ctx context.Context,
 	l log.Logger,
 	externalLabels labels.Labels,
 	targetIntervalLength *prometheus.SummaryVec,
+	sampleCount prometheus.Counter,
 	buffers *pool.Pool,
 	store profilepb.ProfileStoreServiceServer,
 	normalizedAddresses bool,
@@ -409,6 +413,7 @@ func newScrapeLoop(ctx context.Context,
 		l: l,
 		externalLabels: externalLabels,
 		intervalLength: targetIntervalLength,
+		sampleCount: sampleCount,
 		ctx: ctx,
 		normalizedAddresses: normalizedAddresses,
 	}
@@ -533,6 +538,8 @@ func processScrapeResp(buf *bytes.Buffer, sl *scrapeLoop, profileType string) er
 		return err
 	}
 
+	sl.sampleCount.Add(float64(len(p.Sample)))
+
 	var executableInfo []*profilepb.ExecutableInfo
 	for _, comment := range p.Comments {
 		if strings.HasPrefix(comment, "executableInfo=") {
```
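In `processScrapeResp` the counter is bumped right after the scraped pprof profile has been parsed, by the number of samples it contains. A hypothetical standalone helper showing the same call (`countScrapedSamples` is made up for illustration; in the commit the `Add` call sits directly in `processScrapeResp`, with parsing assumed to go through `github.com/google/pprof/profile`):

```go
package scrape

import (
	"bytes"

	"github.com/google/pprof/profile"
	"github.com/prometheus/client_golang/prometheus"
)

// countScrapedSamples is a hypothetical helper mirroring the added line:
// parse the scraped body as a pprof profile and bump the shared counter
// by the number of samples it contains.
func countScrapedSamples(buf *bytes.Buffer, sampleCount prometheus.Counter) error {
	p, err := profile.Parse(buf)
	if err != nil {
		return err
	}
	sampleCount.Add(float64(len(p.Sample)))
	return nil
}
```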
