Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
62 changes: 60 additions & 2 deletions generator/metrics/metrics.go
Original file line number Diff line number Diff line change
Expand Up @@ -199,19 +199,24 @@ func (g *Generator) generateAndWrite(writer output.Writer) error {
for _, resAttrs := range g.resourceCombos {
for i := range g.metrics {
def := &g.metrics[i]
value := def.ValueMin + g.rng.Float64()*(def.ValueMax-def.ValueMin)

record := output.MetricRecord{
Name: def.Name,
Description: def.Description,
Unit: def.Unit,
Type: def.Type,
DoubleValue: &value,
Attributes: def.Attributes,
ResourceAttributes: resAttrs,
Timestamp: now,
}

if def.Type == output.MetricTypeHistogram {
g.populateHistogram(&record, def)
} else {
value := def.ValueMin + g.rng.Float64()*(def.ValueMax-def.ValueMin)
record.DoubleValue = &value
}

if err := writer.WriteMetric(ctx, record); err != nil {
errorType := "unknown"
if ctx.Err() == context.DeadlineExceeded {
Expand Down Expand Up @@ -239,6 +244,59 @@ func (g *Generator) recordWriteError(errorType string) {
)
}

// populateHistogram fills a MetricRecord with synthetic histogram data.
//
// It divides [ValueMin, ValueMax] into numBuckets evenly spaced buckets,
// generates a random count per bucket, and derives Count/Sum/Min/Max from
// each bucket's midpoint so the reported statistics stay consistent with
// the configured value range. The trailing overflow bucket (values greater
// than the last explicit bound, i.e. greater than ValueMax) is always
// emitted with a count of zero: synthetic observations never exceed
// ValueMax, matching the gauge/counter paths which also sample within
// [ValueMin, ValueMax].
//
// When ValueMax == ValueMin the evenly spaced bounds would collapse into
// duplicates, which violates the OTLP requirement that explicit bounds be
// strictly increasing; in that degenerate case a single-bucket histogram
// with no explicit bounds is produced instead.
func (g *Generator) populateHistogram(record *output.MetricRecord, def *MetricDefinition) {
	const numBuckets = 5
	span := def.ValueMax - def.ValueMin

	// Degenerate range: every observation is the single value ValueMin.
	// Emit one bucket with no explicit bounds (valid per the OTLP data
	// model) rather than duplicate, non-increasing bounds.
	if span <= 0 {
		total := uint64(g.rng.Intn(20) + 1)
		sum := def.ValueMin * float64(total)
		v := def.ValueMin
		record.HistogramBucketBounds = nil
		record.HistogramBucketCounts = []uint64{total}
		record.HistogramCount = &total
		record.HistogramSum = &sum
		record.HistogramMin = &v
		record.HistogramMax = &v
		return
	}

	step := span / float64(numBuckets)

	// Explicit upper bounds: ValueMin+step, ..., ValueMin+numBuckets*step.
	bounds := make([]float64, numBuckets)
	for i := range bounds {
		bounds[i] = def.ValueMin + step*float64(i+1)
	}

	// bucketCounts has one extra slot for the overflow bucket
	// (observations above the last bound); it stays zero by design.
	bucketCounts := make([]uint64, numBuckets+1)
	var totalCount uint64
	var sum float64
	minVal := def.ValueMax
	maxVal := def.ValueMin

	for i := 0; i < numBuckets; i++ {
		c := uint64(g.rng.Intn(20) + 1)
		bucketCounts[i] = c
		totalCount += c

		// Use the bucket midpoint as a representative value so that
		// Sum/Min/Max remain inside [ValueMin, ValueMax].
		representative := def.ValueMin + step*(float64(i)+0.5)

		sum += representative * float64(c)
		if representative < minVal {
			minVal = representative
		}
		if representative > maxVal {
			maxVal = representative
		}
	}

	record.HistogramBucketBounds = bounds
	record.HistogramBucketCounts = bucketCounts
	record.HistogramCount = &totalCount
	record.HistogramSum = &sum
	record.HistogramMin = &minVal
	record.HistogramMax = &maxVal
}

// cartesianProduct computes the cartesian product of a map of keys to
// value lists. Each returned map has exactly one value per key. If the
// input is nil or empty, a single empty map is returned so callers
Expand Down
4 changes: 2 additions & 2 deletions internal/config/generator_metrics.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,9 +29,9 @@ func (m *MetricDefinition) Validate() error {
return fmt.Errorf("metric name is required")
}
switch m.Type {
case "gauge", "sum":
case "gauge", "sum", "counter", "histogram":
default:
return fmt.Errorf("metric %q: type must be \"gauge\" or \"sum\", got %q", m.Name, m.Type)
return fmt.Errorf("metric %q: type must be \"gauge\", \"sum\", \"counter\", or \"histogram\", got %q", m.Name, m.Type)
}
if m.ValueMax < m.ValueMin {
return fmt.Errorf("metric %q: valueMax (%g) must be >= valueMin (%g)", m.Name, m.ValueMax, m.ValueMin)
Expand Down
20 changes: 19 additions & 1 deletion internal/config/generator_metrics_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,11 +36,29 @@ func TestMetricDefinition_Validate(t *testing.T) {
def: MetricDefinition{Type: "gauge"},
wantErr: true,
},
{
name: "valid counter",
def: MetricDefinition{
Name: "http.requests",
Type: "counter",
ValueMin: 1,
ValueMax: 50,
},
},
{
name: "valid histogram",
def: MetricDefinition{
Name: "http.duration",
Type: "histogram",
ValueMin: 0,
ValueMax: 5,
},
},
{
name: "invalid type",
def: MetricDefinition{
Name: "m",
Type: "histogram",
Type: "exponential_histogram",
},
wantErr: true,
},
Expand Down
52 changes: 38 additions & 14 deletions output/otlp_grpc/otlp_grpc.go
Original file line number Diff line number Diff line change
Expand Up @@ -501,34 +501,44 @@ func (o *OTLPGrpc) buildOTLPMetric(data output.MetricRecord) *metricspb.Metric {
})
}

// Build the data point value
dp := &metricspb.NumberDataPoint{
TimeUnixNano: timeNano,
Attributes: attrs,
}
if data.DoubleValue != nil {
dp.Value = &metricspb.NumberDataPoint_AsDouble{AsDouble: *data.DoubleValue}
} else if data.IntValue != nil {
dp.Value = &metricspb.NumberDataPoint_AsInt{AsInt: *data.IntValue}
}

m := &metricspb.Metric{
Name: data.Name,
Description: data.Description,
Unit: data.Unit,
}

switch data.Type {
case output.MetricTypeSum:
case output.MetricTypeHistogram:
hdp := &metricspb.HistogramDataPoint{
TimeUnixNano: timeNano,
Attributes: attrs,
ExplicitBounds: data.HistogramBucketBounds,
BucketCounts: data.HistogramBucketCounts,
}
if data.HistogramCount != nil {
hdp.Count = *data.HistogramCount
}
hdp.Sum = data.HistogramSum
hdp.Min = data.HistogramMin
hdp.Max = data.HistogramMax
m.Data = &metricspb.Metric_Histogram{
Histogram: &metricspb.Histogram{
AggregationTemporality: metricspb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
DataPoints: []*metricspb.HistogramDataPoint{hdp},
},
}
case output.MetricTypeSum, output.MetricTypeCounter:
dp := o.buildNumberDataPoint(timeNano, attrs, data)
m.Data = &metricspb.Metric_Sum{
Sum: &metricspb.Sum{
AggregationTemporality: metricspb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
IsMonotonic: true,
DataPoints: []*metricspb.NumberDataPoint{dp},
IsMonotonic: true,
DataPoints: []*metricspb.NumberDataPoint{dp},
},
}
default:
// Default to gauge
dp := o.buildNumberDataPoint(timeNano, attrs, data)
m.Data = &metricspb.Metric_Gauge{
Gauge: &metricspb.Gauge{
DataPoints: []*metricspb.NumberDataPoint{dp},
Expand All @@ -539,6 +549,20 @@ func (o *OTLPGrpc) buildOTLPMetric(data output.MetricRecord) *metricspb.Metric {
return m
}

// buildNumberDataPoint converts a MetricRecord's scalar value into an OTLP
// NumberDataPoint carrying the given timestamp and attributes. DoubleValue
// takes precedence over IntValue when both are set; if neither is set the
// point's value is left unset.
func (o *OTLPGrpc) buildNumberDataPoint(timeNano uint64, attrs []*commonpb.KeyValue, data output.MetricRecord) *metricspb.NumberDataPoint {
	point := &metricspb.NumberDataPoint{
		TimeUnixNano: timeNano,
		Attributes:   attrs,
	}

	switch {
	case data.DoubleValue != nil:
		point.Value = &metricspb.NumberDataPoint_AsDouble{AsDouble: *data.DoubleValue}
	case data.IntValue != nil:
		point.Value = &metricspb.NumberDataPoint_AsInt{AsInt: *data.IntValue}
	}

	return point
}

// SupportedTelemetry returns the telemetry types this output supports.
func (o *OTLPGrpc) SupportedTelemetry() []telemetry.Type {
return []telemetry.Type{telemetry.Logs, telemetry.Metrics}
Expand Down
20 changes: 20 additions & 0 deletions output/output.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,10 @@ const (
MetricTypeGauge MetricType = "gauge"
// MetricTypeSum represents a sum (counter) metric.
MetricTypeSum MetricType = "sum"
// MetricTypeCounter represents a monotonic counter metric (OTLP Sum with IsMonotonic=true).
MetricTypeCounter MetricType = "counter"
// MetricTypeHistogram represents a histogram metric.
MetricTypeHistogram MetricType = "histogram"
)

// MetricRecord represents a single metric data point.
Expand Down Expand Up @@ -70,6 +74,22 @@ type MetricRecord struct {

// Timestamp is when the measurement was taken.
Timestamp time.Time

// Histogram fields (only used when Type is MetricTypeHistogram).

// HistogramCount is the total number of observations.
HistogramCount *uint64
// HistogramSum is the sum of all observed values.
HistogramSum *float64
// HistogramMin is the minimum observed value.
HistogramMin *float64
// HistogramMax is the maximum observed value.
HistogramMax *float64
// HistogramBucketBounds are the explicit bucket boundaries.
HistogramBucketBounds []float64
// HistogramBucketCounts are the counts for each bucket
// (length = len(HistogramBucketBounds) + 1).
HistogramBucketCounts []uint64
}

// Writer can consume log and metric records.
Expand Down
Loading