diff --git a/_examples/metrics/main.go b/_examples/metrics/main.go new file mode 100644 index 000000000..815e42efe --- /dev/null +++ b/_examples/metrics/main.go @@ -0,0 +1,55 @@ +package main + +import ( + "context" + "time" + + "github.com/getsentry/sentry-go" + "github.com/getsentry/sentry-go/attribute" +) + +func main() { + err := sentry.Init(sentry.ClientOptions{ + Dsn: "", + EnableMetrics: true, + }) + if err != nil { + panic(err) + } + defer sentry.Flush(2 * time.Second) + + ctx := context.Background() + meter := sentry.NewMeter[int](ctx) + // Attach permanent attributes to the meter. + meter.SetAttributes( + attribute.String("version", "1.0.0"), + ) + + // Count metrics to measure occurrences of an event. + meter.Count("sent_emails", 1, sentry.MeterOptions{ + Attributes: []attribute.Builder{ + attribute.String("email.provider", "sendgrid"), + attribute.Int("email.number_of_recipients", 3), + }, + }) + + // Distribution metrics to measure the statistical distribution of a set of values. + // Useful for tracking patterns in measurements, e.g. file sizes, response times, etc. + meter.Distribution("file_upload_size", 3.14, sentry.MeterOptions{ + Unit: "MB", // Unit is optional, but it's recommended! + Attributes: []attribute.Builder{ + attribute.String("file.type", "image/png"), + attribute.String("bucket.region", "us-west-2"), + attribute.String("bucket.name", "user-uploads"), + }, + }) + + // Gauge metrics to measure a value at a specific point in time. + // Useful for measuring values that can go up and down, e.g. temperature, memory usage, etc. + meter.Gauge("active_chat_conversations", 7, sentry.MeterOptions{ + Unit: "chat_rooms", // Unit is optional, but it's recommended! + Attributes: []attribute.Builder{ + attribute.String("region", "asia-northeast1"), + }, + }) +} diff --git a/batch_meter.go b/batch_meter.go new file mode 100644 index 000000000..cb6bc5082 --- /dev/null +++ b/batch_meter.go @@ -0,0 +1,127 @@ +package sentry + +import ( + "context" + "sync" + "time" +) + +type BatchMeter struct { + client *Client + metricsCh chan Metric + flushCh chan chan struct{} + cancel context.CancelFunc + wg sync.WaitGroup + startOnce sync.Once + shutdownOnce sync.Once +} + +func NewBatchMeter(client *Client) *BatchMeter { + return &BatchMeter{ + client: client, + metricsCh: make(chan Metric, batchSize), + flushCh: make(chan chan struct{}), + } +} + +func (m *BatchMeter) Start() { + m.startOnce.Do(func() { + ctx, cancel := context.WithCancel(context.Background()) + m.cancel = cancel + m.wg.Add(1) + go m.run(ctx) + }) +} + +func (m *BatchMeter) Flush(timeout <-chan struct{}) { + done := make(chan struct{}) + select { + case m.flushCh <- done: + select { + case <-done: + case <-timeout: + } + case <-timeout: + } +} + +func (m *BatchMeter) Shutdown() { + m.shutdownOnce.Do(func() { + if m.cancel != nil { + m.cancel() + m.wg.Wait() + } + }) +} + +func (m *BatchMeter) run(ctx context.Context) { // nolint:dupl + defer m.wg.Done() + var metrics []Metric + timer := time.NewTimer(batchTimeout) + defer timer.Stop() + + for { + select { + case metric := <-m.metricsCh: + metrics = append(metrics, metric) + if len(metrics) >= batchSize { + m.processEvent(metrics) + metrics = nil + if !timer.Stop() { + <-timer.C + } + timer.Reset(batchTimeout) + } + case <-timer.C: + if len(metrics) > 0 { + m.processEvent(metrics) + metrics = nil + } + timer.Reset(batchTimeout) + case done := <-m.flushCh: + flushDrain: + for { + select { + case metric := <-m.metricsCh: + metrics = append(metrics, metric) + default: +
break flushDrain + } + } + + if len(metrics) > 0 { + m.processEvent(metrics) + metrics = nil + } + if !timer.Stop() { + <-timer.C + } + timer.Reset(batchTimeout) + close(done) + case <-ctx.Done(): + drain: + for { + select { + case metric := <-m.metricsCh: + metrics = append(metrics, metric) + default: + break drain + } + } + + if len(metrics) > 0 { + m.processEvent(metrics) + } + return + } + } +} + +func (m *BatchMeter) processEvent(metrics []Metric) { + event := NewEvent() + event.Timestamp = time.Now() + event.EventID = EventID(uuid()) + event.Type = traceMetricEvent.Type + event.Metrics = metrics + m.client.Transport.SendEvent(event) +} diff --git a/client.go b/client.go index 3aa267f70..b51ff31b5 100644 --- a/client.go +++ b/client.go @@ -164,6 +164,9 @@ type ClientOptions struct { BeforeSendTransaction func(event *Event, hint *EventHint) *Event // Before breadcrumb add callback. BeforeBreadcrumb func(breadcrumb *Breadcrumb, hint *BreadcrumbHint) *Breadcrumb + // BeforeSendMetric is called before metric events are sent to Sentry. + // You can use it to mutate the metric or return nil to discard it. + BeforeSendMetric func(metric *Metric) *Metric // Integrations to be installed on the current Client, receives default // integrations. Integrations func([]Integration) []Integration @@ -238,6 +241,8 @@ type ClientOptions struct { Tags map[string]string // EnableLogs controls when logs should be emitted. EnableLogs bool + // EnableMetrics controls when metrics should be emitted. + EnableMetrics bool // TraceIgnoreStatusCodes is a list of HTTP status codes that should not be traced. // Each element can be either: // - A single-element slice [code] for a specific status code @@ -274,6 +279,7 @@ type Client struct { // not supported, create a new client instead. Transport Transport batchLogger *BatchLogger + batchMeter *BatchMeter telemetryBuffer *telemetry.Buffer } @@ -389,6 +395,11 @@ func NewClient(options ClientOptions) (*Client, error) { client.batchLogger.Start() } + if options.EnableMetrics { + client.batchMeter = NewBatchMeter(&client) + client.batchMeter.Start() + } + client.setupIntegrations() return &client, nil @@ -446,6 +457,7 @@ func (client *Client) setupTelemetryBuffer() { // nolint: unused ratelimit.CategoryTransaction: telemetry.NewRingBuffer[protocol.EnvelopeItemConvertible](ratelimit.CategoryTransaction, 1000, telemetry.OverflowPolicyDropOldest, 1, 0), ratelimit.CategoryLog: telemetry.NewRingBuffer[protocol.EnvelopeItemConvertible](ratelimit.CategoryLog, 10*100, telemetry.OverflowPolicyDropOldest, 100, 5*time.Second), ratelimit.CategoryMonitor: telemetry.NewRingBuffer[protocol.EnvelopeItemConvertible](ratelimit.CategoryMonitor, 100, telemetry.OverflowPolicyDropOldest, 1, 0), + ratelimit.CategoryTraceMetric: telemetry.NewRingBuffer[protocol.EnvelopeItemConvertible](ratelimit.CategoryTraceMetric, 1000, telemetry.OverflowPolicyDropOldest, 1, 0), } sdkInfo := &protocol.SdkInfo{ @@ -596,7 +608,7 @@ func (client *Client) RecoverWithContext( // the network synchronously, configure it to use the HTTPSyncTransport in the // call to Init. 
func (client *Client) Flush(timeout time.Duration) bool { - if client.batchLogger != nil || client.telemetryBuffer != nil { + if client.batchLogger != nil || client.batchMeter != nil || client.telemetryBuffer != nil { ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() return client.FlushWithContext(ctx) @@ -620,6 +632,9 @@ func (client *Client) FlushWithContext(ctx context.Context) bool { if client.batchLogger != nil { client.batchLogger.Flush(ctx.Done()) } + if client.batchMeter != nil { + client.batchMeter.Flush(ctx.Done()) + } if client.telemetryBuffer != nil { return client.telemetryBuffer.FlushWithContext(ctx) } @@ -637,6 +652,9 @@ func (client *Client) Close() { if client.batchLogger != nil { client.batchLogger.Shutdown() } + if client.batchMeter != nil { + client.batchMeter.Shutdown() + } client.Transport.Close() } diff --git a/interfaces.go b/interfaces.go index d7c197df1..9ac844ddd 100644 --- a/interfaces.go +++ b/interfaces.go @@ -26,6 +26,14 @@ var logEvent = struct { "application/vnd.sentry.items.log+json", } +var traceMetricEvent = struct { + Type string + ContentType string +}{ + "trace_metric", + "application/vnd.sentry.items.trace-metric+json", +} + // Level marks the severity of the event. type Level string @@ -141,6 +149,34 @@ type LogEntry interface { Emitf(format string, args ...interface{}) } +type MeterOptions struct { + // Attributes are key/value pairs that will be added to the metric. + // The attributes set here will take precedence over the attributes + // set from the Meter. + Attributes []attribute.Builder + // The unit of measurement for "gauge" and "distribution" metrics. + Unit string +} + +// Numbers provides a generic constraint for numeric types. +type Numbers interface { + int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64 | float32 | float64 +} + +// Meter provides an interface for recording metrics. +type Meter[T Numbers] interface { + // GetCtx returns the [context.Context] set on the meter. + GetCtx() context.Context + // SetAttributes attaches attributes to the meter using the attribute API; they are applied to every metric the meter records. + SetAttributes(...attribute.Builder) + // Count records a count metric. + Count(name string, count int64, options MeterOptions) + // Gauge records a gauge metric. + Gauge(name string, value T, options MeterOptions) + // Distribution records a distribution metric. + Distribution(name string, sample float64, options MeterOptions) +} + // Attachment allows associating files with your events to aid in investigation. // An event may contain one or more attachments. type Attachment struct { @@ -398,6 +434,9 @@ type Event struct { // The fields below are only relevant for logs Logs []Log `json:"items,omitempty"` + // The fields below are only relevant for metrics + Metrics []Metric `json:"metrics,omitempty"` + + // The fields below are not part of the final JSON payload.
sdkMetaData SDKMetaData @@ -687,6 +726,8 @@ func (e *Event) toCategory() ratelimit.Category { return ratelimit.CategoryLog case checkInType: return ratelimit.CategoryMonitor + case traceMetricEvent.Type: + return ratelimit.CategoryTraceMetric default: return ratelimit.CategoryUnknown } @@ -808,3 +849,89 @@ type Attribute struct { Value any `json:"value"` Type AttrType `json:"type"` } + +type MetricType string + +const ( + MetricTypeInvalid MetricType = "" + MetricTypeCounter MetricType = "counter" + MetricTypeGauge MetricType = "gauge" + MetricTypeDistribution MetricType = "distribution" +) + +type Metric struct { + Timestamp time.Time `json:"timestamp"` + TraceID TraceID `json:"trace_id,omitempty"` + Type MetricType `json:"type"` + Name string `json:"name,omitempty"` + Value float64 `json:"value"` + Unit string `json:"unit,omitempty"` + Attributes map[string]Attribute `json:"attributes,omitempty"` +} + +func (m *Metric) ToEnvelopeItem() (*protocol.EnvelopeItem, error) { + type metricJSON struct { + Timestamp *float64 `json:"timestamp,omitempty"` + TraceID string `json:"trace_id,omitempty"` + Type string `json:"type"` + Name string `json:"name,omitempty"` + Value float64 `json:"value"` + Unit string `json:"unit,omitempty"` + Attributes map[string]protocol.LogAttribute `json:"attributes,omitempty"` + } + + // Convert time.Time to seconds float if set + var ts *float64 + if !m.Timestamp.IsZero() { + sec := float64(m.Timestamp.UnixNano()) / 1e9 + ts = &sec + } + + attrs := make(map[string]protocol.LogAttribute, len(m.Attributes)) + for k, v := range m.Attributes { + attrs[k] = protocol.LogAttribute{Value: v.Value, Type: string(v.Type)} + } + + metricData, err := json.Marshal(metricJSON{ + Timestamp: ts, + TraceID: m.TraceID.String(), + Type: string(m.Type), + Name: m.Name, + Value: m.Value, + Unit: m.Unit, + Attributes: attrs, + }) + if err != nil { + return nil, err + } + + return &protocol.EnvelopeItem{ + Header: &protocol.EnvelopeItemHeader{ + Type: protocol.EnvelopeItemTypeTraceMetric, + // XXX(aldy505): I'm basically copy-pasting the implementation for + // logs here, but it doesn't seem like "item_count" and "content_type" + // are actually written here? Not sure. + }, + Payload: metricData, + }, nil +} + +// GetCategory returns the rate limit category for metrics. +func (m *Metric) GetCategory() ratelimit.Category { + return ratelimit.CategoryTraceMetric +} + +// GetEventID returns empty string (event ID set when batching). +func (m *Metric) GetEventID() string { + return "" +} + +// GetSdkInfo returns nil (SDK info set when batching). +func (m *Metric) GetSdkInfo() *protocol.SdkInfo { + return nil +} + +// GetDynamicSamplingContext returns nil (trace context set when batching). 
+func (m *Metric) GetDynamicSamplingContext() map[string]string { + return nil +} diff --git a/interfaces_test.go b/interfaces_test.go index 795c1f8b8..5d4f83c16 100644 --- a/interfaces_test.go +++ b/interfaces_test.go @@ -785,6 +785,21 @@ func TestEvent_ToEnvelope(t *testing.T) { dsn: nil, wantError: false, }, + { + name: "metric event", + event: &Event{ + EventID: "12345678901234567890123456789012", + Type: "trace_metric", + Metrics: []Metric{ + { + Name: "test.metric", + Value: 42, + Timestamp: time.Date(2023, 1, 1, 12, 0, 0, 0, time.UTC), + Type: MetricTypeCounter, + }, + }, + }, + }, } for _, tt := range tests { diff --git a/internal/protocol/envelope.go b/internal/protocol/envelope.go index 65e305caf..f32b6da4a 100644 --- a/internal/protocol/envelope.go +++ b/internal/protocol/envelope.go @@ -46,6 +46,7 @@ const ( EnvelopeItemTypeCheckIn EnvelopeItemType = "check_in" EnvelopeItemTypeAttachment EnvelopeItemType = "attachment" EnvelopeItemTypeLog EnvelopeItemType = "log" + EnvelopeItemTypeTraceMetric EnvelopeItemType = "trace_metric" ) // EnvelopeItemHeader represents the header of an envelope item. @@ -211,3 +212,17 @@ func NewLogItem(itemCount int, payload []byte) *EnvelopeItem { Payload: payload, } } + +// NewTraceMetricItem creates a new envelope item for trace metrics. +func NewTraceMetricItem(itemCount int, payload []byte) *EnvelopeItem { + length := len(payload) + return &EnvelopeItem{ + Header: &EnvelopeItemHeader{ + Type: EnvelopeItemTypeTraceMetric, + Length: &length, + ItemCount: &itemCount, + ContentType: "application/vnd.sentry.items.trace-metric+json", + }, + Payload: payload, + } +} diff --git a/internal/protocol/envelope_test.go b/internal/protocol/envelope_test.go index dac63a5df..8a4b9d0f6 100644 --- a/internal/protocol/envelope_test.go +++ b/internal/protocol/envelope_test.go @@ -45,6 +45,14 @@ func TestEnvelope_ItemsAndSerialization(t *testing.T) { payload: []byte(`[{"timestamp":"2023-01-01T12:00:00Z","level":"info","message":"test log"}]`), creator: func(p []byte) *EnvelopeItem { return NewLogItem(1, p) }, }, + { + name: "trace_metric", + itemType: EnvelopeItemTypeTraceMetric, + payload: []byte(`[{"name":"test.metric","type":"gauge","value":42,"unit":"units","attributes":{"key.string":{"value":"value","type":"string"}}}]`), + creator: func(p []byte) *EnvelopeItem { + return NewTraceMetricItem(1, p) + }, + }, } for _, tt := range tests { diff --git a/internal/protocol/metric_batch.go b/internal/protocol/metric_batch.go new file mode 100644 index 000000000..ad4b17f39 --- /dev/null +++ b/internal/protocol/metric_batch.go @@ -0,0 +1,41 @@ +package protocol + +import ( + "encoding/json" + + "github.com/getsentry/sentry-go/internal/ratelimit" +) + +type Metrics []EnvelopeItemConvertible + +func (ms Metrics) ToEnvelopeItem() (*EnvelopeItem, error) { + // Collect each metric's raw JSON payload; json.RawMessage keeps it embedded as JSON instead of base64-encoding it. + items := make([]json.RawMessage, 0, len(ms)) + for _, metric := range ms { + envItem, err := metric.ToEnvelopeItem() + if err != nil { + continue + } + items = append(items, json.RawMessage(envItem.Payload)) + } + + if len(items) == 0 { + return nil, nil + } + + wrapper := struct { + Items []json.RawMessage `json:"items"` + }{Items: items} + + payload, err := json.Marshal(wrapper) + if err != nil { + return nil, err + } + + return NewTraceMetricItem(len(items), payload), nil +} + +func (Metrics) GetCategory() ratelimit.Category { return ratelimit.CategoryTraceMetric } +func (Metrics) GetEventID() string { return "" } +func (Metrics) GetSdkInfo() *SdkInfo { return nil } +func (Metrics)
GetDynamicSamplingContext() map[string]string { return nil } diff --git a/internal/protocol/metric_batch_test.go b/internal/protocol/metric_batch_test.go new file mode 100644 index 000000000..99b1b7d97 --- /dev/null +++ b/internal/protocol/metric_batch_test.go @@ -0,0 +1,66 @@ +package protocol + +import ( + "encoding/json" + "testing" + + "github.com/getsentry/sentry-go/internal/ratelimit" +) + +type dummyMetric struct { + Name string `json:"name"` + Type string `json:"type"` + Value any `json:"value"` +} + +func (d dummyMetric) ToEnvelopeItem() (*EnvelopeItem, error) { + payload, err := json.Marshal(d) + if err != nil { + return nil, err + } + + return &EnvelopeItem{ + Header: &EnvelopeItemHeader{Type: EnvelopeItemTypeTraceMetric}, + Payload: payload, + }, nil +} + +func (d dummyMetric) GetCategory() ratelimit.Category { return ratelimit.CategoryTraceMetric } +func (d dummyMetric) GetEventID() string { return "" } +func (d dummyMetric) GetSdkInfo() *SdkInfo { return nil } +func (d dummyMetric) GetDynamicSamplingContext() map[string]string { return nil } + +func TestMetric_ToEnvelopeItem(t *testing.T) { + metrics := Metrics{dummyMetric{Name: "metric1", Type: "gauge", Value: 42}, dummyMetric{Name: "metric2", Type: "count", Value: 7}} + item, err := metrics.ToEnvelopeItem() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if item == nil || item.Header == nil || item.Header.Type != EnvelopeItemTypeTraceMetric { + t.Fatalf("unexpected envelope item: %#v", item) + } + + var payload struct { + Items []json.RawMessage `json:"items"` + } + if err := json.Unmarshal(item.Payload, &payload); err != nil { + t.Fatalf("failed to unmarshal payload: %v", err) + } + if len(payload.Items) != 2 { + t.Fatalf("expected 2 items, got %d", len(payload.Items)) + } + + if Metrics(nil).GetCategory() != ratelimit.CategoryTraceMetric { + t.Fatal("category mismatch") + } + if Metrics(nil).GetEventID() != "" { + t.Fatal("event id should be empty") + } + if Metrics(nil).GetSdkInfo() != nil { + t.Fatal("sdk info should be nil") + } + if Metrics(nil).GetDynamicSamplingContext() != nil { + t.Fatal("dsc should be nil") + } +} diff --git a/internal/ratelimit/category.go b/internal/ratelimit/category.go index 0a5604e2d..aec8bb8d0 100644 --- a/internal/ratelimit/category.go +++ b/internal/ratelimit/category.go @@ -8,7 +8,7 @@ import ( ) // Reference: -// https://github.com/getsentry/relay/blob/0424a2e017d193a93918053c90cdae9472d164bf/relay-common/src/constants.rs#L116-L127 +// https://github.com/getsentry/relay/blob/46dfaa850b8717a6e22c3e9a275ba17fe673b9da/relay-base-schema/src/data_category.rs#L231-L271 // Category classifies supported payload types that can be ingested by Sentry // and, therefore, rate limited. @@ -22,6 +22,7 @@ const ( CategoryTransaction Category = "transaction" CategoryLog Category = "log_item" CategoryMonitor Category = "monitor" + CategoryTraceMetric Category = "trace_metric" ) // knownCategories is the set of currently known categories. Other categories @@ -32,6 +33,7 @@ var knownCategories = map[Category]struct{}{ CategoryTransaction: {}, CategoryLog: {}, CategoryMonitor: {}, + CategoryTraceMetric: {}, } // String returns the category formatted for debugging. 
@@ -47,6 +49,8 @@ func (c Category) String() string { return "CategoryLog" case CategoryMonitor: return "CategoryMonitor" + case CategoryTraceMetric: + return "CategoryTraceMetric" default: // For unknown categories, use the original formatting logic caser := cases.Title(language.English) @@ -97,6 +101,8 @@ func (c Category) GetPriority() Priority { return PriorityLow case CategoryTransaction: return PriorityMedium + case CategoryTraceMetric: + return PriorityLow default: return PriorityMedium } diff --git a/internal/ratelimit/category_test.go b/internal/ratelimit/category_test.go index 50ae28460..d9ef1afe0 100644 --- a/internal/ratelimit/category_test.go +++ b/internal/ratelimit/category_test.go @@ -14,6 +14,7 @@ func TestCategory_String(t *testing.T) { {CategoryTransaction, "CategoryTransaction"}, {CategoryMonitor, "CategoryMonitor"}, {CategoryLog, "CategoryLog"}, + {CategoryTraceMetric, "CategoryTraceMetric"}, {Category("custom type"), "CategoryCustomType"}, {Category("multi word type"), "CategoryMultiWordType"}, } @@ -35,6 +36,7 @@ func TestKnownCategories(t *testing.T) { CategoryTransaction, CategoryMonitor, CategoryLog, + CategoryTraceMetric, } for _, category := range expectedCategories { @@ -90,6 +92,7 @@ func TestCategory_GetPriority(t *testing.T) { {CategoryMonitor, PriorityHigh}, {CategoryLog, PriorityLow}, {CategoryTransaction, PriorityMedium}, + {CategoryTraceMetric, PriorityLow}, {Category("unknown"), PriorityMedium}, } diff --git a/internal/telemetry/scheduler.go b/internal/telemetry/scheduler.go index 940fc9184..b338a383d 100644 --- a/internal/telemetry/scheduler.go +++ b/internal/telemetry/scheduler.go @@ -232,6 +232,23 @@ func (s *Scheduler) processItems(buffer Storage[protocol.EnvelopeItemConvertible debuglog.Printf("error sending envelope: %v", err) } return + case ratelimit.CategoryTraceMetric: + metrics := protocol.Metrics(items) + header := &protocol.EnvelopeHeader{EventID: protocol.GenerateEventID(), SentAt: time.Now(), Sdk: s.sdkInfo} + if s.dsn != nil { + header.Dsn = s.dsn.String() + } + envelope := protocol.NewEnvelope(header) + item, err := metrics.ToEnvelopeItem() + if err != nil { + debuglog.Printf("error creating trace metric batch envelope item: %v", err) + return + } + envelope.AddItem(item) + if err := s.transport.SendEnvelope(envelope); err != nil { + debuglog.Printf("error sending envelope: %v", err) + } + return default: // if the buffers are properly configured, buffer.PollIfReady should return a single item for every category // other than logs. We still iterate over the items just in case, because we don't want to send broken envelopes. diff --git a/internal/testutils/maps.go b/internal/testutils/maps.go new file mode 100644 index 000000000..f2661f7e5 --- /dev/null +++ b/internal/testutils/maps.go @@ -0,0 +1,15 @@ +package testutils + +// MergeMaps merges multiple maps into a single map. +// If there are duplicate keys, the value from the last map takes precedence. 
+// +// CC BY-SA 4.0 Oliver (https://stackoverflow.com/a/74750675/3153224) +func MergeMaps[M ~map[K]V, K comparable, V any](src ...M) M { + merged := make(M) + for _, m := range src { + for k, v := range m { + merged[k] = v + } + } + return merged +} diff --git a/log_test.go b/log_test.go index 57f694341..07144b68d 100644 --- a/log_test.go +++ b/log_test.go @@ -31,10 +31,11 @@ func setupMockTransport() (context.Context, *MockTransport) { ServerName: "test-server", EnableLogs: true, EnableTracing: true, + EnableMetrics: true, }) mockClient.sdkIdentifier = "sentry.go" mockClient.sdkVersion = "0.10.0" - hub := CurrentHub() + hub := CurrentHub().Clone() hub.BindClient(mockClient) hub.Scope().propagationContext.TraceID = TraceIDFromHex(LogTraceID) @@ -733,7 +734,7 @@ func Test_sentryLogger_UserAttributes(t *testing.T) { }) mockClient.sdkIdentifier = "sentry.go" mockClient.sdkVersion = "0.10.0" - hub := CurrentHub() + hub := CurrentHub().Clone() hub.BindClient(mockClient) hub.Scope().propagationContext.TraceID = TraceIDFromHex(LogTraceID) diff --git a/metrics.go b/metrics.go new file mode 100644 index 000000000..260d8b8cb --- /dev/null +++ b/metrics.go @@ -0,0 +1,244 @@ +package sentry + +import ( + "context" + "os" + "sync" + "time" + + "github.com/getsentry/sentry-go/attribute" + "github.com/getsentry/sentry-go/internal/debuglog" +) + +// NewMeter returns a new Meter associated with the given context. +// If there is no Client associated with the context, or if metrics are disabled, +// it returns a no-op Meter that discards all metrics. +func NewMeter[T Numbers](ctx context.Context) Meter[T] { + var hub *Hub + hub = GetHubFromContext(ctx) + if hub == nil { + hub = CurrentHub() + } + + client := hub.Client() + if client != nil && client.options.EnableMetrics { + return &sentryMeter[T]{ + ctx: ctx, + client: client, + attributes: make(map[string]Attribute), + mu: sync.RWMutex{}, + } + } + + return &noopMeter[T]{} +} + +type sentryMeter[T Numbers] struct { + ctx context.Context + client *Client + attributes map[string]Attribute + mu sync.RWMutex +} + +func (s *sentryMeter[T]) emit(ctx context.Context, metricType MetricType, name string, value float64, unit string, attributes map[string]Attribute) { + if name == "" { + return + } + + hub := GetHubFromContext(ctx) + if hub == nil { + hub = CurrentHub() + } + + var traceID TraceID + var spanID SpanID + var span *Span + var user User + + scope := hub.Scope() + if scope != nil { + scope.mu.Lock() + span = scope.span + if span != nil { + traceID = span.TraceID + spanID = span.SpanID + } else { + traceID = scope.propagationContext.TraceID + } + user = scope.user + scope.mu.Unlock() + } + + attrs := map[string]Attribute{} + s.mu.RLock() + for k, v := range s.attributes { + attrs[k] = v + } + s.mu.RUnlock() + + for k, v := range attributes { + attrs[k] = v + } + + // Set default attributes + if release := s.client.options.Release; release != "" { + attrs["sentry.release"] = Attribute{Value: release, Type: AttributeString} + } + if environment := s.client.options.Environment; environment != "" { + attrs["sentry.environment"] = Attribute{Value: environment, Type: AttributeString} + } + if serverName := s.client.options.ServerName; serverName != "" { + attrs["sentry.server.address"] = Attribute{Value: serverName, Type: AttributeString} + } else if serverAddr, err := os.Hostname(); err == nil { + attrs["sentry.server.address"] = Attribute{Value: serverAddr, Type: AttributeString} + } + + if !user.IsEmpty() { + if user.ID != "" { + attrs["user.id"] = 
Attribute{Value: user.ID, Type: AttributeString} + } + if user.Name != "" { + attrs["user.name"] = Attribute{Value: user.Name, Type: AttributeString} + } + if user.Email != "" { + attrs["user.email"] = Attribute{Value: user.Email, Type: AttributeString} + } + } + if span != nil { + attrs["sentry.trace.parent_span_id"] = Attribute{Value: spanID.String(), Type: AttributeString} + } + if sdkIdentifier := s.client.sdkIdentifier; sdkIdentifier != "" { + attrs["sentry.sdk.name"] = Attribute{Value: sdkIdentifier, Type: AttributeString} + } + if sdkVersion := s.client.sdkVersion; sdkVersion != "" { + attrs["sentry.sdk.version"] = Attribute{Value: sdkVersion, Type: AttributeString} + } + + metric := &Metric{ + Timestamp: time.Now(), + TraceID: traceID, + Type: metricType, + Name: name, + Value: value, + Unit: unit, + Attributes: attrs, + } + + if s.client.options.BeforeSendMetric != nil { + metric = s.client.options.BeforeSendMetric(metric) + } + + if metric != nil { + if s.client.telemetryBuffer != nil { + if !s.client.telemetryBuffer.Add(metric) { + debuglog.Printf("Dropping event: metric buffer full or category missing") + } + } else if s.client.batchMeter != nil { + s.client.batchMeter.metricsCh <- *metric + } + } + + if s.client.options.Debug { + debuglog.Printf("Metric %s [%s]: %f %s", metricType, name, value, unit) + } +} + +// Count implements Meter. +func (s *sentryMeter[T]) Count(name string, count int64, options MeterOptions) { + attrs := make(map[string]Attribute) + if options.Attributes != nil { + for _, attr := range options.Attributes { + t, ok := mapTypesToStr[attr.Value.Type()] + if !ok || t == "" { + debuglog.Printf("invalid attribute type set: %v", t) + continue + } + attrs[attr.Key] = Attribute{Value: attr.Value.AsInterface(), Type: t} + } + } + + s.emit(s.ctx, MetricTypeCounter, name, float64(count), "", attrs) +} + +// Distribution implements Meter. +func (s *sentryMeter[T]) Distribution(name string, sample float64, options MeterOptions) { + attrs := make(map[string]Attribute) + if options.Attributes != nil { + for _, attr := range options.Attributes { + t, ok := mapTypesToStr[attr.Value.Type()] + if !ok || t == "" { + debuglog.Printf("invalid attribute type set: %v", t) + continue + } + attrs[attr.Key] = Attribute{Value: attr.Value.AsInterface(), Type: t} + } + } + + s.emit(s.ctx, MetricTypeDistribution, name, sample, options.Unit, attrs) +} + +// Gauge implements Meter. +func (s *sentryMeter[T]) Gauge(name string, value T, options MeterOptions) { + attrs := make(map[string]Attribute) + if options.Attributes != nil { + for _, attr := range options.Attributes { + t, ok := mapTypesToStr[attr.Value.Type()] + if !ok || t == "" { + debuglog.Printf("invalid attribute type set: %v", t) + continue + } + attrs[attr.Key] = Attribute{Value: attr.Value.AsInterface(), Type: t} + } + } + + s.emit(s.ctx, MetricTypeGauge, name, float64(value), options.Unit, attrs) +} + +// GetCtx implements Meter. +func (s *sentryMeter[T]) GetCtx() context.Context { + return s.ctx +} + +// SetAttributes implements Meter. +func (s *sentryMeter[T]) SetAttributes(attrs ...attribute.Builder) { + s.mu.Lock() + defer s.mu.Unlock() + + for _, v := range attrs { + t, ok := mapTypesToStr[v.Value.Type()] + if !ok || t == "" { + debuglog.Printf("invalid attribute type set: %v", t) + continue + } + + s.attributes[v.Key] = Attribute{ + Value: v.Value.AsInterface(), + Type: t, + } + } +} + +// noopMeter is a no-operation implementation of Meter. +// This is used when there is no client available in the context. 
+type noopMeter[T Numbers] struct{} + +// Count implements Meter. +func (n *noopMeter[T]) Count(_ string, _ int64, _ MeterOptions) { +} + +// Distribution implements Meter. +func (n *noopMeter[T]) Distribution(_ string, _ float64, _ MeterOptions) { +} + +// Gauge implements Meter. +func (n *noopMeter[T]) Gauge(_ string, _ T, _ MeterOptions) { +} + +// GetCtx implements Meter. +func (n *noopMeter[T]) GetCtx() context.Context { + return context.Background() +} + +// SetAttributes implements Meter. +func (n *noopMeter[T]) SetAttributes(...attribute.Builder) { +} diff --git a/metrics_test.go b/metrics_test.go new file mode 100644 index 000000000..ec97a7cb9 --- /dev/null +++ b/metrics_test.go @@ -0,0 +1,351 @@ +package sentry + +import ( + "context" + "testing" + "time" + + "github.com/getsentry/sentry-go/attribute" + "github.com/getsentry/sentry-go/internal/testutils" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" +) + +func Test_sentryMeter_Methods(t *testing.T) { + attrs := map[string]Attribute{ + "sentry.release": {Value: "v1.2.3", Type: "string"}, + "sentry.environment": {Value: "testing", Type: "string"}, + "sentry.server.address": {Value: "test-server", Type: "string"}, + "sentry.sdk.name": {Value: "sentry.go", Type: "string"}, + "sentry.sdk.version": {Value: "0.10.0", Type: "string"}, + } + + tests := []struct { + name string + metricsFunc func(m Meter[float64]) + wantEvents []Event + }{ + { + name: "count", + metricsFunc: func(m Meter[float64]) { + m.Count("test.count", 5, MeterOptions{ + Attributes: []attribute.Builder{attribute.String("key.string", "value")}, + }) + }, + wantEvents: []Event{ + { + Metrics: []Metric{ + { + TraceID: TraceIDFromHex(LogTraceID), + Name: "test.count", + Value: 5, + Attributes: testutils.MergeMaps(attrs, map[string]Attribute{ + "key.string": {Value: "value", Type: "string"}, + }), + Type: MetricTypeCounter, + Unit: "", + }, + }, + }, + }, + }, + { + name: "distribution", + metricsFunc: func(m Meter[float64]) { + m.Distribution("test.distribution", 3.14, MeterOptions{ + Attributes: []attribute.Builder{attribute.Int("key.int", 42)}, + Unit: "ms", + }) + }, + wantEvents: []Event{ + { + Metrics: []Metric{ + { + TraceID: TraceIDFromHex(LogTraceID), + Name: "test.distribution", + Value: 3.14, + Attributes: testutils.MergeMaps(attrs, map[string]Attribute{ + "key.int": {Value: int64(42), Type: "integer"}, + }), + Type: MetricTypeDistribution, + Unit: "ms", + }, + }, + }, + }, + }, + { + name: "gauge", + metricsFunc: func(m Meter[float64]) { + m.Gauge("test.gauge", 2.71, MeterOptions{ + Attributes: []attribute.Builder{attribute.Float64("key.float", 1.618)}, + Unit: "requests", + }) + }, + wantEvents: []Event{ + { + Metrics: []Metric{ + { + TraceID: TraceIDFromHex(LogTraceID), + Name: "test.gauge", + Value: 2.71, + Attributes: testutils.MergeMaps(attrs, map[string]Attribute{ + "key.float": {Value: 1.618, Type: "double"}, + }), + Type: MetricTypeGauge, + Unit: "requests", + }, + }, + }, + }, + }, + { + name: "zero count", + metricsFunc: func(m Meter[float64]) { + m.Count("test.zero.count", 0, MeterOptions{ + Attributes: []attribute.Builder{attribute.String("key.string", "value")}, + }) + }, + wantEvents: []Event{ + { + Metrics: []Metric{ + { + TraceID: TraceIDFromHex(LogTraceID), + Name: "test.zero.count", + Value: 0, + Attributes: testutils.MergeMaps(attrs, map[string]Attribute{ + "key.string": {Value: "value", Type: "string"}, + }), + Type: MetricTypeCounter, + Unit: "", + }, + }, + }, + }, + }, + { + name: "zero distribution", + 
metricsFunc: func(m Meter[float64]) { + m.Distribution("test.zero.distribution", 0, MeterOptions{ + Attributes: []attribute.Builder{attribute.String("key.string", "value")}, + Unit: "bytes", + }) + }, + wantEvents: []Event{ + { + Metrics: []Metric{ + { + TraceID: TraceIDFromHex(LogTraceID), + Name: "test.zero.distribution", + Value: 0, + Attributes: testutils.MergeMaps(attrs, map[string]Attribute{ + "key.string": {Value: "value", Type: "string"}, + }), + Type: MetricTypeDistribution, + Unit: "bytes", + }, + }, + }, + }, + }, + { + name: "zero gauge", + metricsFunc: func(m Meter[float64]) { + m.Gauge("test.zero.gauge", 0, MeterOptions{ + Attributes: []attribute.Builder{attribute.String("key.string", "value")}, + Unit: "connections", + }) + }, + wantEvents: []Event{ + { + Metrics: []Metric{ + { + TraceID: TraceIDFromHex(LogTraceID), + Name: "test.zero.gauge", + Value: 0, + Attributes: testutils.MergeMaps(attrs, map[string]Attribute{ + "key.string": {Value: "value", Type: "string"}, + }), + Type: MetricTypeGauge, + Unit: "connections", + }, + }, + }, + }, + }, + { + name: "negative count", + metricsFunc: func(m Meter[float64]) { + m.Count("test.negative.count", -10, MeterOptions{ + Attributes: []attribute.Builder{attribute.String("key.string", "value")}, + }) + }, + wantEvents: []Event{ + { + Metrics: []Metric{ + { + TraceID: TraceIDFromHex(LogTraceID), + Name: "test.negative.count", + Value: -10, + Attributes: testutils.MergeMaps(attrs, map[string]Attribute{ + "key.string": {Value: "value", Type: "string"}, + }), + Type: MetricTypeCounter, + Unit: "", + }, + }, + }, + }, + }, + { + name: "negative distribution", + metricsFunc: func(m Meter[float64]) { + m.Distribution("test.negative.distribution", -2.5, MeterOptions{ + Attributes: []attribute.Builder{attribute.String("key.string", "value")}, + Unit: "ms", + }) + }, + wantEvents: []Event{ + { + Metrics: []Metric{ + { + TraceID: TraceIDFromHex(LogTraceID), + Name: "test.negative.distribution", + Value: -2.5, + Attributes: testutils.MergeMaps(attrs, map[string]Attribute{ + "key.string": {Value: "value", Type: "string"}, + }), + Type: MetricTypeDistribution, + Unit: "ms", + }, + }, + }, + }, + }, + { + name: "negative gauge", + metricsFunc: func(m Meter[float64]) { + m.Gauge("test.negative.gauge", -5, MeterOptions{ + Attributes: []attribute.Builder{attribute.String("key.string", "value")}, + Unit: "connections", + }) + }, + wantEvents: []Event{ + { + Metrics: []Metric{ + { + TraceID: TraceIDFromHex(LogTraceID), + Name: "test.negative.gauge", + Value: -5, + Attributes: testutils.MergeMaps(attrs, map[string]Attribute{ + "key.string": {Value: "value", Type: "string"}, + }), + Type: MetricTypeGauge, + Unit: "connections", + }, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx, mockTransport := setupMockTransport() + meter := NewMeter[float64](ctx) + + tt.metricsFunc(meter) + Flush(testutils.FlushTimeout()) + + opts := cmp.Options{cmpopts.IgnoreFields(Metric{}, "Timestamp")} + + gotEvents := mockTransport.Events() + if len(gotEvents) != len(tt.wantEvents) { + t.Fatalf("got %d events, want %d", len(gotEvents), len(tt.wantEvents)) + } + + for i, event := range gotEvents { + assertEqual(t, event.Type, traceMetricEvent.Type) + if diff := cmp.Diff(tt.wantEvents[i].Metrics, event.Metrics, opts); diff != "" { + t.Errorf("event[%d] Metrics mismatch (-want +got):\n%s", i, diff) + } + mockTransport.events = nil + } + }) + } +} + +func Test_batchMeter_Flush(t *testing.T) { + ctx, mockTransport := 
setupMockTransport() + meter := NewMeter[float64](ctx) + meter.Count("test.count", 42, MeterOptions{}) + Flush(testutils.FlushTimeout()) + + events := mockTransport.Events() + if len(events) != 1 { + t.Fatalf("got %d events, want 1", len(events)) + } +} + +func Test_batchMeter_FlushWithContext(t *testing.T) { + ctx, mockTransport := setupMockTransport() + meter := NewMeter[float64](ctx) + meter.Count("test.count", 42, MeterOptions{}) + + cancelCtx, cancel := context.WithTimeout(context.Background(), testutils.FlushTimeout()) + FlushWithContext(cancelCtx) + defer cancel() + + events := mockTransport.Events() + if len(events) != 1 { + t.Fatalf("got %d events, want 1", len(events)) + } +} + +func Test_sentryMeter_BeforeSendMetric(t *testing.T) { + ctx := context.Background() + mockTransport := &MockTransport{} + mockClient, _ := NewClient(ClientOptions{ + Dsn: testDsn, + Transport: mockTransport, + Release: "v1.2.3", + Environment: "testing", + ServerName: "test-server", + EnableMetrics: true, + EnableTracing: true, + BeforeSendMetric: func(_ *Metric) *Metric { + return nil + }, + }) + mockClient.sdkIdentifier = "sentry.go" + mockClient.sdkVersion = "0.10.0" + hub := CurrentHub() + hub.BindClient(mockClient) + hub.Scope().propagationContext.TraceID = TraceIDFromHex(LogTraceID) + + ctx = SetHubOnContext(ctx, hub) + + meter := NewMeter[int](ctx) + meter.Count("test.count", 1, MeterOptions{}) + Flush(testutils.FlushTimeout()) + + events := mockTransport.Events() + if len(events) != 0 { + t.Fatalf("expected no events, got %d", len(events)) + } +} + +func Test_Meter_ExceedBatchSize(t *testing.T) { + ctx, mockTransport := setupMockTransport() + meter := NewMeter[int](ctx) + for i := 0; i < batchSize; i++ { + meter.Count("test.count", 1, MeterOptions{}) + } + + // sleep to wait for the batch to be processed + time.Sleep(time.Millisecond * 20) + events := mockTransport.Events() + if len(events) != 1 { + t.Fatalf("expected only one event with 100 metrics, got %d", len(events)) + } +}
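
A minimal usage sketch for the BeforeSendMetric hook added in client.go, combined with the Meter API from metrics.go; the example in _examples/metrics/main.go does not exercise the hook. The empty DSN, the metric names, and the "internal." prefix check are illustrative assumptions, not code from this change. The hook may mutate a metric or return nil to drop it before it reaches the batcher.

package main

import (
	"context"
	"strings"
	"time"

	"github.com/getsentry/sentry-go"
)

func main() {
	err := sentry.Init(sentry.ClientOptions{
		Dsn:           "", // assumption: set a real DSN here
		EnableMetrics: true,
		// Drop metrics whose names start with "internal." and pass everything else through.
		BeforeSendMetric: func(metric *sentry.Metric) *sentry.Metric {
			if strings.HasPrefix(metric.Name, "internal.") {
				return nil
			}
			return metric
		},
	})
	if err != nil {
		panic(err)
	}
	defer sentry.Flush(2 * time.Second)

	meter := sentry.NewMeter[int64](context.Background())
	meter.Count("internal.debug_counter", 1, sentry.MeterOptions{}) // discarded by the hook
	meter.Count("user.signup", 1, sentry.MeterOptions{})            // batched and sent
}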