encoder.go

// Copyright (c) 2017 Arista Networks, Inc.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.

package kafka

import (
	"expvar"
	"fmt"
	"sync/atomic"
	"time"

	"github.com/Shopify/sarama"
	"github.com/golang/protobuf/proto"
	"notabug.org/themusicgod1/glog"
	"notabug.org/themusicgod1/goarista/monitor"
)

// MessageEncoder is an encoder interface
// that encodes a proto.Message into sarama.ProducerMessages
type MessageEncoder interface {
	Encode(proto.Message) ([]*sarama.ProducerMessage, error)
	HandleSuccess(*sarama.ProducerMessage)
	HandleError(*sarama.ProducerError)
}
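
// Illustrative sketch, not part of the original file: a minimal custom
// encoder that satisfies MessageEncoder by embedding BaseEncoder and
// overriding Encode. The exampleEncoder type, its topic field, and the use
// of the package's Metadata type (inferred from the type assertions further
// down in this file) are assumptions made for this example.
type exampleEncoder struct {
	*BaseEncoder
	topic string
}

func (e *exampleEncoder) Encode(m proto.Message) ([]*sarama.ProducerMessage, error) {
	// Serialize the proto message and wrap it in a single producer message.
	payload, err := proto.Marshal(m)
	if err != nil {
		return nil, err
	}
	return []*sarama.ProducerMessage{{
		Topic:    e.topic,
		Value:    sarama.ByteEncoder(payload),
		Metadata: Metadata{StartTime: time.Now(), NumMessages: 1},
	}}, nil
}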

// BaseEncoder implements the MessageEncoder interface
// and mainly handles monitoring
type BaseEncoder struct {
	// Used for monitoring
	numSuccesses monitor.Uint
	numFailures  monitor.Uint
	histogram    *monitor.LatencyHistogram
}

// counter counts the number of Sysdb clients we have, and is used to guarantee that we
// always have a unique name exported to expvar
var counter uint32

// NewBaseEncoder returns a new base MessageEncoder
func NewBaseEncoder(typ string) *BaseEncoder {
	// Setup monitoring structures
	histName := "kafkaProducerHistogram_" + typ
	statsName := "messagesStats"
	if id := atomic.AddUint32(&counter, 1); id > 1 {
		histName = fmt.Sprintf("%s_%d", histName, id)
		statsName = fmt.Sprintf("%s_%d", statsName, id)
	}
	hist := monitor.NewLatencyHistogram(histName, time.Microsecond, 32, 0.3, 1000, 0)
	e := &BaseEncoder{
		histogram: hist,
	}
	statsMap := expvar.NewMap(statsName)
	statsMap.Set("successes", &e.numSuccesses)
	statsMap.Set("failures", &e.numFailures)
	return e
}
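
// Illustrative sketch, not part of the original file: assuming no other
// encoder has been created in this process, the first call below keeps the
// bare names while the second gets a "_2" suffix, so expvar.NewMap is never
// asked to register a duplicate name (which would panic). The "test" type
// string and the function name are assumptions made for this example.
func exampleUniqueStatsNames() {
	_ = NewBaseEncoder("test") // exports "kafkaProducerHistogram_test" and "messagesStats"
	_ = NewBaseEncoder("test") // exports "kafkaProducerHistogram_test_2" and "messagesStats_2"
}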

// Encode encodes the proto message to a sarama.ProducerMessage
func (e *BaseEncoder) Encode(message proto.Message) ([]*sarama.ProducerMessage, error) {
	// It doesn't do anything, but is kept so that BaseEncoder
	// implements the MessageEncoder interface
	return nil, nil
}

// HandleSuccess processes the metadata of messages from the kafka producer Successes channel
func (e *BaseEncoder) HandleSuccess(msg *sarama.ProducerMessage) {
	// TODO: Fix this and provide an interface to get the metadata object
	metadata, ok := msg.Metadata.(Metadata)
	if !ok {
		return
	}
	// TODO: Add a monotonic clock source when one becomes available
	e.histogram.UpdateLatencyValues(time.Since(metadata.StartTime))
	e.numSuccesses.Add(uint64(metadata.NumMessages))
}

// HandleError processes the metadata of messages from the kafka producer Errors channel
func (e *BaseEncoder) HandleError(msg *sarama.ProducerError) {
	// TODO: Fix this and provide an interface to get the metadata object
	metadata, ok := msg.Msg.Metadata.(Metadata)
	if !ok {
		return
	}
	// TODO: Add a monotonic clock source when one becomes available
	e.histogram.UpdateLatencyValues(time.Since(metadata.StartTime))
	glog.Errorf("Kafka Producer error: %s", msg.Error())
	e.numFailures.Add(uint64(metadata.NumMessages))
}
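
// Illustrative sketch, not part of the original file: how HandleSuccess and
// HandleError are typically wired to a sarama.AsyncProducer's feedback
// channels. The function name and goroutine layout are assumptions made for
// this example; the producer config needs Return.Successes set to true for
// the Successes channel to deliver messages.
func consumeProducerFeedback(producer sarama.AsyncProducer, encoder MessageEncoder) {
	// Drain both channels concurrently so the producer never blocks.
	go func() {
		for msg := range producer.Successes() {
			encoder.HandleSuccess(msg)
		}
	}()
	go func() {
		for producerError := range producer.Errors() {
			encoder.HandleError(producerError)
		}
	}()
}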