diff --git a/.gitignore b/.gitignore index daf913b..8ebe58b 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,7 @@ # Folders _obj _test +tmp # Architecture specific extensions/prefixes *.[568vq] diff --git a/.travis.yml b/.travis.yml index 19d1d80..64d202a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,11 +1,13 @@ language: go go: -- 1.7 -- 1.8 -- 1.9 -- tip +- "1.7" +- "1.8" +- "1.9" +- "1.10" +- "master" matrix: allow_failures: - - go: tip + - go: "master" script: - go test -v -race -cpu=1,2,4 -bench . -benchmem ./... + - go test -v -race -cpu=1,2,4 -bench . -benchmem ./... + - go test -v -tags binary_log -race -cpu=1,2,4 -bench . -benchmem ./... diff --git a/CNAME b/CNAME new file mode 100644 index 0000000..9ce57a6 --- /dev/null +++ b/CNAME @@ -0,0 +1 @@ +zerolog.io \ No newline at end of file diff --git a/README.md b/README.md index 6fac775..eefc1f6 100644 --- a/README.md +++ b/README.md @@ -4,13 +4,17 @@ The zerolog package provides a fast and simple logger dedicated to JSON output. -Zerolog's API is designed to provide both a great developer experience and stunning [performance](#benchmarks). Its unique chaining API allows zerolog to write JSON log events by avoiding allocations and reflection. +Zerolog's API is designed to provide both a great developer experience and stunning [performance](#benchmarks). Its unique chaining API allows zerolog to write JSON (or CBOR) log events by avoiding allocations and reflection. -The uber's [zap](https://godoc.org/go.uber.org/zap) library pioneered this approach. Zerolog is taking this concept to the next level with simpler to use API and even better performance. +Uber's [zap](https://godoc.org/go.uber.org/zap) library pioneered this approach. Zerolog is taking this concept to the next level with a simpler to use API and even better performance. -To keep the code base and the API simple, zerolog focuses on JSON logging only. Pretty logging on the console is made possible using the provided (but inefficient) `zerolog.ConsoleWriter`. +To keep the code base and the API simple, zerolog focuses on efficient structured logging only. Pretty logging on the console is made possible using the provided (but inefficient) [`zerolog.ConsoleWriter`](#pretty-logging). -![](pretty.png) +![Pretty Logging Image](pretty.png) + +## Who uses zerolog + +Find out [who uses zerolog](https://github.com/rs/zerolog/wiki/Who-uses-zerolog) and add your company / project to the list. ## Features @@ -22,54 +26,209 @@ To keep the code base and the API simple, zerolog focuses on JSON logging only. 
* Contextual fields * `context.Context` integration * `net/http` helpers +* JSON and CBOR encoding formats * Pretty logging for development -## Usage +## Installation ```go -import "github.com/rs/zerolog/log" +go get -u github.com/rs/zerolog/log ``` -### A global logger can be use for simple logging +## Getting Started + +### Simple Logging Example + +For simple logging, import the global logger package **github.com/rs/zerolog/log** ```go -log.Print("hello world") +package main -// Output: {"level":"debug","time":1494567715,"message":"hello world"} +import ( + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + // UNIX Time is faster and smaller than most timestamps + // If you set zerolog.TimeFieldFormat to an empty string, + // logs will write with UNIX time + zerolog.TimeFieldFormat = "" + + log.Print("hello world") +} + +// Output: {"time":1516134303,"level":"debug","message":"hello world"} ``` +> Note: By default log writes to `os.Stderr` +> Note: The default log level for `log.Print` is *debug* +### Contextual Logging + +**zerolog** allows data to be added to log messages in the form of key:value pairs. The data added to the message adds "context" about the log event that can be critical for debugging as well as myriad other purposes. An example of this is below: ```go -log.Info().Msg("hello world") +package main -// Output: {"level":"info","time":1494567715,"message":"hello world"} +import ( + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + zerolog.TimeFieldFormat = "" + + log.Debug(). + Str("Scale", "833 cents"). + Float64("Interval", 833.09). + Msg("Fibonacci is everywhere") +} + +// Output: {"time":1524104936,"level":"debug","Scale":"833 cents","Interval":833.09,"message":"Fibonacci is everywhere"} ``` -NOTE: To import the global logger, import the `log` subpackage `github.com/rs/zerolog/log`. +> You'll note in the above example that when adding contextual fields, the fields are strongly typed. You can find the full list of supported fields [here](#standard-types) + +### Leveled Logging + +#### Simple Leveled Logging Example ```go -log.Fatal(). - Err(err). - Str("service", service). - Msgf("Cannot start %s", service) +package main -// Output: {"level":"fatal","time":1494567715,"message":"Cannot start myservice","error":"some error","service":"myservice"} -// Exit 1 +import ( + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + zerolog.TimeFieldFormat = "" + + log.Info().Msg("hello world") +} + +// Output: {"time":1516134303,"level":"info","message":"hello world"} ``` -NOTE: Using `Msgf` generates one allocation even when the logger is disabled. +> It is very important to note that when using the **zerolog** chaining API, as shown above (`log.Info().Msg("hello world"`), the chain must have either the `Msg` or `Msgf` method call. If you forget to add either of these, the log will not occur and there is no compile time error to alert you of this. -### Fields can be added to log messages +**zerolog** allows for logging at the following levels (from highest to lowest): + +* panic (`zerolog.PanicLevel`, 5) +* fatal (`zerolog.FatalLevel`, 4) +* error (`zerolog.ErrorLevel`, 3) +* warn (`zerolog.WarnLevel`, 2) +* info (`zerolog.InfoLevel`, 1) +* debug (`zerolog.DebugLevel`, 0) + +You can set the Global logging level to any of these options using the `SetGlobalLevel` function in the zerolog package, passing in one of the given constants above, e.g. `zerolog.InfoLevel` would be the "info" level. 
Whichever level is chosen, all logs with a level greater than or equal to that level will be written. To turn off logging entirely, pass the `zerolog.Disabled` constant. + +#### Setting Global Log Level + +This example uses command-line flags to demonstrate various outputs depending on the chosen log level. ```go -log.Info(). - Str("foo", "bar"). - Int("n", 123). - Msg("hello world") +package main -// Output: {"level":"info","time":1494567715,"foo":"bar","n":123,"message":"hello world"} +import ( + "flag" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + zerolog.TimeFieldFormat = "" + debug := flag.Bool("debug", false, "sets log level to debug") + + flag.Parse() + + // Default level for this example is info, unless debug flag is present + zerolog.SetGlobalLevel(zerolog.InfoLevel) + if *debug { + zerolog.SetGlobalLevel(zerolog.DebugLevel) + } + + log.Debug().Msg("This message appears only when log level set to Debug") + log.Info().Msg("This message appears when log level set to Debug or Info") + + if e := log.Debug(); e.Enabled() { + // Compute log output only if enabled. + value := "bar" + e.Str("foo", value).Msg("some debug message") + } +} ``` +Info Output (no flag) + +```bash +$ ./logLevelExample +{"time":1516387492,"level":"info","message":"This message appears when log level set to Debug or Info"} +``` + +Debug Output (debug flag set) + +```bash +$ ./logLevelExample -debug +{"time":1516387573,"level":"debug","message":"This message appears only when log level set to Debug"} +{"time":1516387573,"level":"info","message":"This message appears when log level set to Debug or Info"} +{"time":1516387573,"level":"debug","foo":"bar","message":"some debug message"} +``` + +#### Logging without Level or Message + +You may choose to log without a specific level by using the `Log` method. You may also write without a message by setting an empty string in the `msg string` parameter of the `Msg` method. Both are demonstrated in the example below. + +```go +package main + +import ( + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + zerolog.TimeFieldFormat = "" + + log.Log(). + Str("foo", "bar"). + Msg("") +} + +// Output: {"time":1494567715,"foo":"bar"} +``` + +#### Logging Fatal Messages + +```go +package main + +import ( + "errors" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + err := errors.New("A repo man spends his life getting into tense situations") + service := "myservice" + + zerolog.TimeFieldFormat = "" + + log.Fatal(). + Err(err). + Str("service", service). + Msgf("Cannot start %s", service) +} + +// Output: {"time":1516133263,"level":"fatal","error":"A repo man spends his life getting into tense situations","service":"myservice","message":"Cannot start myservice"} +// exit status 1 +``` + +> NOTE: Using `Msgf` generates one allocation even when the logger is disabled. + ### Create logger instance to manage different outputs ```go @@ -84,40 +243,47 @@ logger.Info().Str("foo", "bar").Msg("hello world") ```go sublogger := log.With(). - Str("component": "foo"). + Str("component", "foo"). Logger() sublogger.Info().Msg("hello world") // Output: {"level":"info","time":1494567715,"message":"hello world","component":"foo"} ``` -### Level logging - -```go -zerolog.SetGlobalLevel(zerolog.InfoLevel) - -log.Debug().Msg("filtered out message") -log.Info().Msg("routed message") - -if e := log.Debug(); e.Enabled() { - // Compute log output only if enabled. 
- value := compute() - e.Str("foo": value).Msg("some debug message") -} - -// Output: {"level":"info","time":1494567715,"message":"routed message"} -``` - ### Pretty logging +To log a human-friendly, colorized output, use `zerolog.ConsoleWriter`: + ```go -if isConsole { - log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr}) -} +log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr}) log.Info().Str("foo", "bar").Msg("Hello world") -// Output: 1494567715 |INFO| Hello world foo=bar +// Output: 3:04PM INF Hello World foo=bar +``` + +To customize the configuration and formatting: + +```go +output := zerolog.ConsoleWriter{Out: os.Stdout, TimeFormat: time.RFC3339} +output.FormatLevel = func(i interface{}) string { + return strings.ToUpper(fmt.Sprintf("| %-6s|", i)) +} +output.FormatMessage = func(i interface{}) string { + return fmt.Sprintf("***%s****", i) +} +output.FormatFieldName = func(i interface{}) string { + return fmt.Sprintf("%s:", i) +} +output.FormatFieldValue = func(i interface{}) string { + return strings.ToUpper(fmt.Sprintf("%s", i)) +} + +log := zerolog.New(output).With().Timestamp().Logger() + +log.Info().Str("foo", "bar").Msg("Hello World") + +// Output: 2006-01-02T15:04:05Z07:00 | INFO | ***Hello World**** foo:BAR ``` ### Sub dictionary @@ -127,7 +293,7 @@ log.Info(). Str("foo", "bar"). Dict("dict", zerolog.Dict(). Str("bar", "baz"). - Int("n", 1) + Int("n", 1), ).Msg("hello world") // Output: {"level":"info","time":1494567715,"foo":"bar","dict":{"bar":"baz","n":1},"message":"hello world"} @@ -145,20 +311,36 @@ log.Info().Msg("hello world") // Output: {"l":"info","t":1494567715,"m":"hello world"} ``` -### Log with no level nor message - -```go -log.Log().Str("foo","bar").Msg("") - -// Output: {"time":1494567715,"foo":"bar"} -``` - ### Add contextual fields to the global logger ```go log.Logger = log.With().Str("foo", "bar").Logger() ``` +### Add file and line number to log + +```go +log.Logger = log.With().Caller().Logger() +log.Info().Msg("hello world") + +// Output: {"level": "info", "message": "hello world", "caller": "/go/src/your_project/some_file:21"} +``` + + +### Thread-safe, lock-free, non-blocking writer + +If your writer might be slow or not thread-safe and you need your log producers to never get slowed down by a slow writer, you can use a `diode.Writer` as follow: + +```go +wr := diode.NewWriter(os.Stdout, 1000, 10*time.Millisecond, func(missed int) { + fmt.Printf("Logger Dropped %d messages", missed) + }) +log := zerolog.New(w) +log.Print("test") +``` + +You will need to install `code.cloudfoundry.org/go-diodes` to use this feature. + ### Log Sampling ```go @@ -206,7 +388,7 @@ hooked.Warn().Msg("") ### Pass a sub-logger by context ```go -ctx := log.With("component", "module").Logger().WithContext(ctx) +ctx := log.With().Str("component", "module").Logger().WithContext(ctx) log.Ctx(ctx).Info().Msg("hello world") @@ -286,17 +468,18 @@ if err := http.ListenAndServe(":8080", nil); err != nil { Some settings can be changed and will by applied to all loggers: * `log.Logger`: You can set this value to customize the global logger (the one used by package level methods). -* `zerolog.SetGlobalLevel`: Can raise the minimum level of all loggers. Set this to `zerolog.Disable` to disable logging altogether (quiet mode). +* `zerolog.SetGlobalLevel`: Can raise the minimum level of all loggers. Set this to `zerolog.Disabled` to disable logging altogether (quiet mode). 
 * `zerolog.DisableSampling`: If argument is `true`, all sampled loggers will stop sampling and issue 100% of their log events.
 * `zerolog.TimestampFieldName`: Can be set to customize `Timestamp` field name.
 * `zerolog.LevelFieldName`: Can be set to customize level field name.
 * `zerolog.MessageFieldName`: Can be set to customize message field name.
 * `zerolog.ErrorFieldName`: Can be set to customize `Err` field name.
 * `zerolog.TimeFieldFormat`: Can be set to customize `Time` field value formatting. If set with an empty string, times are formated as UNIX timestamp.
-  // DurationFieldUnit defines the unit for time.Duration type fields added
-  // using the Dur method.
+  // DurationFieldUnit defines the unit for time.Duration type fields added
+  // using the Dur method.
 * `DurationFieldUnit`: Sets the unit of the fields added by `Dur` (default: `time.Millisecond`).
 * `DurationFieldInteger`: If set to true, `Dur` fields are formatted as integers instead of floats.
+* `ErrorHandler`: Called whenever zerolog fails to write an event on its output. If not set, an error is printed on stderr. This handler must be thread-safe and non-blocking.
 
 ## Field Types
 
@@ -317,22 +500,39 @@ Some settings can be changed and will by applied to all loggers:
 * `Dict`: Adds a sub-key/value as a field of the event.
 * `Interface`: Uses reflection to marshal the type.
 
+## Binary Encoding
+
+In addition to the default JSON encoding, `zerolog` can produce binary logs using [CBOR](http://cbor.io) encoding. The choice of encoding can be decided at compile time using the build tag `binary_log` as follows:
+
+```bash
+go build -tags binary_log .
+```
+
+To decode binary-encoded log files you can use any CBOR decoder. One that has been tested to work
+with the zerolog library is [CSD](https://github.com/toravir/csd/).
+
+## Related Projects
+
+* [grpc-zerolog](https://github.com/cheapRoc/grpc-zerolog): Implementation of `grpclog.LoggerV2` interface using `zerolog`
+
 ## Benchmarks
 
+See [logbench](http://hackemist.com/logbench/) for more comprehensive and up-to-date benchmarks.
+
 All operations are allocation free (those numbers *include* JSON encoding):
 
-```
-BenchmarkLogEmpty-8        100000000    19.1 ns/op     0 B/op       0 allocs/op
-BenchmarkDisabled-8        500000000    4.07 ns/op     0 B/op       0 allocs/op
-BenchmarkInfo-8            30000000     42.5 ns/op     0 B/op       0 allocs/op
-BenchmarkContextFields-8   30000000     44.9 ns/op     0 B/op       0 allocs/op
-BenchmarkLogFields-8       10000000     184 ns/op      0 B/op       0 allocs/op
+```text
+BenchmarkLogEmpty-8        100000000    19.1 ns/op     0 B/op       0 allocs/op
+BenchmarkDisabled-8        500000000    4.07 ns/op     0 B/op       0 allocs/op
+BenchmarkInfo-8            30000000     42.5 ns/op     0 B/op       0 allocs/op
+BenchmarkContextFields-8   30000000     44.9 ns/op     0 B/op       0 allocs/op
+BenchmarkLogFields-8       10000000     184 ns/op      0 B/op       0 allocs/op
 ```
 
 There are a few Go logging benchmarks and comparisons that include zerolog.
 
-- [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench)
-- [uber-common/zap](https://github.com/uber-go/zap#performance)
+* [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench)
+* [uber-common/zap](https://github.com/uber-go/zap#performance)
 
 Using Uber's zap comparison benchmark:
 
@@ -376,3 +576,16 @@ Log a static string, without any context or `printf`-style templating:
 | apex/log | 2751 ns/op | 584 B/op | 11 allocs/op |
 | log15 | 5181 ns/op | 1592 B/op | 26 allocs/op |
 
+## Caveats
+
+Note that zerolog does not de-duplicate fields.
Using the same key multiple times creates duplicate keys in the final JSON:
+
+```go
+logger := zerolog.New(os.Stderr).With().Timestamp().Logger()
+logger.Info().
+       Timestamp().
+       Msg("dup")
+// Output: {"level":"info","time":1494567715,"time":1494567715,"message":"dup"}
+```
+
+However, it's usually not a big deal: JSON accepts duplicate keys, and the last one typically prevails.
diff --git a/_config.yml b/_config.yml
new file mode 100644
index 0000000..a1e896d
--- /dev/null
+++ b/_config.yml
@@ -0,0 +1 @@
+remote_theme: rs/gh-readme
diff --git a/array.go b/array.go
index ca17b8e..aa4f623 100644
--- a/array.go
+++ b/array.go
@@ -1,10 +1,9 @@
 package zerolog
 
 import (
+	"net"
 	"sync"
 	"time"
-
-	"github.com/rs/zerolog/internal/json"
 )
 
 var arrayPool = &sync.Pool{
@@ -15,10 +14,26 @@ var arrayPool = &sync.Pool{
 	},
 }
 
+// Array is used to prepopulate an array of items
+// which can be re-used to add to log messages.
 type Array struct {
 	buf []byte
 }
 
+func putArray(a *Array) {
+	// Proper usage of a sync.Pool requires each entry to have approximately
+	// the same memory cost. To obtain this property when the stored type
+	// contains a variably-sized buffer, we add a hard limit on the maximum buffer
+	// to place back in the pool.
+	//
+	// See https://golang.org/issue/23199
+	const maxSize = 1 << 16 // 64KiB
+	if cap(a.buf) > maxSize {
+		return
+	}
+	arrayPool.Put(a)
+}
+
 // Arr creates an array to be added to an Event or Context.
 func Arr() *Array {
 	a := arrayPool.Get().(*Array)
@@ -26,144 +41,184 @@ func Arr() *Array {
 	return a
 }
 
+// MarshalZerologArray method here is no-op - since data is
+// already in the needed format.
 func (*Array) MarshalZerologArray(*Array) {
 }
 
 func (a *Array) write(dst []byte) []byte {
-	if len(a.buf) == 0 {
-		dst = append(dst, `[]`...)
-	} else {
-		a.buf[0] = '['
-		dst = append(append(dst, a.buf...), ']')
+	dst = enc.AppendArrayStart(dst)
+	if len(a.buf) > 0 {
+		dst = append(append(dst, a.buf...))
 	}
-	arrayPool.Put(a)
+	dst = enc.AppendArrayEnd(dst)
+	putArray(a)
 	return dst
 }
 
 // Object marshals an object that implement the LogObjectMarshaler
-// interface and append it to the array.
+// interface and appends it to the array.
 func (a *Array) Object(obj LogObjectMarshaler) *Array {
-	a.buf = append(a.buf, ',')
 	e := Dict()
 	obj.MarshalZerologObject(e)
-	e.buf = append(e.buf, '}')
-	a.buf = append(a.buf, e.buf...)
+	e.buf = enc.AppendEndMarker(e.buf)
+	a.buf = append(enc.AppendArrayDelim(a.buf), e.buf...)
+	putEvent(e)
 	return a
 }
 
-// Str append the val as a string to the array.
+// Str appends the val as a string to the array.
 func (a *Array) Str(val string) *Array {
-	a.buf = json.AppendString(append(a.buf, ','), val)
+	a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), val)
 	return a
 }
 
-// Bytes append the val as a string to the array.
+// Bytes appends the val as a string to the array.
 func (a *Array) Bytes(val []byte) *Array {
-	a.buf = json.AppendBytes(append(a.buf, ','), val)
+	a.buf = enc.AppendBytes(enc.AppendArrayDelim(a.buf), val)
 	return a
 }
 
-// Err append the err as a string to the array.
+// Hex appends the val as a hex string to the array.
+func (a *Array) Hex(val []byte) *Array {
+	a.buf = enc.AppendHex(enc.AppendArrayDelim(a.buf), val)
+	return a
+}
+
+// Err serializes and appends the err to the array.
 func (a *Array) Err(err error) *Array {
-	a.buf = json.AppendError(append(a.buf, ','), err)
+	marshaled := ErrorMarshalFunc(err)
+	switch m := marshaled.(type) {
+	case LogObjectMarshaler:
+		e := newEvent(nil, 0)
+		e.buf = e.buf[:0]
+		e.appendObject(m)
+		a.buf = append(enc.AppendArrayDelim(a.buf), e.buf...)
+		putEvent(e)
+	case error:
+		a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), m.Error())
+	case string:
+		a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), m)
+	default:
+		a.buf = enc.AppendInterface(enc.AppendArrayDelim(a.buf), m)
+	}
+
 	return a
 }
 
-// Bool append the val as a bool to the array.
+// Bool appends the val as a bool to the array.
 func (a *Array) Bool(b bool) *Array {
-	a.buf = json.AppendBool(append(a.buf, ','), b)
+	a.buf = enc.AppendBool(enc.AppendArrayDelim(a.buf), b)
 	return a
 }
 
-// Int append i as a int to the array.
+// Int appends i as an int to the array.
 func (a *Array) Int(i int) *Array {
-	a.buf = json.AppendInt(append(a.buf, ','), i)
+	a.buf = enc.AppendInt(enc.AppendArrayDelim(a.buf), i)
 	return a
 }
 
-// Int8 append i as a int8 to the array.
+// Int8 appends i as an int8 to the array.
 func (a *Array) Int8(i int8) *Array {
-	a.buf = json.AppendInt8(append(a.buf, ','), i)
+	a.buf = enc.AppendInt8(enc.AppendArrayDelim(a.buf), i)
 	return a
 }
 
-// Int16 append i as a int16 to the array.
+// Int16 appends i as an int16 to the array.
 func (a *Array) Int16(i int16) *Array {
-	a.buf = json.AppendInt16(append(a.buf, ','), i)
+	a.buf = enc.AppendInt16(enc.AppendArrayDelim(a.buf), i)
 	return a
 }
 
-// Int32 append i as a int32 to the array.
+// Int32 appends i as an int32 to the array.
 func (a *Array) Int32(i int32) *Array {
-	a.buf = json.AppendInt32(append(a.buf, ','), i)
+	a.buf = enc.AppendInt32(enc.AppendArrayDelim(a.buf), i)
 	return a
 }
 
-// Int64 append i as a int64 to the array.
+// Int64 appends i as an int64 to the array.
 func (a *Array) Int64(i int64) *Array {
-	a.buf = json.AppendInt64(append(a.buf, ','), i)
+	a.buf = enc.AppendInt64(enc.AppendArrayDelim(a.buf), i)
 	return a
 }
 
-// Uint append i as a uint to the array.
+// Uint appends i as a uint to the array.
 func (a *Array) Uint(i uint) *Array {
-	a.buf = json.AppendUint(append(a.buf, ','), i)
+	a.buf = enc.AppendUint(enc.AppendArrayDelim(a.buf), i)
 	return a
 }
 
-// Uint8 append i as a uint8 to the array.
+// Uint8 appends i as a uint8 to the array.
 func (a *Array) Uint8(i uint8) *Array {
-	a.buf = json.AppendUint8(append(a.buf, ','), i)
+	a.buf = enc.AppendUint8(enc.AppendArrayDelim(a.buf), i)
	return a
 }
 
-// Uint16 append i as a uint16 to the array.
+// Uint16 appends i as a uint16 to the array.
 func (a *Array) Uint16(i uint16) *Array {
-	a.buf = json.AppendUint16(append(a.buf, ','), i)
+	a.buf = enc.AppendUint16(enc.AppendArrayDelim(a.buf), i)
 	return a
 }
 
-// Uint32 append i as a uint32 to the array.
+// Uint32 appends i as a uint32 to the array.
 func (a *Array) Uint32(i uint32) *Array {
-	a.buf = json.AppendUint32(append(a.buf, ','), i)
+	a.buf = enc.AppendUint32(enc.AppendArrayDelim(a.buf), i)
 	return a
 }
 
-// Uint64 append i as a uint64 to the array.
+// Uint64 appends i as a uint64 to the array.
 func (a *Array) Uint64(i uint64) *Array {
-	a.buf = json.AppendUint64(append(a.buf, ','), i)
+	a.buf = enc.AppendUint64(enc.AppendArrayDelim(a.buf), i)
 	return a
 }
 
-// Float32 append f as a float32 to the array.
+// Float32 appends f as a float32 to the array.
 func (a *Array) Float32(f float32) *Array {
-	a.buf = json.AppendFloat32(append(a.buf, ','), f)
+	a.buf = enc.AppendFloat32(enc.AppendArrayDelim(a.buf), f)
 	return a
 }
 
-// Float64 append f as a float64 to the array.
+// Float64 appends f as a float64 to the array.
 func (a *Array) Float64(f float64) *Array {
-	a.buf = json.AppendFloat64(append(a.buf, ','), f)
+	a.buf = enc.AppendFloat64(enc.AppendArrayDelim(a.buf), f)
 	return a
 }
 
-// Time append t formated as string using zerolog.TimeFieldFormat.
+// Time appends t formatted as string using zerolog.TimeFieldFormat.
 func (a *Array) Time(t time.Time) *Array {
-	a.buf = json.AppendTime(append(a.buf, ','), t, TimeFieldFormat)
+	a.buf = enc.AppendTime(enc.AppendArrayDelim(a.buf), t, TimeFieldFormat)
 	return a
 }
 
-// Dur append d to the array.
+// Dur appends d to the array.
 func (a *Array) Dur(d time.Duration) *Array {
-	a.buf = json.AppendDuration(append(a.buf, ','), d, DurationFieldUnit, DurationFieldInteger)
+	a.buf = enc.AppendDuration(enc.AppendArrayDelim(a.buf), d, DurationFieldUnit, DurationFieldInteger)
 	return a
 }
 
-// Interface append i marshaled using reflection.
+// Interface appends i marshaled using reflection.
 func (a *Array) Interface(i interface{}) *Array {
 	if obj, ok := i.(LogObjectMarshaler); ok {
 		return a.Object(obj)
 	}
-	a.buf = json.AppendInterface(append(a.buf, ','), i)
+	a.buf = enc.AppendInterface(enc.AppendArrayDelim(a.buf), i)
+	return a
+}
+
+// IPAddr adds an IPv4 or IPv6 address to the array.
+func (a *Array) IPAddr(ip net.IP) *Array {
+	a.buf = enc.AppendIPAddr(enc.AppendArrayDelim(a.buf), ip)
+	return a
+}
+
+// IPPrefix adds an IPv4 or IPv6 prefix (IP + mask) to the array.
+func (a *Array) IPPrefix(pfx net.IPNet) *Array {
+	a.buf = enc.AppendIPPrefix(enc.AppendArrayDelim(a.buf), pfx)
+	return a
+}
+
+// MACAddr adds a MAC (Ethernet) address to the array.
+func (a *Array) MACAddr(ha net.HardwareAddr) *Array {
+	a.buf = enc.AppendMACAddr(enc.AppendArrayDelim(a.buf), ha)
 	return a
 }
diff --git a/array_test.go b/array_test.go
index 02fe9ae..66c6668 100644
--- a/array_test.go
+++ b/array_test.go
@@ -1,6 +1,7 @@
 package zerolog
 
 import (
+	"net"
 	"testing"
 	"time"
 )
@@ -18,13 +19,16 @@ func TestArray(t *testing.T) {
 		Uint16(8).
 		Uint32(9).
 		Uint64(10).
-		Float32(11).
-		Float64(12).
+		Float32(11.98122).
+		Float64(12.987654321).
 		Str("a").
+		Bytes([]byte("b")).
+		Hex([]byte{0x1f}).
 		Time(time.Time{}).
+		IPAddr(net.IP{192, 168, 0, 10}).
Dur(0) - want := `[true,1,2,3,4,5,6,7,8,9,10,11,12,"a","0001-01-01T00:00:00Z",0]` - if got := string(a.write([]byte{})); got != want { + want := `[true,1,2,3,4,5,6,7,8,9,10,11.98122,12.987654321,"a","b","1f","0001-01-01T00:00:00Z","192.168.0.10",0]` + if got := decodeObjectToStr(a.write([]byte{})); got != want { t.Errorf("Array.write()\ngot: %s\nwant: %s", got, want) } } diff --git a/benchmark_test.go b/benchmark_test.go index 25e9225..4ae9d49 100644 --- a/benchmark_test.go +++ b/benchmark_test.go @@ -96,6 +96,22 @@ func (o obj) MarshalZerologObject(e *Event) { Int("priv", o.priv) } +func BenchmarkLogArrayObject(b *testing.B) { + obj1 := obj{"a", "b", 2} + obj2 := obj{"c", "d", 3} + obj3 := obj{"e", "f", 4} + logger := New(ioutil.Discard) + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + arr := Arr() + arr.Object(&obj1) + arr.Object(&obj2) + arr.Object(&obj3) + logger.Info().Array("objects", arr).Msg("test") + } +} + func BenchmarkLogFieldType(b *testing.B) { bools := []bool{true, false, true, false, true, false, true, false, true, false} ints := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9} diff --git a/binary_test.go b/binary_test.go new file mode 100644 index 0000000..b4b52d9 --- /dev/null +++ b/binary_test.go @@ -0,0 +1,499 @@ +// +build binary_log + +package zerolog + +import ( + "bytes" + "errors" + "fmt" + + // "io/ioutil" + stdlog "log" + "time" +) + +func ExampleBinaryNew() { + dst := bytes.Buffer{} + log := New(&dst) + + log.Info().Msg("hello world") + fmt.Println(decodeIfBinaryToString(dst.Bytes())) + // Output: {"level":"info","message":"hello world"} +} + +func ExampleLogger_With() { + dst := bytes.Buffer{} + log := New(&dst). + With(). + Str("foo", "bar"). + Logger() + + log.Info().Msg("hello world") + fmt.Println(decodeIfBinaryToString(dst.Bytes())) + + // Output: {"level":"info","foo":"bar","message":"hello world"} +} + +func ExampleLogger_Level() { + dst := bytes.Buffer{} + log := New(&dst).Level(WarnLevel) + + log.Info().Msg("filtered out message") + log.Error().Msg("kept message") + + fmt.Println(decodeIfBinaryToString(dst.Bytes())) + // Output: {"level":"error","message":"kept message"} +} + +func ExampleLogger_Sample() { + dst := bytes.Buffer{} + log := New(&dst).Sample(&BasicSampler{N: 2}) + + log.Info().Msg("message 1") + log.Info().Msg("message 2") + log.Info().Msg("message 3") + log.Info().Msg("message 4") + + fmt.Println(decodeIfBinaryToString(dst.Bytes())) + // Output: {"level":"info","message":"message 1"} + // {"level":"info","message":"message 3"} +} + +type LevelNameHook1 struct{} + +func (h LevelNameHook1) Run(e *Event, l Level, msg string) { + if l != NoLevel { + e.Str("level_name", l.String()) + } else { + e.Str("level_name", "NoLevel") + } +} + +type MessageHook string + +func (h MessageHook) Run(e *Event, l Level, msg string) { + e.Str("the_message", msg) +} + +func ExampleLogger_Hook() { + var levelNameHook LevelNameHook1 + var messageHook MessageHook = "The message" + + dst := bytes.Buffer{} + log := New(&dst).Hook(levelNameHook).Hook(messageHook) + + log.Info().Msg("hello world") + + fmt.Println(decodeIfBinaryToString(dst.Bytes())) + // Output: {"level":"info","level_name":"info","the_message":"hello world","message":"hello world"} +} + +func ExampleLogger_Print() { + dst := bytes.Buffer{} + log := New(&dst) + + log.Print("hello world") + + fmt.Println(decodeIfBinaryToString(dst.Bytes())) + // Output: {"level":"debug","message":"hello world"} +} + +func ExampleLogger_Printf() { + dst := bytes.Buffer{} + log := New(&dst) + + log.Printf("hello 
%s", "world") + + fmt.Println(decodeIfBinaryToString(dst.Bytes())) + // Output: {"level":"debug","message":"hello world"} +} + +func ExampleLogger_Debug() { + dst := bytes.Buffer{} + log := New(&dst) + + log.Debug(). + Str("foo", "bar"). + Int("n", 123). + Msg("hello world") + + fmt.Println(decodeIfBinaryToString(dst.Bytes())) + // Output: {"level":"debug","foo":"bar","n":123,"message":"hello world"} +} + +func ExampleLogger_Info() { + dst := bytes.Buffer{} + log := New(&dst) + + log.Info(). + Str("foo", "bar"). + Int("n", 123). + Msg("hello world") + + fmt.Println(decodeIfBinaryToString(dst.Bytes())) + // Output: {"level":"info","foo":"bar","n":123,"message":"hello world"} +} + +func ExampleLogger_Warn() { + dst := bytes.Buffer{} + log := New(&dst) + + log.Warn(). + Str("foo", "bar"). + Msg("a warning message") + + fmt.Println(decodeIfBinaryToString(dst.Bytes())) + // Output: {"level":"warn","foo":"bar","message":"a warning message"} +} + +func ExampleLogger_Error() { + dst := bytes.Buffer{} + log := New(&dst) + + log.Error(). + Err(errors.New("some error")). + Msg("error doing something") + + fmt.Println(decodeIfBinaryToString(dst.Bytes())) + // Output: {"level":"error","error":"some error","message":"error doing something"} +} + +func ExampleLogger_WithLevel() { + dst := bytes.Buffer{} + log := New(&dst) + + log.WithLevel(InfoLevel). + Msg("hello world") + + fmt.Println(decodeIfBinaryToString(dst.Bytes())) + // Output: {"level":"info","message":"hello world"} +} + +func ExampleLogger_Write() { + dst := bytes.Buffer{} + log := New(&dst).With(). + Str("foo", "bar"). + Logger() + + stdlog.SetFlags(0) + stdlog.SetOutput(log) + + stdlog.Print("hello world") + + fmt.Println(decodeIfBinaryToString(dst.Bytes())) + // Output: {"foo":"bar","message":"hello world"} +} + +func ExampleLogger_Log() { + dst := bytes.Buffer{} + log := New(&dst) + + log.Log(). + Str("foo", "bar"). + Str("bar", "baz"). + Msg("") + + fmt.Println(decodeIfBinaryToString(dst.Bytes())) + // Output: {"foo":"bar","bar":"baz"} +} + +func ExampleEvent_Dict() { + dst := bytes.Buffer{} + log := New(&dst) + + log.Log(). + Str("foo", "bar"). + Dict("dict", Dict(). + Str("bar", "baz"). + Int("n", 1), + ). + Msg("hello world") + + fmt.Println(decodeIfBinaryToString(dst.Bytes())) + // Output: {"foo":"bar","dict":{"bar":"baz","n":1},"message":"hello world"} +} + +type User struct { + Name string + Age int + Created time.Time +} + +func (u User) MarshalZerologObject(e *Event) { + e.Str("name", u.Name). + Int("age", u.Age). + Time("created", u.Created) +} + +type Users []User + +func (uu Users) MarshalZerologArray(a *Array) { + for _, u := range uu { + a.Object(u) + } +} + +func ExampleEvent_Array() { + dst := bytes.Buffer{} + log := New(&dst) + + log.Log(). + Str("foo", "bar"). + Array("array", Arr(). + Str("baz"). + Int(1), + ). + Msg("hello world") + + fmt.Println(decodeIfBinaryToString(dst.Bytes())) + // Output: {"foo":"bar","array":["baz",1],"message":"hello world"} +} + +func ExampleEvent_Array_object() { + dst := bytes.Buffer{} + log := New(&dst) + + // Users implements LogArrayMarshaler + u := Users{ + User{"John", 35, time.Time{}}, + User{"Bob", 55, time.Time{}}, + } + + log.Log(). + Str("foo", "bar"). + Array("users", u). 
+ Msg("hello world") + + fmt.Println(decodeIfBinaryToString(dst.Bytes())) + // Output: {"foo":"bar","users":[{"name":"John","age":35,"created":"0001-01-01T00:00:00Z"},{"name":"Bob","age":55,"created":"0001-01-01T00:00:00Z"}],"message":"hello world"} +} + +func ExampleEvent_Object() { + dst := bytes.Buffer{} + log := New(&dst) + + // User implements LogObjectMarshaler + u := User{"John", 35, time.Time{}} + + log.Log(). + Str("foo", "bar"). + Object("user", u). + Msg("hello world") + + fmt.Println(decodeIfBinaryToString(dst.Bytes())) + // Output: {"foo":"bar","user":{"name":"John","age":35,"created":"0001-01-01T00:00:00Z"},"message":"hello world"} +} + +func ExampleEvent_EmbedObject() { + price := Price{val: 6449, prec: 2, unit: "$"} + + dst := bytes.Buffer{} + log := New(&dst) + + log.Log(). + Str("foo", "bar"). + EmbedObject(price). + Msg("hello world") + + fmt.Println(decodeIfBinaryToString(dst.Bytes())) + // Output: {"foo":"bar","price":"$64.49","message":"hello world"} +} + +func ExampleEvent_Interface() { + dst := bytes.Buffer{} + log := New(&dst) + + obj := struct { + Name string `json:"name"` + }{ + Name: "john", + } + + log.Log(). + Str("foo", "bar"). + Interface("obj", obj). + Msg("hello world") + + fmt.Println(decodeIfBinaryToString(dst.Bytes())) + // Output: {"foo":"bar","obj":{"name":"john"},"message":"hello world"} +} + +func ExampleEvent_Dur() { + d := time.Duration(10 * time.Second) + + dst := bytes.Buffer{} + log := New(&dst) + + log.Log(). + Str("foo", "bar"). + Dur("dur", d). + Msg("hello world") + + fmt.Println(decodeIfBinaryToString(dst.Bytes())) + // Output: {"foo":"bar","dur":10000,"message":"hello world"} +} + +func ExampleEvent_Durs() { + d := []time.Duration{ + time.Duration(10 * time.Second), + time.Duration(20 * time.Second), + } + + dst := bytes.Buffer{} + log := New(&dst) + + log.Log(). + Str("foo", "bar"). + Durs("durs", d). + Msg("hello world") + + fmt.Println(decodeIfBinaryToString(dst.Bytes())) + // Output: {"foo":"bar","durs":[10000,20000],"message":"hello world"} +} + +func ExampleContext_Dict() { + dst := bytes.Buffer{} + log := New(&dst).With(). + Str("foo", "bar"). + Dict("dict", Dict(). + Str("bar", "baz"). + Int("n", 1), + ).Logger() + + log.Log().Msg("hello world") + + fmt.Println(decodeIfBinaryToString(dst.Bytes())) + // Output: {"foo":"bar","dict":{"bar":"baz","n":1},"message":"hello world"} +} + +func ExampleContext_Array() { + dst := bytes.Buffer{} + log := New(&dst).With(). + Str("foo", "bar"). + Array("array", Arr(). + Str("baz"). + Int(1), + ).Logger() + + log.Log().Msg("hello world") + + fmt.Println(decodeIfBinaryToString(dst.Bytes())) + // Output: {"foo":"bar","array":["baz",1],"message":"hello world"} +} + +func ExampleContext_Array_object() { + // Users implements LogArrayMarshaler + u := Users{ + User{"John", 35, time.Time{}}, + User{"Bob", 55, time.Time{}}, + } + + dst := bytes.Buffer{} + log := New(&dst).With(). + Str("foo", "bar"). + Array("users", u). + Logger() + + log.Log().Msg("hello world") + + fmt.Println(decodeIfBinaryToString(dst.Bytes())) + // Output: {"foo":"bar","users":[{"name":"John","age":35,"created":"0001-01-01T00:00:00Z"},{"name":"Bob","age":55,"created":"0001-01-01T00:00:00Z"}],"message":"hello world"} +} + +type Price struct { + val uint64 + prec int + unit string +} + +func (p Price) MarshalZerologObject(e *Event) { + denom := uint64(1) + for i := 0; i < p.prec; i++ { + denom *= 10 + } + result := []byte(p.unit) + result = append(result, fmt.Sprintf("%d.%d", p.val/denom, p.val%denom)...) 
+ e.Str("price", string(result)) +} + +func ExampleContext_EmbedObject() { + price := Price{val: 6449, prec: 2, unit: "$"} + + dst := bytes.Buffer{} + log := New(&dst).With(). + Str("foo", "bar"). + EmbedObject(price). + Logger() + + log.Log().Msg("hello world") + + fmt.Println(decodeIfBinaryToString(dst.Bytes())) + // Output: {"foo":"bar","price":"$64.49","message":"hello world"} +} +func ExampleContext_Object() { + // User implements LogObjectMarshaler + u := User{"John", 35, time.Time{}} + + dst := bytes.Buffer{} + log := New(&dst).With(). + Str("foo", "bar"). + Object("user", u). + Logger() + + log.Log().Msg("hello world") + + fmt.Println(decodeIfBinaryToString(dst.Bytes())) + // Output: {"foo":"bar","user":{"name":"John","age":35,"created":"0001-01-01T00:00:00Z"},"message":"hello world"} +} + +func ExampleContext_Interface() { + obj := struct { + Name string `json:"name"` + }{ + Name: "john", + } + + dst := bytes.Buffer{} + log := New(&dst).With(). + Str("foo", "bar"). + Interface("obj", obj). + Logger() + + log.Log().Msg("hello world") + + fmt.Println(decodeIfBinaryToString(dst.Bytes())) + // Output: {"foo":"bar","obj":{"name":"john"},"message":"hello world"} +} + +func ExampleContext_Dur() { + d := time.Duration(10 * time.Second) + + dst := bytes.Buffer{} + log := New(&dst).With(). + Str("foo", "bar"). + Dur("dur", d). + Logger() + + log.Log().Msg("hello world") + + fmt.Println(decodeIfBinaryToString(dst.Bytes())) + // Output: {"foo":"bar","dur":10000,"message":"hello world"} +} + +func ExampleContext_Durs() { + d := []time.Duration{ + time.Duration(10 * time.Second), + time.Duration(20 * time.Second), + } + + dst := bytes.Buffer{} + log := New(&dst).With(). + Str("foo", "bar"). + Durs("durs", d). + Logger() + + log.Log().Msg("hello world") + + fmt.Println(decodeIfBinaryToString(dst.Bytes())) + // Output: {"foo":"bar","durs":[10000,20000],"message":"hello world"} +} diff --git a/cmd/lint/lint.go b/cmd/lint/lint.go new file mode 100644 index 0000000..700371f --- /dev/null +++ b/cmd/lint/lint.go @@ -0,0 +1,175 @@ +package main + +import ( + "flag" + "fmt" + "go/ast" + "go/token" + "go/types" + "os" + "path/filepath" + "strings" + + "golang.org/x/tools/go/loader" +) + +var ( + recursivelyIgnoredPkgs arrayFlag + ignoredPkgs arrayFlag + ignoredFiles arrayFlag + allowedFinishers arrayFlag = []string{"Msg", "Msgf"} + rootPkg string +) + +// parse input flags and args +func init() { + flag.Var(&recursivelyIgnoredPkgs, "ignorePkgRecursively", "ignore the specified package and all subpackages recursively") + flag.Var(&ignoredPkgs, "ignorePkg", "ignore the specified package") + flag.Var(&ignoredFiles, "ignoreFile", "ignore the specified file by its path and/or go path (package/file.go)") + flag.Var(&allowedFinishers, "finisher", "allowed finisher for the event chain") + flag.Parse() + + // add zerolog to recursively ignored packages + recursivelyIgnoredPkgs = append(recursivelyIgnoredPkgs, "github.com/rs/zerolog") + args := flag.Args() + if len(args) != 1 { + fmt.Fprintln(os.Stderr, "you must provide exactly one package path") + os.Exit(1) + } + rootPkg = args[0] +} + +func main() { + // load the package and all its dependencies + conf := loader.Config{} + conf.Import(rootPkg) + p, err := conf.Load() + if err != nil { + fmt.Fprintf(os.Stderr, "Error: unable to load the root package. 
%s\n", err.Error()) + os.Exit(1) + } + + // get the github.com/rs/zerolog.Event type + event := getEvent(p) + if event == nil { + fmt.Fprintln(os.Stderr, "Error: github.com/rs/zerolog.Event declaration not found, maybe zerolog is not imported in the scanned package?") + os.Exit(1) + } + + // get all selections (function calls) with the github.com/rs/zerolog.Event (or pointer) receiver + selections := getSelectionsWithReceiverType(p, event) + + // print the violations (if any) + hasViolations := false + for _, s := range selections { + if hasBadFinisher(p, s) { + hasViolations = true + fmt.Printf("Error: missing or bad finisher for log chain, last call: %q at: %s:%v\n", s.fn.Name(), p.Fset.File(s.Pos()).Name(), p.Fset.Position(s.Pos()).Line) + } + } + + // if no violations detected, return normally + if !hasViolations { + fmt.Println("No violations found") + return + } + + // if violations were detected, return error code + os.Exit(1) +} + +func getEvent(p *loader.Program) types.Type { + for _, pkg := range p.AllPackages { + if strings.HasSuffix(pkg.Pkg.Path(), "github.com/rs/zerolog") { + for _, d := range pkg.Defs { + if d != nil && d.Name() == "Event" { + return d.Type() + } + } + } + } + + return nil +} + +func getSelectionsWithReceiverType(p *loader.Program, targetType types.Type) map[token.Pos]selection { + selections := map[token.Pos]selection{} + + for _, z := range p.AllPackages { + for i, t := range z.Selections { + switch o := t.Obj().(type) { + case *types.Func: + // this is not a bug, o.Type() is always *types.Signature, see docs + if vt := o.Type().(*types.Signature).Recv(); vt != nil { + typ := vt.Type() + if pointer, ok := typ.(*types.Pointer); ok { + typ = pointer.Elem() + } + + if typ == targetType { + if s, ok := selections[i.Pos()]; !ok || i.End() > s.End() { + selections[i.Pos()] = selection{i, o, z.Pkg} + } + } + } + default: + // skip + } + } + } + + return selections +} + +func hasBadFinisher(p *loader.Program, s selection) bool { + pkgPath := strings.TrimPrefix(s.pkg.Path(), rootPkg+"/vendor/") + absoluteFilePath := strings.TrimPrefix(p.Fset.File(s.Pos()).Name(), rootPkg+"/vendor/") + goFilePath := pkgPath + "/" + filepath.Base(p.Fset.Position(s.Pos()).Filename) + + for _, f := range allowedFinishers { + if f == s.fn.Name() { + return false + } + } + + for _, ignoredPkg := range recursivelyIgnoredPkgs { + if strings.HasPrefix(pkgPath, ignoredPkg) { + return false + } + } + + for _, ignoredPkg := range ignoredPkgs { + if pkgPath == ignoredPkg { + return false + } + } + + for _, ignoredFile := range ignoredFiles { + if absoluteFilePath == ignoredFile { + return false + } + + if goFilePath == ignoredFile { + return false + } + } + + return true +} + +type arrayFlag []string + +func (i *arrayFlag) String() string { + return fmt.Sprintf("%v", []string(*i)) +} + +func (i *arrayFlag) Set(value string) error { + *i = append(*i, value) + return nil +} + +type selection struct { + *ast.SelectorExpr + fn *types.Func + pkg *types.Package +} diff --git a/cmd/lint/readme.md b/cmd/lint/readme.md new file mode 100644 index 0000000..a15cba5 --- /dev/null +++ b/cmd/lint/readme.md @@ -0,0 +1,37 @@ +# Zerolog Lint + +This is a basic linter that checks for missing log event finishers. Finds errors like: `log.Error().Int64("userID": 5)` - missing the `Msg`/`Msgf` finishers. + +## Problem + +When using zerolog it's easy to forget to finish the log event chain by calling a finisher - the `Msg` or `Msgf` function that will schedule the event for writing. 
The problem with this is that it doesn't warn/panic during compilation and it's not easily found by grep or other general tools. It's even prominently mentioned in the project's readme, that: + +> It is very important to note that when using the **zerolog** chaining API, as shown above (`log.Info().Msg("hello world"`), the chain must have either the `Msg` or `Msgf` method call. If you forget to add either of these, the log will not occur and there is no compile time error to alert you of this. + +## Solution + +A basic linter like this one here that looks for method invocations on `zerolog.Event` can examine the last call in a method call chain and check if it is a finisher, thus pointing out these errors. + +## Usage + +Just compile this and then run it. Or just run it via `go run` command via something like `go run cmd/lint/lint.go`. + +The command accepts only one argument - the package to be inspected - and 4 optional flags, all of which can occur multiple times. The standard synopsis of the command is: + +`lint [-finisher value] [-ignoreFile value] [-ignorePkg value] [-ignorePkgRecursively value] package` + +#### Flags + +- finisher + - specify which finishers to accept, defaults to `Msg` and `Msgf` +- ignoreFile + - which files to ignore, either by full path or by go path (package/file.go) +- ignorePkg + - do not inspect the specified package if found in the dependecy tree +- ignorePkgRecursively + - do not inspect the specified package or its subpackages if found in the dependency tree + +## Drawbacks + +As it is, linter can generate a false positives in a specific case. These false positives come from the fact that if you have a method that returns a `zerolog.Event` the linter will flag it because you are obviously not finishing the event. This will be solved in later release. + diff --git a/console.go b/console.go index ca15045..e13d419 100644 --- a/console.go +++ b/console.go @@ -5,108 +5,255 @@ import ( "encoding/json" "fmt" "io" + "os" "sort" "strconv" "strings" "sync" + "time" ) const ( - cReset = 0 - cBold = 1 - cRed = 31 - cGreen = 32 - cYellow = 33 - cBlue = 34 - cMagenta = 35 - cCyan = 36 - cGray = 37 - cDarkGray = 90 + colorBold = iota + 1 + colorFaint ) -var consoleBufPool = sync.Pool{ - New: func() interface{} { - return bytes.NewBuffer(make([]byte, 0, 100)) - }, -} +const ( + colorBlack = iota + 30 + colorRed + colorGreen + colorYellow + colorBlue + colorMagenta + colorCyan + colorWhite +) -// ConsoleWriter reads a JSON object per write operation and output an -// optionally colored human readable version on the Out writer. +var ( + consoleBufPool = sync.Pool{ + New: func() interface{} { + return bytes.NewBuffer(make([]byte, 0, 100)) + }, + } +) + +const ( + consoleDefaultTimeFormat = time.Kitchen +) + +// Formatter transforms the input into a formatted string. +type Formatter func(interface{}) string + +// ConsoleWriter parses the JSON input and writes it in an +// (optionally) colorized, human-friendly format to Out. type ConsoleWriter struct { - Out io.Writer + // Out is the output destination. + Out io.Writer + + // NoColor disables the colorized output. NoColor bool + + // TimeFormat specifies the format for timestamp in output. + TimeFormat string + + // PartsOrder defines the order of parts in output. 
+ PartsOrder []string + + FormatTimestamp Formatter + FormatLevel Formatter + FormatCaller Formatter + FormatMessage Formatter + FormatFieldName Formatter + FormatFieldValue Formatter + FormatErrFieldName Formatter + FormatErrFieldValue Formatter } +// NewConsoleWriter creates and initializes a new ConsoleWriter. +func NewConsoleWriter(options ...func(w *ConsoleWriter)) ConsoleWriter { + w := ConsoleWriter{ + Out: os.Stdout, + TimeFormat: consoleDefaultTimeFormat, + PartsOrder: consoleDefaultPartsOrder(), + } + + for _, opt := range options { + opt(&w) + } + + return w +} + +// Write transforms the JSON input with formatters and appends to w.Out. func (w ConsoleWriter) Write(p []byte) (n int, err error) { - var event map[string]interface{} - err = json.Unmarshal(p, &event) + if w.PartsOrder == nil { + w.PartsOrder = consoleDefaultPartsOrder() + } + + var buf = consoleBufPool.Get().(*bytes.Buffer) + defer func() { + buf.Reset() + consoleBufPool.Put(buf) + }() + + var evt map[string]interface{} + p = decodeIfBinaryToBytes(p) + d := json.NewDecoder(bytes.NewReader(p)) + d.UseNumber() + err = d.Decode(&evt) if err != nil { - return + return n, fmt.Errorf("cannot decode event: %s", err) } - buf := consoleBufPool.Get().(*bytes.Buffer) - defer consoleBufPool.Put(buf) - lvlColor := cReset - level := "????" - if l, ok := event[LevelFieldName].(string); ok { - if !w.NoColor { - lvlColor = levelColor(l) - } - level = strings.ToUpper(l)[0:4] + + for _, p := range w.PartsOrder { + w.writePart(buf, evt, p) } - fmt.Fprintf(buf, "%s |%s| %s", - colorize(event[TimestampFieldName], cDarkGray, !w.NoColor), - colorize(level, lvlColor, !w.NoColor), - colorize(event[MessageFieldName], cReset, !w.NoColor)) - fields := make([]string, 0, len(event)) - for field := range event { + + w.writeFields(evt, buf) + + err = buf.WriteByte('\n') + if err != nil { + return n, err + } + _, err = buf.WriteTo(w.Out) + return len(p), err +} + +// writeFields appends formatted key-value pairs to buf. +func (w ConsoleWriter) writeFields(evt map[string]interface{}, buf *bytes.Buffer) { + var fields = make([]string, 0, len(evt)) + for field := range evt { switch field { - case LevelFieldName, TimestampFieldName, MessageFieldName: + case LevelFieldName, TimestampFieldName, MessageFieldName, CallerFieldName: continue } fields = append(fields, field) } sort.Strings(fields) - for _, field := range fields { - fmt.Fprintf(buf, " %s=", colorize(field, cCyan, !w.NoColor)) - switch value := event[field].(type) { - case string: - if needsQuote(value) { - buf.WriteString(strconv.Quote(value)) - } else { - buf.WriteString(value) + + if len(fields) > 0 { + buf.WriteByte(' ') + } + + // Move the "error" field to the front + ei := sort.Search(len(fields), func(i int) bool { return fields[i] >= ErrorFieldName }) + if ei < len(fields) && fields[ei] == ErrorFieldName { + fields[ei] = "" + fields = append([]string{ErrorFieldName}, fields...) 
+ var xfields = make([]string, 0, len(fields)) + for _, field := range fields { + if field == "" { // Skip empty fields + continue } + xfields = append(xfields, field) + } + fields = xfields + } + + for i, field := range fields { + var fn Formatter + var fv Formatter + + if field == ErrorFieldName { + if w.FormatErrFieldName == nil { + fn = consoleDefaultFormatErrFieldName(w.NoColor) + } else { + fn = w.FormatErrFieldName + } + + if w.FormatErrFieldValue == nil { + fv = consoleDefaultFormatErrFieldValue(w.NoColor) + } else { + fv = w.FormatErrFieldValue + } + } else { + if w.FormatFieldName == nil { + fn = consoleDefaultFormatFieldName(w.NoColor) + } else { + fn = w.FormatFieldName + } + + if w.FormatFieldValue == nil { + fv = consoleDefaultFormatFieldValue + } else { + fv = w.FormatFieldValue + } + } + + buf.WriteString(fn(field)) + + switch fValue := evt[field].(type) { + case string: + if needsQuote(fValue) { + buf.WriteString(fv(strconv.Quote(fValue))) + } else { + buf.WriteString(fv(fValue)) + } + case json.Number: + buf.WriteString(fv(fValue)) default: - fmt.Fprint(buf, value) + b, err := json.Marshal(fValue) + if err != nil { + fmt.Fprintf(buf, colorize("[error: %v]", colorRed, w.NoColor), err) + } else { + fmt.Fprint(buf, fv(b)) + } + } + + if i < len(fields)-1 { // Skip space for last field + buf.WriteByte(' ') } } - buf.WriteByte('\n') - buf.WriteTo(w.Out) - n = len(p) - return } -func colorize(s interface{}, color int, enabled bool) string { - if !enabled { - return fmt.Sprintf("%v", s) - } - return fmt.Sprintf("\x1b[%dm%v\x1b[0m", color, s) -} +// writePart appends a formatted part to buf. +func (w ConsoleWriter) writePart(buf *bytes.Buffer, evt map[string]interface{}, p string) { + var f Formatter -func levelColor(level string) int { - switch level { - case "debug": - return cMagenta - case "info": - return cGreen - case "warn": - return cYellow - case "error", "fatal", "panic": - return cRed + switch p { + case LevelFieldName: + if w.FormatLevel == nil { + f = consoleDefaultFormatLevel(w.NoColor) + } else { + f = w.FormatLevel + } + case TimestampFieldName: + if w.FormatTimestamp == nil { + f = consoleDefaultFormatTimestamp(w.TimeFormat, w.NoColor) + } else { + f = w.FormatTimestamp + } + case MessageFieldName: + if w.FormatMessage == nil { + f = consoleDefaultFormatMessage + } else { + f = w.FormatMessage + } + case CallerFieldName: + if w.FormatCaller == nil { + f = consoleDefaultFormatCaller(w.NoColor) + } else { + f = w.FormatCaller + } default: - return cReset + if w.FormatFieldValue == nil { + f = consoleDefaultFormatFieldValue + } else { + f = w.FormatFieldValue + } + } + + var s = f(evt[p]) + + if len(s) > 0 { + buf.WriteString(s) + if p != w.PartsOrder[len(w.PartsOrder)-1] { // Skip space for last part + buf.WriteByte(' ') + } } } +// needsQuote returns true when the string s should be quoted in output. func needsQuote(s string) bool { for i := range s { if s[i] < 0x20 || s[i] > 0x7e || s[i] == ' ' || s[i] == '\\' || s[i] == '"' { @@ -115,3 +262,114 @@ func needsQuote(s string) bool { } return false } + +// colorize returns the string s wrapped in ANSI code c, unless disabled is true. 
+func colorize(s interface{}, c int, disabled bool) string { + if disabled { + return fmt.Sprintf("%s", s) + } + return fmt.Sprintf("\x1b[%dm%v\x1b[0m", c, s) +} + +// ----- DEFAULT FORMATTERS --------------------------------------------------- + +func consoleDefaultPartsOrder() []string { + return []string{ + TimestampFieldName, + LevelFieldName, + CallerFieldName, + MessageFieldName, + } +} + +func consoleDefaultFormatTimestamp(timeFormat string, noColor bool) Formatter { + if timeFormat == "" { + timeFormat = consoleDefaultTimeFormat + } + return func(i interface{}) string { + t := "" + switch tt := i.(type) { + case string: + ts, err := time.Parse(TimeFieldFormat, tt) + if err != nil { + t = tt + } else { + t = ts.Format(timeFormat) + } + case json.Number: + t = tt.String() + } + return colorize(t, colorFaint, noColor) + } +} + +func consoleDefaultFormatLevel(noColor bool) Formatter { + return func(i interface{}) string { + var l string + if ll, ok := i.(string); ok { + switch ll { + case "debug": + l = colorize("DBG", colorYellow, noColor) + case "info": + l = colorize("INF", colorGreen, noColor) + case "warn": + l = colorize("WRN", colorRed, noColor) + case "error": + l = colorize(colorize("ERR", colorRed, noColor), colorBold, noColor) + case "fatal": + l = colorize(colorize("FTL", colorRed, noColor), colorBold, noColor) + case "panic": + l = colorize(colorize("PNC", colorRed, noColor), colorBold, noColor) + default: + l = colorize("???", colorBold, noColor) + } + } else { + l = strings.ToUpper(fmt.Sprintf("%s", i))[0:3] + } + return l + } +} + +func consoleDefaultFormatCaller(noColor bool) Formatter { + return func(i interface{}) string { + var c string + if cc, ok := i.(string); ok { + c = cc + } + if len(c) > 0 { + cwd, err := os.Getwd() + if err == nil { + c = strings.TrimPrefix(c, cwd) + c = strings.TrimPrefix(c, "/") + } + c = colorize(c, colorBold, noColor) + colorize(" >", colorFaint, noColor) + } + return c + } +} + +func consoleDefaultFormatMessage(i interface{}) string { + return fmt.Sprintf("%s", i) +} + +func consoleDefaultFormatFieldName(noColor bool) Formatter { + return func(i interface{}) string { + return colorize(fmt.Sprintf("%s=", i), colorFaint, noColor) + } +} + +func consoleDefaultFormatFieldValue(i interface{}) string { + return fmt.Sprintf("%s", i) +} + +func consoleDefaultFormatErrFieldName(noColor bool) Formatter { + return func(i interface{}) string { + return colorize(fmt.Sprintf("%s=", i), colorRed, noColor) + } +} + +func consoleDefaultFormatErrFieldValue(noColor bool) Formatter { + return func(i interface{}) string { + return colorize(fmt.Sprintf("%s", i), colorRed, noColor) + } +} diff --git a/console_test.go b/console_test.go new file mode 100644 index 0000000..5bdfdaa --- /dev/null +++ b/console_test.go @@ -0,0 +1,238 @@ +package zerolog_test + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "strings" + "testing" + "time" + + "github.com/rs/zerolog" +) + +func ExampleConsoleWriter() { + log := zerolog.New(zerolog.ConsoleWriter{Out: os.Stdout, NoColor: true}) + + log.Info().Str("foo", "bar").Msg("Hello World") + // Output: INF Hello World foo=bar +} + +func ExampleConsoleWriter_customFormatters() { + out := zerolog.ConsoleWriter{Out: os.Stdout, NoColor: true} + out.FormatLevel = func(i interface{}) string { return strings.ToUpper(fmt.Sprintf("%-6s|", i)) } + out.FormatFieldName = func(i interface{}) string { return fmt.Sprintf("%s:", i) } + out.FormatFieldValue = func(i interface{}) string { return strings.ToUpper(fmt.Sprintf("%s", i)) } + log := 
zerolog.New(out) + + log.Info().Str("foo", "bar").Msg("Hello World") + // Output: INFO | Hello World foo:BAR +} + +func ExampleNewConsoleWriter() { + out := zerolog.NewConsoleWriter() + out.NoColor = true // For testing purposes only + log := zerolog.New(out) + + log.Debug().Str("foo", "bar").Msg("Hello World") + // Output: DBG Hello World foo=bar +} + +func ExampleNewConsoleWriter_customFormatters() { + out := zerolog.NewConsoleWriter( + func(w *zerolog.ConsoleWriter) { + // Customize time format + w.TimeFormat = time.RFC822 + // Customize level formatting + w.FormatLevel = func(i interface{}) string { return strings.ToUpper(fmt.Sprintf("[%-5s]", i)) } + }, + ) + out.NoColor = true // For testing purposes only + + log := zerolog.New(out) + + log.Info().Str("foo", "bar").Msg("Hello World") + // Output: [INFO ] Hello World foo=bar +} + +func TestConsoleLogger(t *testing.T) { + t.Run("Numbers", func(t *testing.T) { + buf := &bytes.Buffer{} + log := zerolog.New(zerolog.ConsoleWriter{Out: buf, NoColor: true}) + log.Info(). + Float64("float", 1.23). + Uint64("small", 123). + Uint64("big", 1152921504606846976). + Msg("msg") + if got, want := strings.TrimSpace(buf.String()), " INF msg big=1152921504606846976 float=1.23 small=123"; got != want { + t.Errorf("\ngot:\n%s\nwant:\n%s", got, want) + } + }) +} + +func TestConsoleWriter(t *testing.T) { + t.Run("Default field formatter", func(t *testing.T) { + buf := &bytes.Buffer{} + w := zerolog.ConsoleWriter{Out: buf, NoColor: true, PartsOrder: []string{"foo"}} + + _, err := w.Write([]byte(`{"foo" : "DEFAULT"}`)) + if err != nil { + t.Errorf("Unexpected error when writing output: %s", err) + } + + expectedOutput := "DEFAULT foo=DEFAULT\n" + actualOutput := buf.String() + if actualOutput != expectedOutput { + t.Errorf("Unexpected output %q, want: %q", actualOutput, expectedOutput) + } + }) + + t.Run("Write colorized", func(t *testing.T) { + buf := &bytes.Buffer{} + w := zerolog.ConsoleWriter{Out: buf, NoColor: false} + + _, err := w.Write([]byte(`{"level" : "warn", "message" : "Foobar"}`)) + if err != nil { + t.Errorf("Unexpected error when writing output: %s", err) + } + + expectedOutput := "\x1b[2m\x1b[0m \x1b[31mWRN\x1b[0m Foobar\n" + actualOutput := buf.String() + if actualOutput != expectedOutput { + t.Errorf("Unexpected output %q, want: %q", actualOutput, expectedOutput) + } + }) + + t.Run("Write fields", func(t *testing.T) { + buf := &bytes.Buffer{} + w := zerolog.ConsoleWriter{Out: buf, NoColor: true} + + d := time.Unix(0, 0).UTC().Format(time.RFC3339) + _, err := w.Write([]byte(`{"time" : "` + d + `", "level" : "debug", "message" : "Foobar", "foo" : "bar"}`)) + if err != nil { + t.Errorf("Unexpected error when writing output: %s", err) + } + + expectedOutput := "12:00AM DBG Foobar foo=bar\n" + actualOutput := buf.String() + if actualOutput != expectedOutput { + t.Errorf("Unexpected output %q, want: %q", actualOutput, expectedOutput) + } + }) + + t.Run("Write error field", func(t *testing.T) { + buf := &bytes.Buffer{} + w := zerolog.ConsoleWriter{Out: buf, NoColor: true} + + d := time.Unix(0, 0).UTC().Format(time.RFC3339) + evt := `{"time" : "` + d + `", "level" : "error", "message" : "Foobar", "aaa" : "bbb", "error" : "Error"}` + // t.Log(evt) + + _, err := w.Write([]byte(evt)) + if err != nil { + t.Errorf("Unexpected error when writing output: %s", err) + } + + expectedOutput := "12:00AM ERR Foobar error=Error aaa=bbb\n" + actualOutput := buf.String() + if actualOutput != expectedOutput { + t.Errorf("Unexpected output %q, want: %q", 
actualOutput, expectedOutput) + } + }) + + t.Run("Write caller field", func(t *testing.T) { + buf := &bytes.Buffer{} + w := zerolog.ConsoleWriter{Out: buf, NoColor: true} + + cwd, err := os.Getwd() + if err != nil { + t.Fatalf("Cannot get working directory: %s", err) + } + + d := time.Unix(0, 0).UTC().Format(time.RFC3339) + evt := `{"time" : "` + d + `", "level" : "debug", "message" : "Foobar", "foo" : "bar", "caller" : "` + cwd + `/foo/bar.go"}` + // t.Log(evt) + + _, err = w.Write([]byte(evt)) + if err != nil { + t.Errorf("Unexpected error when writing output: %s", err) + } + + expectedOutput := "12:00AM DBG foo/bar.go > Foobar foo=bar\n" + actualOutput := buf.String() + if actualOutput != expectedOutput { + t.Errorf("Unexpected output %q, want: %q", actualOutput, expectedOutput) + } + }) + + t.Run("Write JSON field", func(t *testing.T) { + buf := &bytes.Buffer{} + w := zerolog.ConsoleWriter{Out: buf, NoColor: true} + + evt := `{"level" : "debug", "message" : "Foobar", "foo" : [1, 2, 3], "bar" : true}` + // t.Log(evt) + + _, err := w.Write([]byte(evt)) + if err != nil { + t.Errorf("Unexpected error when writing output: %s", err) + } + + expectedOutput := " DBG Foobar bar=true foo=[1,2,3]\n" + actualOutput := buf.String() + if actualOutput != expectedOutput { + t.Errorf("Unexpected output %q, want: %q", actualOutput, expectedOutput) + } + }) +} + +func TestConsoleWriterConfiguration(t *testing.T) { + t.Run("Sets TimeFormat", func(t *testing.T) { + buf := &bytes.Buffer{} + w := zerolog.ConsoleWriter{Out: buf, NoColor: true, TimeFormat: time.RFC3339} + + d := time.Unix(0, 0).UTC().Format(time.RFC3339) + evt := `{"time" : "` + d + `", "level" : "info", "message" : "Foobar"}` + + _, err := w.Write([]byte(evt)) + if err != nil { + t.Errorf("Unexpected error when writing output: %s", err) + } + + expectedOutput := "1970-01-01T00:00:00Z INF Foobar\n" + actualOutput := buf.String() + if actualOutput != expectedOutput { + t.Errorf("Unexpected output %q, want: %q", actualOutput, expectedOutput) + } + }) + + t.Run("Sets PartsOrder", func(t *testing.T) { + buf := &bytes.Buffer{} + w := zerolog.ConsoleWriter{Out: buf, NoColor: true, PartsOrder: []string{"message", "level"}} + + evt := `{"level" : "info", "message" : "Foobar"}` + _, err := w.Write([]byte(evt)) + if err != nil { + t.Errorf("Unexpected error when writing output: %s", err) + } + + expectedOutput := "Foobar INF\n" + actualOutput := buf.String() + if actualOutput != expectedOutput { + t.Errorf("Unexpected output %q, want: %q", actualOutput, expectedOutput) + } + }) +} + +func BenchmarkConsoleWriter(b *testing.B) { + b.ResetTimer() + b.ReportAllocs() + + var msg = []byte(`{"level" : "info", "foo" : "bar", "message" : "HELLO", "time" : "1990-01-01"}`) + + w := zerolog.ConsoleWriter{Out: ioutil.Discard, NoColor: false} + + for i := 0; i < b.N; i++ { + w.Write(msg) + } +} diff --git a/context.go b/context.go index b52bb0e..f807802 100644 --- a/context.go +++ b/context.go @@ -2,9 +2,8 @@ package zerolog import ( "io/ioutil" + "net" "time" - - "github.com/rs/zerolog/internal/json" ) // Context configures a new sub-logger with contextual fields. @@ -25,9 +24,9 @@ func (c Context) Fields(fields map[string]interface{}) Context { // Dict adds the field key with the dict to the logger context. func (c Context) Dict(key string, dict *Event) Context { - dict.buf = append(dict.buf, '}') - c.l.context = append(json.AppendKey(c.l.context, key), dict.buf...) 
- eventPool.Put(dict) + dict.buf = enc.AppendEndMarker(dict.buf) + c.l.context = append(enc.AppendKey(c.l.context, key), dict.buf...) + putEvent(dict) return c } @@ -35,7 +34,7 @@ func (c Context) Dict(key string, dict *Event) Context { // Use zerolog.Arr() to create the array or pass a type that // implement the LogArrayMarshaler interface. func (c Context) Array(key string, arr LogArrayMarshaler) Context { - c.l.context = json.AppendKey(c.l.context, key) + c.l.context = enc.AppendKey(c.l.context, key) if arr, ok := arr.(*Array); ok { c.l.context = arr.write(c.l.context) return c @@ -53,208 +52,251 @@ func (c Context) Array(key string, arr LogArrayMarshaler) Context { // Object marshals an object that implement the LogObjectMarshaler interface. func (c Context) Object(key string, obj LogObjectMarshaler) Context { - e := newEvent(levelWriterAdapter{ioutil.Discard}, 0, true) + e := newEvent(levelWriterAdapter{ioutil.Discard}, 0) e.Object(key, obj) - e.buf[0] = ',' // A new event starts as an object, we want to embed it. - c.l.context = append(c.l.context, e.buf...) - eventPool.Put(e) + c.l.context = enc.AppendObjectData(c.l.context, e.buf) + putEvent(e) + return c +} + +// EmbedObject marshals and Embeds an object that implement the LogObjectMarshaler interface. +func (c Context) EmbedObject(obj LogObjectMarshaler) Context { + e := newEvent(levelWriterAdapter{ioutil.Discard}, 0) + e.EmbedObject(obj) + c.l.context = enc.AppendObjectData(c.l.context, e.buf) + putEvent(e) return c } // Str adds the field key with val as a string to the logger context. func (c Context) Str(key, val string) Context { - c.l.context = json.AppendString(json.AppendKey(c.l.context, key), val) + c.l.context = enc.AppendString(enc.AppendKey(c.l.context, key), val) return c } // Strs adds the field key with val as a string to the logger context. func (c Context) Strs(key string, vals []string) Context { - c.l.context = json.AppendStrings(json.AppendKey(c.l.context, key), vals) + c.l.context = enc.AppendStrings(enc.AppendKey(c.l.context, key), vals) return c } // Bytes adds the field key with val as a []byte to the logger context. func (c Context) Bytes(key string, val []byte) Context { - c.l.context = json.AppendBytes(json.AppendKey(c.l.context, key), val) + c.l.context = enc.AppendBytes(enc.AppendKey(c.l.context, key), val) return c } -// AnErr adds the field key with err as a string to the logger context. +// Hex adds the field key with val as a hex string to the logger context. +func (c Context) Hex(key string, val []byte) Context { + c.l.context = enc.AppendHex(enc.AppendKey(c.l.context, key), val) + return c +} + +// RawJSON adds already encoded JSON to context. +// +// No sanity check is performed on b; it must not contain carriage returns and +// be valid JSON. +func (c Context) RawJSON(key string, b []byte) Context { + c.l.context = appendJSON(enc.AppendKey(c.l.context, key), b) + return c +} + +// AnErr adds the field key with serialized err to the logger context. func (c Context) AnErr(key string, err error) Context { - if err != nil { - c.l.context = json.AppendError(json.AppendKey(c.l.context, key), err) + marshaled := ErrorMarshalFunc(err) + switch m := marshaled.(type) { + case nil: + return c + case LogObjectMarshaler: + return c.Object(key, m) + case error: + return c.Str(key, m.Error()) + case string: + return c.Str(key, m) + default: + return c.Interface(key, m) } - return c } -// Errs adds the field key with errs as an array of strings to the logger context. 
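The reworked `AnErr`/`Err`/`Errs` above route every error through `ErrorMarshalFunc` and then switch on the returned type (`LogObjectMarshaler`, `error`, `string`, or anything else). As a minimal sketch of how a caller might hook into that dispatch — assuming `ErrorMarshalFunc` is an exported, settable package variable whose declaration sits outside this hunk — something like the following should work:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
)

func main() {
	// Illustration only, not part of this patch: prefix every error with its
	// Go type before zerolog serializes it. Because the hook returns a string,
	// the AnErr/Err dispatch writes it as a plain string field.
	zerolog.ErrorMarshalFunc = func(err error) interface{} {
		return fmt.Sprintf("%T: %v", err, err)
	}

	log.Error().Err(errors.New("boom")).Msg("request failed")
	// Expected shape: {"level":"error","error":"*errors.errorString: boom","message":"request failed"}
}
```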
+// Errs adds the field key with errs as an array of serialized errors to the +// logger context. func (c Context) Errs(key string, errs []error) Context { - c.l.context = json.AppendErrors(json.AppendKey(c.l.context, key), errs) - return c + arr := Arr() + for _, err := range errs { + marshaled := ErrorMarshalFunc(err) + switch m := marshaled.(type) { + case LogObjectMarshaler: + arr = arr.Object(m) + case error: + arr = arr.Str(m.Error()) + case string: + arr = arr.Str(m) + default: + arr = arr.Interface(m) + } + } + + return c.Array(key, arr) } -// Err adds the field "error" with err as a string to the logger context. -// To customize the key name, change zerolog.ErrorFieldName. +// Err adds the field "error" with serialized err to the logger context. func (c Context) Err(err error) Context { - if err != nil { - c.l.context = json.AppendError(json.AppendKey(c.l.context, ErrorFieldName), err) - } - return c + return c.AnErr(ErrorFieldName, err) } // Bool adds the field key with val as a bool to the logger context. func (c Context) Bool(key string, b bool) Context { - c.l.context = json.AppendBool(json.AppendKey(c.l.context, key), b) + c.l.context = enc.AppendBool(enc.AppendKey(c.l.context, key), b) return c } // Bools adds the field key with val as a []bool to the logger context. func (c Context) Bools(key string, b []bool) Context { - c.l.context = json.AppendBools(json.AppendKey(c.l.context, key), b) + c.l.context = enc.AppendBools(enc.AppendKey(c.l.context, key), b) return c } // Int adds the field key with i as a int to the logger context. func (c Context) Int(key string, i int) Context { - c.l.context = json.AppendInt(json.AppendKey(c.l.context, key), i) + c.l.context = enc.AppendInt(enc.AppendKey(c.l.context, key), i) return c } // Ints adds the field key with i as a []int to the logger context. func (c Context) Ints(key string, i []int) Context { - c.l.context = json.AppendInts(json.AppendKey(c.l.context, key), i) + c.l.context = enc.AppendInts(enc.AppendKey(c.l.context, key), i) return c } // Int8 adds the field key with i as a int8 to the logger context. func (c Context) Int8(key string, i int8) Context { - c.l.context = json.AppendInt8(json.AppendKey(c.l.context, key), i) + c.l.context = enc.AppendInt8(enc.AppendKey(c.l.context, key), i) return c } // Ints8 adds the field key with i as a []int8 to the logger context. func (c Context) Ints8(key string, i []int8) Context { - c.l.context = json.AppendInts8(json.AppendKey(c.l.context, key), i) + c.l.context = enc.AppendInts8(enc.AppendKey(c.l.context, key), i) return c } // Int16 adds the field key with i as a int16 to the logger context. func (c Context) Int16(key string, i int16) Context { - c.l.context = json.AppendInt16(json.AppendKey(c.l.context, key), i) + c.l.context = enc.AppendInt16(enc.AppendKey(c.l.context, key), i) return c } // Ints16 adds the field key with i as a []int16 to the logger context. func (c Context) Ints16(key string, i []int16) Context { - c.l.context = json.AppendInts16(json.AppendKey(c.l.context, key), i) + c.l.context = enc.AppendInts16(enc.AppendKey(c.l.context, key), i) return c } // Int32 adds the field key with i as a int32 to the logger context. func (c Context) Int32(key string, i int32) Context { - c.l.context = json.AppendInt32(json.AppendKey(c.l.context, key), i) + c.l.context = enc.AppendInt32(enc.AppendKey(c.l.context, key), i) return c } // Ints32 adds the field key with i as a []int32 to the logger context. 
func (c Context) Ints32(key string, i []int32) Context { - c.l.context = json.AppendInts32(json.AppendKey(c.l.context, key), i) + c.l.context = enc.AppendInts32(enc.AppendKey(c.l.context, key), i) return c } // Int64 adds the field key with i as a int64 to the logger context. func (c Context) Int64(key string, i int64) Context { - c.l.context = json.AppendInt64(json.AppendKey(c.l.context, key), i) + c.l.context = enc.AppendInt64(enc.AppendKey(c.l.context, key), i) return c } // Ints64 adds the field key with i as a []int64 to the logger context. func (c Context) Ints64(key string, i []int64) Context { - c.l.context = json.AppendInts64(json.AppendKey(c.l.context, key), i) + c.l.context = enc.AppendInts64(enc.AppendKey(c.l.context, key), i) return c } // Uint adds the field key with i as a uint to the logger context. func (c Context) Uint(key string, i uint) Context { - c.l.context = json.AppendUint(json.AppendKey(c.l.context, key), i) + c.l.context = enc.AppendUint(enc.AppendKey(c.l.context, key), i) return c } // Uints adds the field key with i as a []uint to the logger context. func (c Context) Uints(key string, i []uint) Context { - c.l.context = json.AppendUints(json.AppendKey(c.l.context, key), i) + c.l.context = enc.AppendUints(enc.AppendKey(c.l.context, key), i) return c } // Uint8 adds the field key with i as a uint8 to the logger context. func (c Context) Uint8(key string, i uint8) Context { - c.l.context = json.AppendUint8(json.AppendKey(c.l.context, key), i) + c.l.context = enc.AppendUint8(enc.AppendKey(c.l.context, key), i) return c } // Uints8 adds the field key with i as a []uint8 to the logger context. func (c Context) Uints8(key string, i []uint8) Context { - c.l.context = json.AppendUints8(json.AppendKey(c.l.context, key), i) + c.l.context = enc.AppendUints8(enc.AppendKey(c.l.context, key), i) return c } // Uint16 adds the field key with i as a uint16 to the logger context. func (c Context) Uint16(key string, i uint16) Context { - c.l.context = json.AppendUint16(json.AppendKey(c.l.context, key), i) + c.l.context = enc.AppendUint16(enc.AppendKey(c.l.context, key), i) return c } // Uints16 adds the field key with i as a []uint16 to the logger context. func (c Context) Uints16(key string, i []uint16) Context { - c.l.context = json.AppendUints16(json.AppendKey(c.l.context, key), i) + c.l.context = enc.AppendUints16(enc.AppendKey(c.l.context, key), i) return c } // Uint32 adds the field key with i as a uint32 to the logger context. func (c Context) Uint32(key string, i uint32) Context { - c.l.context = json.AppendUint32(json.AppendKey(c.l.context, key), i) + c.l.context = enc.AppendUint32(enc.AppendKey(c.l.context, key), i) return c } // Uints32 adds the field key with i as a []uint32 to the logger context. func (c Context) Uints32(key string, i []uint32) Context { - c.l.context = json.AppendUints32(json.AppendKey(c.l.context, key), i) + c.l.context = enc.AppendUints32(enc.AppendKey(c.l.context, key), i) return c } // Uint64 adds the field key with i as a uint64 to the logger context. func (c Context) Uint64(key string, i uint64) Context { - c.l.context = json.AppendUint64(json.AppendKey(c.l.context, key), i) + c.l.context = enc.AppendUint64(enc.AppendKey(c.l.context, key), i) return c } // Uints64 adds the field key with i as a []uint64 to the logger context. 
func (c Context) Uints64(key string, i []uint64) Context { - c.l.context = json.AppendUints64(json.AppendKey(c.l.context, key), i) + c.l.context = enc.AppendUints64(enc.AppendKey(c.l.context, key), i) return c } // Float32 adds the field key with f as a float32 to the logger context. func (c Context) Float32(key string, f float32) Context { - c.l.context = json.AppendFloat32(json.AppendKey(c.l.context, key), f) + c.l.context = enc.AppendFloat32(enc.AppendKey(c.l.context, key), f) return c } // Floats32 adds the field key with f as a []float32 to the logger context. func (c Context) Floats32(key string, f []float32) Context { - c.l.context = json.AppendFloats32(json.AppendKey(c.l.context, key), f) + c.l.context = enc.AppendFloats32(enc.AppendKey(c.l.context, key), f) return c } // Float64 adds the field key with f as a float64 to the logger context. func (c Context) Float64(key string, f float64) Context { - c.l.context = json.AppendFloat64(json.AppendKey(c.l.context, key), f) + c.l.context = enc.AppendFloat64(enc.AppendKey(c.l.context, key), f) return c } // Floats64 adds the field key with f as a []float64 to the logger context. func (c Context) Floats64(key string, f []float64) Context { - c.l.context = json.AppendFloats64(json.AppendKey(c.l.context, key), f) + c.l.context = enc.AppendFloats64(enc.AppendKey(c.l.context, key), f) return c } @@ -268,6 +310,8 @@ var th = timestampHook{} // Timestamp adds the current local time as UNIX timestamp to the logger context with the "time" key. // To customize the key name, change zerolog.TimestampFieldName. +// +// NOTE: It won't dedupe the "time" key if the *Context has one already. func (c Context) Timestamp() Context { c.l = c.l.Hook(th) return c @@ -275,38 +319,39 @@ func (c Context) Timestamp() Context { // Time adds the field key with t formated as string using zerolog.TimeFieldFormat. func (c Context) Time(key string, t time.Time) Context { - c.l.context = json.AppendTime(json.AppendKey(c.l.context, key), t, TimeFieldFormat) + c.l.context = enc.AppendTime(enc.AppendKey(c.l.context, key), t, TimeFieldFormat) return c } // Times adds the field key with t formated as string using zerolog.TimeFieldFormat. func (c Context) Times(key string, t []time.Time) Context { - c.l.context = json.AppendTimes(json.AppendKey(c.l.context, key), t, TimeFieldFormat) + c.l.context = enc.AppendTimes(enc.AppendKey(c.l.context, key), t, TimeFieldFormat) return c } // Dur adds the fields key with d divided by unit and stored as a float. func (c Context) Dur(key string, d time.Duration) Context { - c.l.context = json.AppendDuration(json.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger) + c.l.context = enc.AppendDuration(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger) return c } // Durs adds the fields key with d divided by unit and stored as a float. func (c Context) Durs(key string, d []time.Duration) Context { - c.l.context = json.AppendDurations(json.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger) + c.l.context = enc.AppendDurations(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger) return c } // Interface adds the field key with obj marshaled using reflection. 
func (c Context) Interface(key string, i interface{}) Context { - c.l.context = json.AppendInterface(json.AppendKey(c.l.context, key), i) + c.l.context = enc.AppendInterface(enc.AppendKey(c.l.context, key), i) return c } type callerHook struct{} func (ch callerHook) Run(e *Event, level Level, msg string) { - e.caller(4) + // Three extra frames to skip (added by hook infra). + e.caller(CallerSkipFrameCount + 3) } var ch = callerHook{} @@ -323,10 +368,28 @@ func (sh stackTraceHook) Run(e *Event, level Level, msg string) { e.Stack() } -var sh = callerHook{} +var sh = stackTraceHook{} // Stack enables stack trace printing for the error passed to Err(). func (c Context) Stack() Context { c.l = c.l.Hook(sh) return c } + +// IPAddr adds IPv4 or IPv6 Address to the context +func (c Context) IPAddr(key string, ip net.IP) Context { + c.l.context = enc.AppendIPAddr(enc.AppendKey(c.l.context, key), ip) + return c +} + +// IPPrefix adds IPv4 or IPv6 Prefix (address and mask) to the context +func (c Context) IPPrefix(key string, pfx net.IPNet) Context { + c.l.context = enc.AppendIPPrefix(enc.AppendKey(c.l.context, key), pfx) + return c +} + +// MACAddr adds MAC address to the context +func (c Context) MACAddr(key string, ha net.HardwareAddr) Context { + c.l.context = enc.AppendMACAddr(enc.AppendKey(c.l.context, key), ha) + return c +} diff --git a/ctx.go b/ctx.go index 04d8a11..2b7a682 100644 --- a/ctx.go +++ b/ctx.go @@ -2,38 +2,39 @@ package zerolog import ( "context" - "io/ioutil" ) var disabledLogger *Logger func init() { - l := New(ioutil.Discard).Level(Disabled) + l := Nop() disabledLogger = &l } type ctxKey struct{} // WithContext returns a copy of ctx with l associated. If an instance of Logger -// is already in the context, the pointer to this logger is updated with l. +// is already in the context, the context is not updated. // // For instance, to add a field to an existing logger in the context, use this // notation: // // ctx := r.Context() // l := zerolog.Ctx(ctx) -// ctx = l.With().Str("foo", "bar").WithContext(ctx) -func (l Logger) WithContext(ctx context.Context) context.Context { +// l.UpdateContext(func(c Context) Context { +// return c.Str("bar", "baz") +// }) +func (l *Logger) WithContext(ctx context.Context) context.Context { if lp, ok := ctx.Value(ctxKey{}).(*Logger); ok { - // Update existing pointer. - *lp = l - return ctx - } - if l.level == Disabled { + if lp == l { + // Do not store same logger. + return ctx + } + } else if l.level == Disabled { // Do not store disabled logger. return ctx } - return context.WithValue(ctx, ctxKey{}, &l) + return context.WithValue(ctx, ctxKey{}, l) } // Ctx returns the Logger associated with the ctx. 
If no logger diff --git a/ctx_test.go b/ctx_test.go index 942b723..646cd0f 100644 --- a/ctx_test.go +++ b/ctx_test.go @@ -30,18 +30,34 @@ func TestCtx(t *testing.T) { } func TestCtxDisabled(t *testing.T) { - ctx := disabledLogger.WithContext(context.Background()) + dl := New(ioutil.Discard).Level(Disabled) + ctx := dl.WithContext(context.Background()) if ctx != context.Background() { t.Error("WithContext stored a disabled logger") } - ctx = New(ioutil.Discard).WithContext(ctx) - if reflect.DeepEqual(Ctx(ctx), disabledLogger) { + l := New(ioutil.Discard).With().Str("foo", "bar").Logger() + ctx = l.WithContext(ctx) + if Ctx(ctx) != &l { t.Error("WithContext did not store logger") } - ctx = disabledLogger.WithContext(ctx) - if !reflect.DeepEqual(Ctx(ctx), disabledLogger) { - t.Error("WithContext did not update logger pointer with disabled logger") + l.UpdateContext(func(c Context) Context { + return c.Str("bar", "baz") + }) + ctx = l.WithContext(ctx) + if Ctx(ctx) != &l { + t.Error("WithContext did not store updated logger") + } + + l = l.Level(DebugLevel) + ctx = l.WithContext(ctx) + if Ctx(ctx) != &l { + t.Error("WithContext did not store copied logger") + } + + ctx = dl.WithContext(ctx) + if Ctx(ctx) != &dl { + t.Error("WithContext did not overide logger with a disabled logger") } } diff --git a/diode/diode.go b/diode/diode.go new file mode 100644 index 0000000..07224fa --- /dev/null +++ b/diode/diode.go @@ -0,0 +1,114 @@ +// Package diode provides a thread-safe, lock-free, non-blocking io.Writer +// wrapper. +package diode + +import ( + "context" + "io" + "sync" + "time" + + "github.com/rs/zerolog/diode/internal/diodes" +) + +var bufPool = &sync.Pool{ + New: func() interface{} { + return make([]byte, 0, 500) + }, +} + +type Alerter func(missed int) + +type diodeFetcher interface { + diodes.Diode + Next() diodes.GenericDataType +} + +// Writer is a io.Writer wrapper that uses a diode to make Write lock-free, +// non-blocking and thread safe. +type Writer struct { + w io.Writer + d diodeFetcher + c context.CancelFunc + done chan struct{} +} + +// NewWriter creates a writer wrapping w with a many-to-one diode in order to +// never block log producers and drop events if the writer can't keep up with +// the flow of data. +// +// Use a diode.Writer when +// +// wr := diode.NewWriter(w, 1000, 0, func(missed int) { +// log.Printf("Dropped %d messages", missed) +// }) +// log := zerolog.New(wr) +// +// If pollInterval is greater than 0, a poller is used otherwise a waiter is +// used. +// +// See code.cloudfoundry.org/go-diodes for more info on diode. +func NewWriter(w io.Writer, size int, poolInterval time.Duration, f Alerter) Writer { + ctx, cancel := context.WithCancel(context.Background()) + dw := Writer{ + w: w, + c: cancel, + done: make(chan struct{}), + } + if f == nil { + f = func(int) {} + } + d := diodes.NewManyToOne(size, diodes.AlertFunc(f)) + if poolInterval > 0 { + dw.d = diodes.NewPoller(d, + diodes.WithPollingInterval(poolInterval), + diodes.WithPollingContext(ctx)) + } else { + dw.d = diodes.NewWaiter(d, + diodes.WithWaiterContext(ctx)) + } + go dw.poll() + return dw +} + +func (dw Writer) Write(p []byte) (n int, err error) { + // p is pooled in zerolog so we can't hold it passed this call, hence the + // copy. + p = append(bufPool.Get().([]byte), p...) + dw.d.Set(diodes.GenericDataType(&p)) + return len(p), nil +} + +// Close releases the diode poller and call Close on the wrapped writer if +// io.Closer is implemented. 
+func (dw Writer) Close() error { + dw.c() + <-dw.done + if w, ok := dw.w.(io.Closer); ok { + return w.Close() + } + return nil +} + +func (dw Writer) poll() { + defer close(dw.done) + for { + d := dw.d.Next() + if d == nil { + return + } + p := *(*[]byte)(d) + dw.w.Write(p) + + // Proper usage of a sync.Pool requires each entry to have approximately + // the same memory cost. To obtain this property when the stored type + // contains a variably-sized buffer, we add a hard limit on the maximum buffer + // to place back in the pool. + // + // See https://golang.org/issue/23199 + const maxSize = 1 << 16 // 64KiB + if cap(p) <= maxSize { + bufPool.Put(p[:0]) + } + } +} diff --git a/diode/diode_example_test.go b/diode/diode_example_test.go new file mode 100644 index 0000000..3540db6 --- /dev/null +++ b/diode/diode_example_test.go @@ -0,0 +1,23 @@ +// +build !binary_log + +package diode_test + +import ( + "fmt" + "os" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/diode" +) + +func ExampleNewWriter() { + w := diode.NewWriter(os.Stdout, 1000, 0, func(missed int) { + fmt.Printf("Dropped %d messages\n", missed) + }) + log := zerolog.New(w) + log.Print("test") + + w.Close() + + // Output: {"level":"debug","message":"test"} +} diff --git a/diode/diode_test.go b/diode/diode_test.go new file mode 100644 index 0000000..098bd44 --- /dev/null +++ b/diode/diode_test.go @@ -0,0 +1,54 @@ +package diode_test + +import ( + "bytes" + "fmt" + "io/ioutil" + "log" + "os" + "testing" + "time" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/diode" + "github.com/rs/zerolog/internal/cbor" +) + +func TestNewWriter(t *testing.T) { + buf := bytes.Buffer{} + w := diode.NewWriter(&buf, 1000, 0, func(missed int) { + fmt.Printf("Dropped %d messages\n", missed) + }) + log := zerolog.New(w) + log.Print("test") + + w.Close() + want := "{\"level\":\"debug\",\"message\":\"test\"}\n" + got := cbor.DecodeIfBinaryToString(buf.Bytes()) + if got != want { + t.Errorf("Diode New Writer Test failed. got:%s, want:%s!", got, want) + } +} + +func Benchmark(b *testing.B) { + log.SetOutput(ioutil.Discard) + defer log.SetOutput(os.Stderr) + benchs := map[string]time.Duration{ + "Waiter": 0, + "Pooler": 10 * time.Millisecond, + } + for name, interval := range benchs { + b.Run(name, func(b *testing.B) { + w := diode.NewWriter(ioutil.Discard, 100000, interval, nil) + log := zerolog.New(w) + defer w.Close() + + b.SetParallelism(1000) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + log.Print("test") + } + }) + }) + } +} diff --git a/diode/internal/diodes/README b/diode/internal/diodes/README new file mode 100644 index 0000000..6c4ec5f --- /dev/null +++ b/diode/internal/diodes/README @@ -0,0 +1 @@ +Copied from https://github.com/cloudfoundry/go-diodes to avoid test dependencies. diff --git a/diode/internal/diodes/many_to_one.go b/diode/internal/diodes/many_to_one.go new file mode 100644 index 0000000..0f562f7 --- /dev/null +++ b/diode/internal/diodes/many_to_one.go @@ -0,0 +1,130 @@ +package diodes + +import ( + "log" + "sync/atomic" + "unsafe" +) + +// ManyToOne diode is optimal for many writers (go-routines B-n) and a single +// reader (go-routine A). It is not thread safe for multiple readers. +type ManyToOne struct { + buffer []unsafe.Pointer + writeIndex uint64 + readIndex uint64 + alerter Alerter +} + +// NewManyToOne creates a new diode (ring buffer). The ManyToOne diode +// is optimzed for many writers (on go-routines B-n) and a single reader +// (on go-routine A). The alerter is invoked on the read's go-routine. 
It is +// called when it notices that the writer go-routine has passed it and wrote +// over data. A nil can be used to ignore alerts. +func NewManyToOne(size int, alerter Alerter) *ManyToOne { + if alerter == nil { + alerter = AlertFunc(func(int) {}) + } + + d := &ManyToOne{ + buffer: make([]unsafe.Pointer, size), + alerter: alerter, + } + + // Start write index at the value before 0 + // to allow the first write to use AddUint64 + // and still have a beginning index of 0 + d.writeIndex = ^d.writeIndex + return d +} + +// Set sets the data in the next slot of the ring buffer. +func (d *ManyToOne) Set(data GenericDataType) { + for { + writeIndex := atomic.AddUint64(&d.writeIndex, 1) + idx := writeIndex % uint64(len(d.buffer)) + old := atomic.LoadPointer(&d.buffer[idx]) + + if old != nil && + (*bucket)(old) != nil && + (*bucket)(old).seq > writeIndex-uint64(len(d.buffer)) { + log.Println("Diode set collision: consider using a larger diode") + continue + } + + newBucket := &bucket{ + data: data, + seq: writeIndex, + } + + if !atomic.CompareAndSwapPointer(&d.buffer[idx], old, unsafe.Pointer(newBucket)) { + log.Println("Diode set collision: consider using a larger diode") + continue + } + + return + } +} + +// TryNext will attempt to read from the next slot of the ring buffer. +// If there is not data available, it will return (nil, false). +func (d *ManyToOne) TryNext() (data GenericDataType, ok bool) { + // Read a value from the ring buffer based on the readIndex. + idx := d.readIndex % uint64(len(d.buffer)) + result := (*bucket)(atomic.SwapPointer(&d.buffer[idx], nil)) + + // When the result is nil that means the writer has not had the + // opportunity to write a value into the diode. This value must be ignored + // and the read head must not increment. + if result == nil { + return nil, false + } + + // When the seq value is less than the current read index that means a + // value was read from idx that was previously written but has since has + // been dropped. This value must be ignored and the read head must not + // increment. + // + // The simulation for this scenario assumes the fast forward occurred as + // detailed below. + // + // 5. The reader reads again getting seq 5. It then reads again expecting + // seq 6 but gets seq 2. This is a read of a stale value that was + // effectively "dropped" so the read fails and the read head stays put. + // `| 4 | 5 | 2 | 3 |` r: 7, w: 6 + // + if result.seq < d.readIndex { + return nil, false + } + + // When the seq value is greater than the current read index that means a + // value was read from idx that overwrote the value that was expected to + // be at this idx. This happens when the writer has lapped the reader. The + // reader needs to catch up to the writer so it moves its write head to + // the new seq, effectively dropping the messages that were not read in + // between the two values. + // + // Here is a simulation of this scenario: + // + // 1. Both the read and write heads start at 0. + // `| nil | nil | nil | nil |` r: 0, w: 0 + // 2. The writer fills the buffer. + // `| 0 | 1 | 2 | 3 |` r: 0, w: 4 + // 3. The writer laps the read head. + // `| 4 | 5 | 2 | 3 |` r: 0, w: 6 + // 4. The reader reads the first value, expecting a seq of 0 but reads 4, + // this forces the reader to fast forward to 5. 
+ // `| 4 | 5 | 2 | 3 |` r: 5, w: 6 + // + if result.seq > d.readIndex { + dropped := result.seq - d.readIndex + d.readIndex = result.seq + d.alerter.Alert(int(dropped)) + } + + // Only increment read index if a regular read occurred (where seq was + // equal to readIndex) or a value was read that caused a fast forward + // (where seq was greater than readIndex). + // + d.readIndex++ + return result.data, true +} diff --git a/diode/internal/diodes/one_to_one.go b/diode/internal/diodes/one_to_one.go new file mode 100644 index 0000000..aaf66d1 --- /dev/null +++ b/diode/internal/diodes/one_to_one.go @@ -0,0 +1,129 @@ +package diodes + +import ( + "sync/atomic" + "unsafe" +) + +// GenericDataType is the data type the diodes operate on. +type GenericDataType unsafe.Pointer + +// Alerter is used to report how many values were overwritten since the +// last write. +type Alerter interface { + Alert(missed int) +} + +// AlertFunc type is an adapter to allow the use of ordinary functions as +// Alert handlers. +type AlertFunc func(missed int) + +// Alert calls f(missed) +func (f AlertFunc) Alert(missed int) { + f(missed) +} + +type bucket struct { + data GenericDataType + seq uint64 // seq is the recorded write index at the time of writing +} + +// OneToOne diode is meant to be used by a single reader and a single writer. +// It is not thread safe if used otherwise. +type OneToOne struct { + buffer []unsafe.Pointer + writeIndex uint64 + readIndex uint64 + alerter Alerter +} + +// NewOneToOne creates a new diode is meant to be used by a single reader and +// a single writer. The alerter is invoked on the read's go-routine. It is +// called when it notices that the writer go-routine has passed it and wrote +// over data. A nil can be used to ignore alerts. +func NewOneToOne(size int, alerter Alerter) *OneToOne { + if alerter == nil { + alerter = AlertFunc(func(int) {}) + } + + return &OneToOne{ + buffer: make([]unsafe.Pointer, size), + alerter: alerter, + } +} + +// Set sets the data in the next slot of the ring buffer. +func (d *OneToOne) Set(data GenericDataType) { + idx := d.writeIndex % uint64(len(d.buffer)) + + newBucket := &bucket{ + data: data, + seq: d.writeIndex, + } + d.writeIndex++ + + atomic.StorePointer(&d.buffer[idx], unsafe.Pointer(newBucket)) +} + +// TryNext will attempt to read from the next slot of the ring buffer. +// If there is no data available, it will return (nil, false). +func (d *OneToOne) TryNext() (data GenericDataType, ok bool) { + // Read a value from the ring buffer based on the readIndex. + idx := d.readIndex % uint64(len(d.buffer)) + result := (*bucket)(atomic.SwapPointer(&d.buffer[idx], nil)) + + // When the result is nil that means the writer has not had the + // opportunity to write a value into the diode. This value must be ignored + // and the read head must not increment. + if result == nil { + return nil, false + } + + // When the seq value is less than the current read index that means a + // value was read from idx that was previously written but has since has + // been dropped. This value must be ignored and the read head must not + // increment. + // + // The simulation for this scenario assumes the fast forward occurred as + // detailed below. + // + // 5. The reader reads again getting seq 5. It then reads again expecting + // seq 6 but gets seq 2. This is a read of a stale value that was + // effectively "dropped" so the read fails and the read head stays put. 
+ // `| 4 | 5 | 2 | 3 |` r: 7, w: 6 + // + if result.seq < d.readIndex { + return nil, false + } + + // When the seq value is greater than the current read index that means a + // value was read from idx that overwrote the value that was expected to + // be at this idx. This happens when the writer has lapped the reader. The + // reader needs to catch up to the writer so it moves its write head to + // the new seq, effectively dropping the messages that were not read in + // between the two values. + // + // Here is a simulation of this scenario: + // + // 1. Both the read and write heads start at 0. + // `| nil | nil | nil | nil |` r: 0, w: 0 + // 2. The writer fills the buffer. + // `| 0 | 1 | 2 | 3 |` r: 0, w: 4 + // 3. The writer laps the read head. + // `| 4 | 5 | 2 | 3 |` r: 0, w: 6 + // 4. The reader reads the first value, expecting a seq of 0 but reads 4, + // this forces the reader to fast forward to 5. + // `| 4 | 5 | 2 | 3 |` r: 5, w: 6 + // + if result.seq > d.readIndex { + dropped := result.seq - d.readIndex + d.readIndex = result.seq + d.alerter.Alert(int(dropped)) + } + + // Only increment read index if a regular read occurred (where seq was + // equal to readIndex) or a value was read that caused a fast forward + // (where seq was greater than readIndex). + d.readIndex++ + return result.data, true +} diff --git a/diode/internal/diodes/poller.go b/diode/internal/diodes/poller.go new file mode 100644 index 0000000..d317a23 --- /dev/null +++ b/diode/internal/diodes/poller.go @@ -0,0 +1,80 @@ +package diodes + +import ( + "context" + "time" +) + +// Diode is any implementation of a diode. +type Diode interface { + Set(GenericDataType) + TryNext() (GenericDataType, bool) +} + +// Poller will poll a diode until a value is available. +type Poller struct { + Diode + interval time.Duration + ctx context.Context +} + +// PollerConfigOption can be used to setup the poller. +type PollerConfigOption func(*Poller) + +// WithPollingInterval sets the interval at which the diode is queried +// for new data. The default is 10ms. +func WithPollingInterval(interval time.Duration) PollerConfigOption { + return PollerConfigOption(func(c *Poller) { + c.interval = interval + }) +} + +// WithPollingContext sets the context to cancel any retrieval (Next()). It +// will not change any results for adding data (Set()). Default is +// context.Background(). +func WithPollingContext(ctx context.Context) PollerConfigOption { + return PollerConfigOption(func(c *Poller) { + c.ctx = ctx + }) +} + +// NewPoller returns a new Poller that wraps the given diode. +func NewPoller(d Diode, opts ...PollerConfigOption) *Poller { + p := &Poller{ + Diode: d, + interval: 10 * time.Millisecond, + ctx: context.Background(), + } + + for _, o := range opts { + o(p) + } + + return p +} + +// Next polls the diode until data is available or until the context is done. +// If the context is done, then nil will be returned. 
+func (p *Poller) Next() GenericDataType { + for { + data, ok := p.Diode.TryNext() + if !ok { + if p.isDone() { + return nil + } + + time.Sleep(p.interval) + continue + } + return data + } +} + +func (p *Poller) isDone() bool { + select { + case <-p.ctx.Done(): + return true + default: + return false + } +} diff --git a/diode/internal/diodes/waiter.go b/diode/internal/diodes/waiter.go new file mode 100644 index 0000000..a3770ff --- /dev/null +++ b/diode/internal/diodes/waiter.go @@ -0,0 +1,83 @@ +package diodes + +import ( + "context" + "sync" +) + +// Waiter will use a conditional mutex to alert the reader to when data is +// available. +type Waiter struct { + Diode + mu sync.Mutex + c *sync.Cond + ctx context.Context +} + +// WaiterConfigOption can be used to setup the waiter. +type WaiterConfigOption func(*Waiter) + +// WithWaiterContext sets the context to cancel any retrieval (Next()). It +// will not change any results for adding data (Set()). Default is +// context.Background(). +func WithWaiterContext(ctx context.Context) WaiterConfigOption { + return WaiterConfigOption(func(c *Waiter) { + c.ctx = ctx + }) +} + +// NewWaiter returns a new Waiter that wraps the given diode. +func NewWaiter(d Diode, opts ...WaiterConfigOption) *Waiter { + w := new(Waiter) + w.Diode = d + w.c = sync.NewCond(&w.mu) + w.ctx = context.Background() + + for _, opt := range opts { + opt(w) + } + + go func() { + <-w.ctx.Done() + w.c.Broadcast() + }() + + return w +} + +// Set invokes the wrapped diode's Set with the given data and uses Broadcast +// to wake up any readers. +func (w *Waiter) Set(data GenericDataType) { + w.Diode.Set(data) + w.c.Broadcast() +} + +// Next returns the next data point on the wrapped diode. If there is not any +// new data, it will Wait for set to be called or the context to be done. +// If the context is done, then nil will be returned. 
+func (w *Waiter) Next() GenericDataType { + w.mu.Lock() + defer w.mu.Unlock() + + for { + data, ok := w.Diode.TryNext() + if !ok { + if w.isDone() { + return nil + } + + w.c.Wait() + continue + } + return data + } +} + +func (w *Waiter) isDone() bool { + select { + case <-w.ctx.Done(): + return true + default: + return false + } +} diff --git a/encoder.go b/encoder.go new file mode 100644 index 0000000..09b24e8 --- /dev/null +++ b/encoder.go @@ -0,0 +1,56 @@ +package zerolog + +import ( + "net" + "time" +) + +type encoder interface { + AppendArrayDelim(dst []byte) []byte + AppendArrayEnd(dst []byte) []byte + AppendArrayStart(dst []byte) []byte + AppendBeginMarker(dst []byte) []byte + AppendBool(dst []byte, val bool) []byte + AppendBools(dst []byte, vals []bool) []byte + AppendBytes(dst, s []byte) []byte + AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte + AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte + AppendEndMarker(dst []byte) []byte + AppendFloat32(dst []byte, val float32) []byte + AppendFloat64(dst []byte, val float64) []byte + AppendFloats32(dst []byte, vals []float32) []byte + AppendFloats64(dst []byte, vals []float64) []byte + AppendHex(dst, s []byte) []byte + AppendIPAddr(dst []byte, ip net.IP) []byte + AppendIPPrefix(dst []byte, pfx net.IPNet) []byte + AppendInt(dst []byte, val int) []byte + AppendInt16(dst []byte, val int16) []byte + AppendInt32(dst []byte, val int32) []byte + AppendInt64(dst []byte, val int64) []byte + AppendInt8(dst []byte, val int8) []byte + AppendInterface(dst []byte, i interface{}) []byte + AppendInts(dst []byte, vals []int) []byte + AppendInts16(dst []byte, vals []int16) []byte + AppendInts32(dst []byte, vals []int32) []byte + AppendInts64(dst []byte, vals []int64) []byte + AppendInts8(dst []byte, vals []int8) []byte + AppendKey(dst []byte, key string) []byte + AppendLineBreak(dst []byte) []byte + AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte + AppendNil(dst []byte) []byte + AppendObjectData(dst []byte, o []byte) []byte + AppendString(dst []byte, s string) []byte + AppendStrings(dst []byte, vals []string) []byte + AppendTime(dst []byte, t time.Time, format string) []byte + AppendTimes(dst []byte, vals []time.Time, format string) []byte + AppendUint(dst []byte, val uint) []byte + AppendUint16(dst []byte, val uint16) []byte + AppendUint32(dst []byte, val uint32) []byte + AppendUint64(dst []byte, val uint64) []byte + AppendUint8(dst []byte, val uint8) []byte + AppendUints(dst []byte, vals []uint) []byte + AppendUints16(dst []byte, vals []uint16) []byte + AppendUints32(dst []byte, vals []uint32) []byte + AppendUints64(dst []byte, vals []uint64) []byte + AppendUints8(dst []byte, vals []uint8) []byte +} diff --git a/encoder_cbor.go b/encoder_cbor.go new file mode 100644 index 0000000..f8d3fe9 --- /dev/null +++ b/encoder_cbor.go @@ -0,0 +1,35 @@ +// +build binary_log + +package zerolog + +// This file contains bindings to do binary encoding. + +import ( + "github.com/rs/zerolog/internal/cbor" +) + +var ( + _ encoder = (*cbor.Encoder)(nil) + + enc = cbor.Encoder{} +) + +func appendJSON(dst []byte, j []byte) []byte { + return cbor.AppendEmbeddedJSON(dst, j) +} + +// decodeIfBinaryToString - converts a binary formatted log msg to a +// JSON formatted String Log message. 
+func decodeIfBinaryToString(in []byte) string { + return cbor.DecodeIfBinaryToString(in) +} + +func decodeObjectToStr(in []byte) string { + return cbor.DecodeObjectToStr(in) +} + +// decodeIfBinaryToBytes - converts a binary formatted log msg to a +// JSON formatted Bytes Log message. +func decodeIfBinaryToBytes(in []byte) []byte { + return cbor.DecodeIfBinaryToBytes(in) +} diff --git a/encoder_json.go b/encoder_json.go new file mode 100644 index 0000000..fe580f5 --- /dev/null +++ b/encoder_json.go @@ -0,0 +1,32 @@ +// +build !binary_log + +package zerolog + +// encoder_json.go file contains bindings to generate +// JSON encoded byte stream. + +import ( + "github.com/rs/zerolog/internal/json" +) + +var ( + _ encoder = (*json.Encoder)(nil) + + enc = json.Encoder{} +) + +func appendJSON(dst []byte, j []byte) []byte { + return append(dst, j...) +} + +func decodeIfBinaryToString(in []byte) string { + return string(in) +} + +func decodeObjectToStr(in []byte) string { + return string(in) +} + +func decodeIfBinaryToBytes(in []byte) []byte { + return in +} diff --git a/event.go b/event.go index 05c4af9..d9462b0 100644 --- a/event.go +++ b/event.go @@ -2,14 +2,12 @@ package zerolog import ( "fmt" - "io/ioutil" + "net" "os" "runtime" "strconv" "sync" "time" - - "github.com/rs/zerolog/internal/json" ) var eventPool = &sync.Pool{ @@ -29,7 +27,20 @@ type Event struct { done func(msg string) stack bool // enable error stack trace ch []Hook // hooks from context - h []Hook +} + +func putEvent(e *Event) { + // Proper usage of a sync.Pool requires each entry to have approximately + // the same memory cost. To obtain this property when the stored type + // contains a variably-sized buffer, we add a hard limit on the maximum buffer + // to place back in the pool. + // + // See https://golang.org/issue/23199 + const maxSize = 1 << 16 // 64KiB + if cap(e.buf) > maxSize { + return + } + eventPool.Put(e) } // LogObjectMarshaler provides a strongly-typed and encoding-agnostic interface @@ -44,14 +55,11 @@ type LogArrayMarshaler interface { MarshalZerologArray(a *Array) } -func newEvent(w LevelWriter, level Level, enabled bool) *Event { - if !enabled { - return &Event{} - } +func newEvent(w LevelWriter, level Level) *Event { e := eventPool.Get().(*Event) - e.buf = e.buf[:1] - e.h = e.h[:0] - e.buf[0] = '{' + e.buf = e.buf[:0] + e.ch = nil + e.buf = enc.AppendBeginMarker(e.buf) e.w = w e.level = level return e @@ -61,16 +69,30 @@ func (e *Event) write() (err error) { if e == nil { return nil } - e.buf = append(e.buf, '}', '\n') - _, err = e.w.WriteLevel(e.level, e.buf) - eventPool.Put(e) + if e.level != Disabled { + e.buf = enc.AppendEndMarker(e.buf) + e.buf = enc.AppendLineBreak(e.buf) + if e.w != nil { + _, err = e.w.WriteLevel(e.level, e.buf) + } + } + putEvent(e) return } // Enabled return false if the *Event is going to be filtered out by // log level or sampling. func (e *Event) Enabled() bool { - return e != nil + return e != nil && e.level != Disabled +} + +// Discard disables the event so Msg(f) won't print it. +func (e *Event) Discard() *Event { + if e == nil { + return e + } + e.level = Disabled + return nil } // Msg sends the *Event with msg added as the message field if not empty. 
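With `Enabled` now reporting false for disabled events and the new `Discard` method switching an event off after creation, expensive field computation can be guarded before it runs. A minimal sketch of that guard pattern (illustrative, not part of the patch):

```go
package main

import (
	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
)

// expensiveDebugInfo stands in for any computation you only want to pay for
// when the event will actually be written (the name is illustrative).
func expensiveDebugInfo() string {
	return "lots of detail"
}

func main() {
	zerolog.SetGlobalLevel(zerolog.InfoLevel)

	if e := log.Debug(); e.Enabled() {
		// Skipped here: debug is below the global level, so Enabled reports
		// false and expensiveDebugInfo is never called.
		e.Str("detail", expensiveDebugInfo()).Msg("debug dump")
	}
}
```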
@@ -81,31 +103,7 @@ func (e *Event) Msg(msg string) { if e == nil { return } - if len(e.ch) > 0 { - e.ch[0].Run(e, e.level, msg) - if len(e.ch) > 1 { - for _, hook := range e.ch[1:] { - hook.Run(e, e.level, msg) - } - } - } - if len(e.h) > 0 { - e.h[0].Run(e, e.level, msg) - if len(e.h) > 1 { - for _, hook := range e.h[1:] { - hook.Run(e, e.level, msg) - } - } - } - if msg != "" { - e.buf = json.AppendString(json.AppendKey(e.buf, MessageFieldName), msg) - } - if e.done != nil { - defer e.done(msg) - } - if err := e.write(); err != nil { - fmt.Fprintf(os.Stderr, "zerolog: could not write event: %v", err) - } + e.msg(msg) } // Msgf sends the event with formated msg added as the message field if not empty. @@ -116,7 +114,31 @@ func (e *Event) Msgf(format string, v ...interface{}) { if e == nil { return } - e.Msg(fmt.Sprintf(format, v...)) + e.msg(fmt.Sprintf(format, v...)) +} + +func (e *Event) msg(msg string) { + if len(e.ch) > 0 { + e.ch[0].Run(e, e.level, msg) + if len(e.ch) > 1 { + for _, hook := range e.ch[1:] { + hook.Run(e, e.level, msg) + } + } + } + if msg != "" { + e.buf = enc.AppendString(enc.AppendKey(e.buf, MessageFieldName), msg) + } + if e.done != nil { + defer e.done(msg) + } + if err := e.write(); err != nil { + if ErrorHandler != nil { + ErrorHandler(err) + } else { + fmt.Fprintf(os.Stderr, "zerolog: could not write event: %v\n", err) + } + } } // Fields is a helper function to use a map to set fields using type assertion. @@ -134,8 +156,9 @@ func (e *Event) Dict(key string, dict *Event) *Event { if e == nil { return e } - e.buf = append(append(json.AppendKey(e.buf, key), dict.buf...), '}') - eventPool.Put(dict) + dict.buf = enc.AppendEndMarker(dict.buf) + e.buf = append(enc.AppendKey(e.buf, key), dict.buf...) + putEvent(dict) return e } @@ -143,7 +166,7 @@ func (e *Event) Dict(key string, dict *Event) *Event { // Call usual field methods like Str, Int etc to add fields to this // event and give it as argument the *Event.Dict method. func Dict() *Event { - return newEvent(levelWriterAdapter{ioutil.Discard}, 0, true) + return newEvent(nil, 0) } // Array adds the field key with an array to the event context. @@ -153,7 +176,7 @@ func (e *Event) Array(key string, arr LogArrayMarshaler) *Event { if e == nil { return e } - e.buf = json.AppendKey(e.buf, key) + e.buf = enc.AppendKey(e.buf, key) var a *Array if aa, ok := arr.(*Array); ok { a = aa @@ -166,17 +189,9 @@ func (e *Event) Array(key string, arr LogArrayMarshaler) *Event { } func (e *Event) appendObject(obj LogObjectMarshaler) { - pos := len(e.buf) + e.buf = enc.AppendBeginMarker(e.buf) obj.MarshalZerologObject(e) - if pos < len(e.buf) { - // As MarshalZerologObject will use event API, the first field will be - // preceded by a comma. If at least one field has been added (buf grew), - // we replace this coma by the opening bracket. - e.buf[pos] = '{' - } else { - e.buf = append(e.buf, '{') - } - e.buf = append(e.buf, '}') + e.buf = enc.AppendEndMarker(e.buf) } // Object marshals an object that implement the LogObjectMarshaler interface. @@ -184,17 +199,26 @@ func (e *Event) Object(key string, obj LogObjectMarshaler) *Event { if e == nil { return e } - e.buf = json.AppendKey(e.buf, key) + e.buf = enc.AppendKey(e.buf, key) e.appendObject(obj) return e } +// Object marshals an object that implement the LogObjectMarshaler interface. 
+func (e *Event) EmbedObject(obj LogObjectMarshaler) *Event { + if e == nil { + return e + } + obj.MarshalZerologObject(e) + return e +} + // Str adds the field key with val as a string to the *Event context. func (e *Event) Str(key, val string) *Event { if e == nil { return e } - e.buf = json.AppendString(json.AppendKey(e.buf, key), val) + e.buf = enc.AppendString(enc.AppendKey(e.buf, key), val) return e } @@ -203,7 +227,7 @@ func (e *Event) Strs(key string, vals []string) *Event { if e == nil { return e } - e.buf = json.AppendStrings(json.AppendKey(e.buf, key), vals) + e.buf = enc.AppendStrings(enc.AppendKey(e.buf, key), vals) return e } @@ -215,34 +239,77 @@ func (e *Event) Bytes(key string, val []byte) *Event { if e == nil { return e } - e.buf = json.AppendBytes(json.AppendKey(e.buf, key), val) + e.buf = enc.AppendBytes(enc.AppendKey(e.buf, key), val) return e } -// AnErr adds the field key with err as a string to the *Event context. +// Hex adds the field key with val as a hex string to the *Event context. +func (e *Event) Hex(key string, val []byte) *Event { + if e == nil { + return e + } + e.buf = enc.AppendHex(enc.AppendKey(e.buf, key), val) + return e +} + +// RawJSON adds already encoded JSON to the log line under key. +// +// No sanity check is performed on b; it must not contain carriage returns and +// be valid JSON. +func (e *Event) RawJSON(key string, b []byte) *Event { + if e == nil { + return e + } + e.buf = appendJSON(enc.AppendKey(e.buf, key), b) + return e +} + +// AnErr adds the field key with serialized err to the *Event context. // If err is nil, no field is added. func (e *Event) AnErr(key string, err error) *Event { if e == nil { return e } - if err != nil { - e.buf = json.AppendError(json.AppendKey(e.buf, key), err) + switch m := ErrorMarshalFunc(err).(type) { + case nil: + return e + case LogObjectMarshaler: + return e.Object(key, m) + case error: + return e.Str(key, m.Error()) + case string: + return e.Str(key, m) + default: + return e.Interface(key, m) } - return e } -// Errs adds the field key with errs as an array of strings to the *Event context. -// If err is nil, no field is added. +// Errs adds the field key with errs as an array of serialized errors to the +// *Event context. func (e *Event) Errs(key string, errs []error) *Event { if e == nil { return e } - e.buf = json.AppendErrors(json.AppendKey(e.buf, key), errs) - return e + arr := Arr() + for _, err := range errs { + switch m := ErrorMarshalFunc(err).(type) { + case LogObjectMarshaler: + arr = arr.Object(m) + case error: + arr = arr.Err(m) + case string: + arr = arr.Str(m) + default: + arr = arr.Interface(m) + } + } + + return e.Array(key, arr) } -// Err adds the field "error" with err as a string to the *Event context. +// Err adds the field "error" with serialized err to the *Event context. // If err is nil, no field is added. +// To customize the key name, change zerolog.ErrorFieldName. // // To customize the key name, change zerolog.ErrorFieldName. // @@ -254,15 +321,19 @@ func (e *Event) Err(err error) *Event { return e } if e.stack && ErrorStackMarshaler != nil { - s := ErrorStackMarshaler(err) - if len(s) > 0 { - e.buf = append(json.AppendKey(e.buf, ErrorStackFieldName), s...) 
+ switch m := ErrorStackMarshaler(err).(type) { + case nil: + case LogObjectMarshaler: + e.Object(ErrorStackFieldName, m) + case error: + e.Str(ErrorStackFieldName, m.Error()) + case string: + e.Str(ErrorStackFieldName, m) + default: + e.Interface(ErrorStackFieldName, m) } } - if err != nil { - e.buf = json.AppendError(json.AppendKey(e.buf, ErrorFieldName), err) - } - return e + return e.AnErr(ErrorFieldName, err) } // Stack enables stack trace printing for the error passed to Err(). @@ -280,7 +351,7 @@ func (e *Event) Bool(key string, b bool) *Event { if e == nil { return e } - e.buf = json.AppendBool(json.AppendKey(e.buf, key), b) + e.buf = enc.AppendBool(enc.AppendKey(e.buf, key), b) return e } @@ -289,7 +360,7 @@ func (e *Event) Bools(key string, b []bool) *Event { if e == nil { return e } - e.buf = json.AppendBools(json.AppendKey(e.buf, key), b) + e.buf = enc.AppendBools(enc.AppendKey(e.buf, key), b) return e } @@ -298,7 +369,7 @@ func (e *Event) Int(key string, i int) *Event { if e == nil { return e } - e.buf = json.AppendInt(json.AppendKey(e.buf, key), i) + e.buf = enc.AppendInt(enc.AppendKey(e.buf, key), i) return e } @@ -307,7 +378,7 @@ func (e *Event) Ints(key string, i []int) *Event { if e == nil { return e } - e.buf = json.AppendInts(json.AppendKey(e.buf, key), i) + e.buf = enc.AppendInts(enc.AppendKey(e.buf, key), i) return e } @@ -316,7 +387,7 @@ func (e *Event) Int8(key string, i int8) *Event { if e == nil { return e } - e.buf = json.AppendInt8(json.AppendKey(e.buf, key), i) + e.buf = enc.AppendInt8(enc.AppendKey(e.buf, key), i) return e } @@ -325,7 +396,7 @@ func (e *Event) Ints8(key string, i []int8) *Event { if e == nil { return e } - e.buf = json.AppendInts8(json.AppendKey(e.buf, key), i) + e.buf = enc.AppendInts8(enc.AppendKey(e.buf, key), i) return e } @@ -334,7 +405,7 @@ func (e *Event) Int16(key string, i int16) *Event { if e == nil { return e } - e.buf = json.AppendInt16(json.AppendKey(e.buf, key), i) + e.buf = enc.AppendInt16(enc.AppendKey(e.buf, key), i) return e } @@ -343,7 +414,7 @@ func (e *Event) Ints16(key string, i []int16) *Event { if e == nil { return e } - e.buf = json.AppendInts16(json.AppendKey(e.buf, key), i) + e.buf = enc.AppendInts16(enc.AppendKey(e.buf, key), i) return e } @@ -352,7 +423,7 @@ func (e *Event) Int32(key string, i int32) *Event { if e == nil { return e } - e.buf = json.AppendInt32(json.AppendKey(e.buf, key), i) + e.buf = enc.AppendInt32(enc.AppendKey(e.buf, key), i) return e } @@ -361,7 +432,7 @@ func (e *Event) Ints32(key string, i []int32) *Event { if e == nil { return e } - e.buf = json.AppendInts32(json.AppendKey(e.buf, key), i) + e.buf = enc.AppendInts32(enc.AppendKey(e.buf, key), i) return e } @@ -370,7 +441,7 @@ func (e *Event) Int64(key string, i int64) *Event { if e == nil { return e } - e.buf = json.AppendInt64(json.AppendKey(e.buf, key), i) + e.buf = enc.AppendInt64(enc.AppendKey(e.buf, key), i) return e } @@ -379,7 +450,7 @@ func (e *Event) Ints64(key string, i []int64) *Event { if e == nil { return e } - e.buf = json.AppendInts64(json.AppendKey(e.buf, key), i) + e.buf = enc.AppendInts64(enc.AppendKey(e.buf, key), i) return e } @@ -388,7 +459,7 @@ func (e *Event) Uint(key string, i uint) *Event { if e == nil { return e } - e.buf = json.AppendUint(json.AppendKey(e.buf, key), i) + e.buf = enc.AppendUint(enc.AppendKey(e.buf, key), i) return e } @@ -397,7 +468,7 @@ func (e *Event) Uints(key string, i []uint) *Event { if e == nil { return e } - e.buf = json.AppendUints(json.AppendKey(e.buf, key), i) + e.buf = 
enc.AppendUints(enc.AppendKey(e.buf, key), i) return e } @@ -406,7 +477,7 @@ func (e *Event) Uint8(key string, i uint8) *Event { if e == nil { return e } - e.buf = json.AppendUint8(json.AppendKey(e.buf, key), i) + e.buf = enc.AppendUint8(enc.AppendKey(e.buf, key), i) return e } @@ -415,7 +486,7 @@ func (e *Event) Uints8(key string, i []uint8) *Event { if e == nil { return e } - e.buf = json.AppendUints8(json.AppendKey(e.buf, key), i) + e.buf = enc.AppendUints8(enc.AppendKey(e.buf, key), i) return e } @@ -424,7 +495,7 @@ func (e *Event) Uint16(key string, i uint16) *Event { if e == nil { return e } - e.buf = json.AppendUint16(json.AppendKey(e.buf, key), i) + e.buf = enc.AppendUint16(enc.AppendKey(e.buf, key), i) return e } @@ -433,7 +504,7 @@ func (e *Event) Uints16(key string, i []uint16) *Event { if e == nil { return e } - e.buf = json.AppendUints16(json.AppendKey(e.buf, key), i) + e.buf = enc.AppendUints16(enc.AppendKey(e.buf, key), i) return e } @@ -442,7 +513,7 @@ func (e *Event) Uint32(key string, i uint32) *Event { if e == nil { return e } - e.buf = json.AppendUint32(json.AppendKey(e.buf, key), i) + e.buf = enc.AppendUint32(enc.AppendKey(e.buf, key), i) return e } @@ -451,7 +522,7 @@ func (e *Event) Uints32(key string, i []uint32) *Event { if e == nil { return e } - e.buf = json.AppendUints32(json.AppendKey(e.buf, key), i) + e.buf = enc.AppendUints32(enc.AppendKey(e.buf, key), i) return e } @@ -460,7 +531,7 @@ func (e *Event) Uint64(key string, i uint64) *Event { if e == nil { return e } - e.buf = json.AppendUint64(json.AppendKey(e.buf, key), i) + e.buf = enc.AppendUint64(enc.AppendKey(e.buf, key), i) return e } @@ -469,7 +540,7 @@ func (e *Event) Uints64(key string, i []uint64) *Event { if e == nil { return e } - e.buf = json.AppendUints64(json.AppendKey(e.buf, key), i) + e.buf = enc.AppendUints64(enc.AppendKey(e.buf, key), i) return e } @@ -478,7 +549,7 @@ func (e *Event) Float32(key string, f float32) *Event { if e == nil { return e } - e.buf = json.AppendFloat32(json.AppendKey(e.buf, key), f) + e.buf = enc.AppendFloat32(enc.AppendKey(e.buf, key), f) return e } @@ -487,7 +558,7 @@ func (e *Event) Floats32(key string, f []float32) *Event { if e == nil { return e } - e.buf = json.AppendFloats32(json.AppendKey(e.buf, key), f) + e.buf = enc.AppendFloats32(enc.AppendKey(e.buf, key), f) return e } @@ -496,7 +567,7 @@ func (e *Event) Float64(key string, f float64) *Event { if e == nil { return e } - e.buf = json.AppendFloat64(json.AppendKey(e.buf, key), f) + e.buf = enc.AppendFloat64(enc.AppendKey(e.buf, key), f) return e } @@ -505,17 +576,20 @@ func (e *Event) Floats64(key string, f []float64) *Event { if e == nil { return e } - e.buf = json.AppendFloats64(json.AppendKey(e.buf, key), f) + e.buf = enc.AppendFloats64(enc.AppendKey(e.buf, key), f) return e } // Timestamp adds the current local time as UNIX timestamp to the *Event context with the "time" key. // To customize the key name, change zerolog.TimestampFieldName. +// +// NOTE: It won't dedupe the "time" key if the *Event (or *Context) has one +// already. 
func (e *Event) Timestamp() *Event { if e == nil { return e } - e.buf = json.AppendTime(json.AppendKey(e.buf, TimestampFieldName), TimestampFunc(), TimeFieldFormat) + e.buf = enc.AppendTime(enc.AppendKey(e.buf, TimestampFieldName), TimestampFunc(), TimeFieldFormat) return e } @@ -524,7 +598,7 @@ func (e *Event) Time(key string, t time.Time) *Event { if e == nil { return e } - e.buf = json.AppendTime(json.AppendKey(e.buf, key), t, TimeFieldFormat) + e.buf = enc.AppendTime(enc.AppendKey(e.buf, key), t, TimeFieldFormat) return e } @@ -533,7 +607,7 @@ func (e *Event) Times(key string, t []time.Time) *Event { if e == nil { return e } - e.buf = json.AppendTimes(json.AppendKey(e.buf, key), t, TimeFieldFormat) + e.buf = enc.AppendTimes(enc.AppendKey(e.buf, key), t, TimeFieldFormat) return e } @@ -544,7 +618,7 @@ func (e *Event) Dur(key string, d time.Duration) *Event { if e == nil { return e } - e.buf = json.AppendDuration(json.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger) + e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger) return e } @@ -555,7 +629,7 @@ func (e *Event) Durs(key string, d []time.Duration) *Event { if e == nil { return e } - e.buf = json.AppendDurations(json.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger) + e.buf = enc.AppendDurations(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger) return e } @@ -570,7 +644,7 @@ func (e *Event) TimeDiff(key string, t time.Time, start time.Time) *Event { if t.After(start) { d = t.Sub(start) } - e.buf = json.AppendDuration(json.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger) + e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger) return e } @@ -582,13 +656,13 @@ func (e *Event) Interface(key string, i interface{}) *Event { if obj, ok := i.(LogObjectMarshaler); ok { return e.Object(key, obj) } - e.buf = json.AppendInterface(json.AppendKey(e.buf, key), i) + e.buf = enc.AppendInterface(enc.AppendKey(e.buf, key), i) return e } // Caller adds the file:line of the caller with the zerolog.CallerFieldName key. 
func (e *Event) Caller() *Event { - return e.caller(2) + return e.caller(CallerSkipFrameCount) } func (e *Event) caller(skip int) *Event { @@ -599,6 +673,33 @@ func (e *Event) caller(skip int) *Event { if !ok { return e } - e.buf = json.AppendString(json.AppendKey(e.buf, CallerFieldName), file+":"+strconv.Itoa(line)) + e.buf = enc.AppendString(enc.AppendKey(e.buf, CallerFieldName), file+":"+strconv.Itoa(line)) + return e +} + +// IPAddr adds IPv4 or IPv6 Address to the event +func (e *Event) IPAddr(key string, ip net.IP) *Event { + if e == nil { + return e + } + e.buf = enc.AppendIPAddr(enc.AppendKey(e.buf, key), ip) + return e +} + +// IPPrefix adds IPv4 or IPv6 Prefix (address and mask) to the event +func (e *Event) IPPrefix(key string, pfx net.IPNet) *Event { + if e == nil { + return e + } + e.buf = enc.AppendIPPrefix(enc.AppendKey(e.buf, key), pfx) + return e +} + +// MACAddr adds MAC address to the event +func (e *Event) MACAddr(key string, ha net.HardwareAddr) *Event { + if e == nil { + return e + } + e.buf = enc.AppendMACAddr(enc.AppendKey(e.buf, key), ha) return e } diff --git a/fields.go b/fields.go index 6a19392..6b62ecc 100644 --- a/fields.go +++ b/fields.go @@ -1,10 +1,9 @@ package zerolog import ( + "net" "sort" "time" - - "github.com/rs/zerolog/internal/json" ) func appendFields(dst []byte, fields map[string]interface{}) []byte { @@ -14,82 +13,229 @@ func appendFields(dst []byte, fields map[string]interface{}) []byte { } sort.Strings(keys) for _, key := range keys { - dst = json.AppendKey(dst, key) - switch val := fields[key].(type) { + dst = enc.AppendKey(dst, key) + val := fields[key] + if val, ok := val.(LogObjectMarshaler); ok { + e := newEvent(nil, 0) + e.buf = e.buf[:0] + e.appendObject(val) + dst = append(dst, e.buf...) + putEvent(e) + continue + } + switch val := val.(type) { case string: - dst = json.AppendString(dst, val) + dst = enc.AppendString(dst, val) case []byte: - dst = json.AppendBytes(dst, val) + dst = enc.AppendBytes(dst, val) case error: - dst = json.AppendError(dst, val) + marshaled := ErrorMarshalFunc(val) + switch m := marshaled.(type) { + case LogObjectMarshaler: + e := newEvent(nil, 0) + e.buf = e.buf[:0] + e.appendObject(m) + dst = append(dst, e.buf...) + putEvent(e) + case error: + dst = enc.AppendString(dst, m.Error()) + case string: + dst = enc.AppendString(dst, m) + default: + dst = enc.AppendInterface(dst, m) + } case []error: - dst = json.AppendErrors(dst, val) + dst = enc.AppendArrayStart(dst) + for i, err := range val { + marshaled := ErrorMarshalFunc(err) + switch m := marshaled.(type) { + case LogObjectMarshaler: + e := newEvent(nil, 0) + e.buf = e.buf[:0] + e.appendObject(m) + dst = append(dst, e.buf...) 
+ putEvent(e) + case error: + dst = enc.AppendString(dst, m.Error()) + case string: + dst = enc.AppendString(dst, m) + default: + dst = enc.AppendInterface(dst, m) + } + + if i < (len(val) - 1) { + enc.AppendArrayDelim(dst) + } + } + dst = enc.AppendArrayEnd(dst) case bool: - dst = json.AppendBool(dst, val) + dst = enc.AppendBool(dst, val) case int: - dst = json.AppendInt(dst, val) + dst = enc.AppendInt(dst, val) case int8: - dst = json.AppendInt8(dst, val) + dst = enc.AppendInt8(dst, val) case int16: - dst = json.AppendInt16(dst, val) + dst = enc.AppendInt16(dst, val) case int32: - dst = json.AppendInt32(dst, val) + dst = enc.AppendInt32(dst, val) case int64: - dst = json.AppendInt64(dst, val) + dst = enc.AppendInt64(dst, val) case uint: - dst = json.AppendUint(dst, val) + dst = enc.AppendUint(dst, val) case uint8: - dst = json.AppendUint8(dst, val) + dst = enc.AppendUint8(dst, val) case uint16: - dst = json.AppendUint16(dst, val) + dst = enc.AppendUint16(dst, val) case uint32: - dst = json.AppendUint32(dst, val) + dst = enc.AppendUint32(dst, val) case uint64: - dst = json.AppendUint64(dst, val) + dst = enc.AppendUint64(dst, val) case float32: - dst = json.AppendFloat32(dst, val) + dst = enc.AppendFloat32(dst, val) case float64: - dst = json.AppendFloat64(dst, val) + dst = enc.AppendFloat64(dst, val) case time.Time: - dst = json.AppendTime(dst, val, TimeFieldFormat) + dst = enc.AppendTime(dst, val, TimeFieldFormat) case time.Duration: - dst = json.AppendDuration(dst, val, DurationFieldUnit, DurationFieldInteger) + dst = enc.AppendDuration(dst, val, DurationFieldUnit, DurationFieldInteger) + case *string: + if val != nil { + dst = enc.AppendString(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *bool: + if val != nil { + dst = enc.AppendBool(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *int: + if val != nil { + dst = enc.AppendInt(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *int8: + if val != nil { + dst = enc.AppendInt8(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *int16: + if val != nil { + dst = enc.AppendInt16(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *int32: + if val != nil { + dst = enc.AppendInt32(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *int64: + if val != nil { + dst = enc.AppendInt64(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *uint: + if val != nil { + dst = enc.AppendUint(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *uint8: + if val != nil { + dst = enc.AppendUint8(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *uint16: + if val != nil { + dst = enc.AppendUint16(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *uint32: + if val != nil { + dst = enc.AppendUint32(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *uint64: + if val != nil { + dst = enc.AppendUint64(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *float32: + if val != nil { + dst = enc.AppendFloat32(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *float64: + if val != nil { + dst = enc.AppendFloat64(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *time.Time: + if val != nil { + dst = enc.AppendTime(dst, *val, TimeFieldFormat) + } else { + dst = enc.AppendNil(dst) + } + case *time.Duration: + if val != nil { + dst = enc.AppendDuration(dst, *val, DurationFieldUnit, DurationFieldInteger) + } else { + dst = enc.AppendNil(dst) + } case []string: - dst = json.AppendStrings(dst, val) + dst = enc.AppendStrings(dst, val) case 
[]bool: - dst = json.AppendBools(dst, val) + dst = enc.AppendBools(dst, val) case []int: - dst = json.AppendInts(dst, val) + dst = enc.AppendInts(dst, val) case []int8: - dst = json.AppendInts8(dst, val) + dst = enc.AppendInts8(dst, val) case []int16: - dst = json.AppendInts16(dst, val) + dst = enc.AppendInts16(dst, val) case []int32: - dst = json.AppendInts32(dst, val) + dst = enc.AppendInts32(dst, val) case []int64: - dst = json.AppendInts64(dst, val) + dst = enc.AppendInts64(dst, val) case []uint: - dst = json.AppendUints(dst, val) + dst = enc.AppendUints(dst, val) // case []uint8: - // dst = appendUints8(dst, val) + // dst = enc.AppendUints8(dst, val) case []uint16: - dst = json.AppendUints16(dst, val) + dst = enc.AppendUints16(dst, val) case []uint32: - dst = json.AppendUints32(dst, val) + dst = enc.AppendUints32(dst, val) case []uint64: - dst = json.AppendUints64(dst, val) + dst = enc.AppendUints64(dst, val) case []float32: - dst = json.AppendFloats32(dst, val) + dst = enc.AppendFloats32(dst, val) case []float64: - dst = json.AppendFloats64(dst, val) + dst = enc.AppendFloats64(dst, val) case []time.Time: - dst = json.AppendTimes(dst, val, TimeFieldFormat) + dst = enc.AppendTimes(dst, val, TimeFieldFormat) case []time.Duration: - dst = json.AppendDurations(dst, val, DurationFieldUnit, DurationFieldInteger) + dst = enc.AppendDurations(dst, val, DurationFieldUnit, DurationFieldInteger) case nil: - dst = append(dst, "null"...) + dst = enc.AppendNil(dst) + case net.IP: + dst = enc.AppendIPAddr(dst, val) + case net.IPNet: + dst = enc.AppendIPPrefix(dst, val) + case net.HardwareAddr: + dst = enc.AppendMACAddr(dst, val) default: - dst = json.AppendInterface(dst, val) + dst = enc.AppendInterface(dst, val) } } return dst diff --git a/globals.go b/globals.go index 97eed50..e9d3e77 100644 --- a/globals.go +++ b/globals.go @@ -19,12 +19,19 @@ var ( // CallerFieldName is the field name used for caller field. CallerFieldName = "caller" + // CallerSkipFrameCount is the number of stack frames to skip to find the caller. + CallerSkipFrameCount = 2 + // ErrorStackFieldName is the field name used for error stacks. ErrorStackFieldName = "stack" - // ErrorStackMarshaler extract the stack from err if any, and returns it as - // a marshaled JSON. - ErrorStackMarshaler func(err error) []byte + // ErrorStackMarshaler extract the stack from err if any. + ErrorStackMarshaler func(err error) interface{} + + // ErrorMarshalFunc allows customization of global error marshaling + ErrorMarshalFunc = func(err error) interface{} { + return err + } // TimeFieldFormat defines the time format of the Time field type. // If set to an empty string, the time is formatted as an UNIX timestamp @@ -41,6 +48,11 @@ var ( // DurationFieldInteger renders Dur fields as integer instead of float if // set to true. DurationFieldInteger = false + + // ErrorHandler is called whenever zerolog fails to write an event on its + // output. If not set, an error is printed on the stderr. This handler must + // be thread safe and non-blocking. 
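A minimal sketch of wiring up the `ErrorHandler` declared just below; only the package-level variable itself comes from this diff, the failure counter and how it is exposed are purely illustrative.

```go
package main

import (
	"os"
	"sync/atomic"

	"github.com/rs/zerolog"
)

func main() {
	// Illustrative only: count write failures instead of printing them to
	// stderr. The handler stays thread safe and non-blocking by doing
	// nothing but an atomic add.
	var writeFailures int64
	zerolog.ErrorHandler = func(err error) {
		atomic.AddInt64(&writeFailures, 1)
	}

	log := zerolog.New(os.Stderr)
	log.Info().Msg("hello world")
	_ = atomic.LoadInt64(&writeFailures) // expose the counter however you like
}
```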
+ ErrorHandler func(err error) ) var ( @@ -56,7 +68,8 @@ func SetGlobalLevel(l Level) { atomic.StoreUint32(gLevel, uint32(l)) } -func globalLevel() Level { +// GlobalLevel returns the current global log level +func GlobalLevel() Level { return Level(atomic.LoadUint32(gLevel)) } diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..9712c96 --- /dev/null +++ b/go.mod @@ -0,0 +1,9 @@ +module github.com/rs/zerolog + +require ( + github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142 + github.com/pkg/errors v0.8.0 + github.com/rs/xid v1.2.1 + github.com/zenazn/goji v0.9.0 + golang.org/x/tools v0.0.0-20190102183724-79186431cf29 +) diff --git a/hlog/hlog.go b/hlog/hlog.go index bbe8b12..f7c3a5a 100644 --- a/hlog/hlog.go +++ b/hlog/hlog.go @@ -124,12 +124,17 @@ func RefererHandler(fieldKey string) func(next http.Handler) http.Handler { type idKey struct{} -// IDFromRequest returns the unique id accociated to the request if any. +// IDFromRequest returns the unique id associated to the request if any. func IDFromRequest(r *http.Request) (id xid.ID, ok bool) { if r == nil { return } - id, ok = r.Context().Value(idKey{}).(xid.ID) + return IDFromCtx(r.Context()) +} + +// IDFromCtx returns the unique id associated to the context if any. +func IDFromCtx(ctx context.Context) (id xid.ID, ok bool) { + id, ok = ctx.Value(idKey{}).(xid.ID) return } diff --git a/hlog/hlog_example_test.go b/hlog/hlog_example_test.go index a70240e..cae91e2 100644 --- a/hlog/hlog_example_test.go +++ b/hlog/hlog_example_test.go @@ -1,3 +1,5 @@ +// +build !binary_log + package hlog_test import ( diff --git a/hlog/hlog_test.go b/hlog/hlog_test.go index 5ac960a..975ce38 100644 --- a/hlog/hlog_test.go +++ b/hlog/hlog_test.go @@ -15,8 +15,17 @@ import ( "net/http/httptest" "github.com/rs/zerolog" + "github.com/rs/zerolog/internal/cbor" ) +func decodeIfBinary(out *bytes.Buffer) string { + p := out.Bytes() + if len(p) == 0 || p[0] < 0x7F { + return out.String() + } + return cbor.DecodeObjectToStr(p) + "\n" +} + func TestNewHandler(t *testing.T) { log := zerolog.New(nil).With(). Str("foo", "bar"). 
@@ -42,7 +51,7 @@ func TestURLHandler(t *testing.T) { })) h = NewHandler(zerolog.New(out))(h) h.ServeHTTP(nil, r) - if want, got := `{"url":"/path?foo=bar"}`+"\n", out.String(); want != got { + if want, got := `{"url":"/path?foo=bar"}`+"\n", decodeIfBinary(out); want != got { t.Errorf("Invalid log output, got: %s, want: %s", got, want) } } @@ -58,7 +67,7 @@ func TestMethodHandler(t *testing.T) { })) h = NewHandler(zerolog.New(out))(h) h.ServeHTTP(nil, r) - if want, got := `{"method":"POST"}`+"\n", out.String(); want != got { + if want, got := `{"method":"POST"}`+"\n", decodeIfBinary(out); want != got { t.Errorf("Invalid log output, got: %s, want: %s", got, want) } } @@ -75,7 +84,7 @@ func TestRequestHandler(t *testing.T) { })) h = NewHandler(zerolog.New(out))(h) h.ServeHTTP(nil, r) - if want, got := `{"request":"POST /path?foo=bar"}`+"\n", out.String(); want != got { + if want, got := `{"request":"POST /path?foo=bar"}`+"\n", decodeIfBinary(out); want != got { t.Errorf("Invalid log output, got: %s, want: %s", got, want) } } @@ -91,7 +100,7 @@ func TestRemoteAddrHandler(t *testing.T) { })) h = NewHandler(zerolog.New(out))(h) h.ServeHTTP(nil, r) - if want, got := `{"ip":"1.2.3.4"}`+"\n", out.String(); want != got { + if want, got := `{"ip":"1.2.3.4"}`+"\n", decodeIfBinary(out); want != got { t.Errorf("Invalid log output, got: %s, want: %s", got, want) } } @@ -107,7 +116,7 @@ func TestRemoteAddrHandlerIPv6(t *testing.T) { })) h = NewHandler(zerolog.New(out))(h) h.ServeHTTP(nil, r) - if want, got := `{"ip":"2001:db8:a0b:12f0::1"}`+"\n", out.String(); want != got { + if want, got := `{"ip":"2001:db8:a0b:12f0::1"}`+"\n", decodeIfBinary(out); want != got { t.Errorf("Invalid log output, got: %s, want: %s", got, want) } } @@ -125,7 +134,7 @@ func TestUserAgentHandler(t *testing.T) { })) h = NewHandler(zerolog.New(out))(h) h.ServeHTTP(nil, r) - if want, got := `{"ua":"some user agent string"}`+"\n", out.String(); want != got { + if want, got := `{"ua":"some user agent string"}`+"\n", decodeIfBinary(out); want != got { t.Errorf("Invalid log output, got: %s, want: %s", got, want) } } @@ -143,7 +152,7 @@ func TestRefererHandler(t *testing.T) { })) h = NewHandler(zerolog.New(out))(h) h.ServeHTTP(nil, r) - if want, got := `{"referer":"http://foo.com/bar"}`+"\n", out.String(); want != got { + if want, got := `{"referer":"http://foo.com/bar"}`+"\n", decodeIfBinary(out); want != got { t.Errorf("Invalid log output, got: %s, want: %s", got, want) } } @@ -165,7 +174,7 @@ func TestRequestIDHandler(t *testing.T) { } l := FromRequest(r) l.Log().Msg("") - if want, got := fmt.Sprintf(`{"id":"%s"}`+"\n", id), out.String(); want != got { + if want, got := fmt.Sprintf(`{"id":"%s"}`+"\n", id), decodeIfBinary(out); want != got { t.Errorf("Invalid log output, got: %s, want: %s", got, want) } })) @@ -185,7 +194,7 @@ func TestCombinedHandlers(t *testing.T) { })))) h = NewHandler(zerolog.New(out))(h) h.ServeHTTP(nil, r) - if want, got := `{"method":"POST","request":"POST /path?foo=bar","url":"/path?foo=bar"}`+"\n", out.String(); want != got { + if want, got := `{"method":"POST","request":"POST /path?foo=bar","url":"/path?foo=bar"}`+"\n", decodeIfBinary(out); want != got { t.Errorf("Invalid log output, got: %s, want: %s", got, want) } } diff --git a/hook.go b/hook.go index 549d85a..08133ac 100644 --- a/hook.go +++ b/hook.go @@ -6,6 +6,15 @@ type Hook interface { Run(e *Event, level Level, message string) } +// HookFunc is an adaptor to allow the use of an ordinary function +// as a Hook. 
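A small sketch of what the `HookFunc` adaptor declared just below enables: handing a closure directly to `Logger.Hook` without defining a named hook type. The `severity` field name and the expected output are illustrative assumptions.

```go
package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	log := zerolog.New(os.Stdout).Hook(zerolog.HookFunc(
		func(e *zerolog.Event, level zerolog.Level, msg string) {
			// Illustrative hook: attach the level name under a "severity" key.
			if level != zerolog.NoLevel {
				e.Str("severity", level.String())
			}
		}))

	log.Info().Msg("hello world")
	// Expected shape (assumption): {"level":"info","severity":"info","message":"hello world"}
}
```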
+type HookFunc func(e *Event, level Level, message string) + +// Run implements the Hook interface. +func (h HookFunc) Run(e *Event, level Level, message string) { + h(e, level, message) +} + // LevelHook applies a different hook for each level. type LevelHook struct { NoLevelHook, DebugHook, InfoHook, WarnHook, ErrorHook, FatalHook, PanicHook Hook diff --git a/hook_test.go b/hook_test.go index 351500e..19bde9d 100644 --- a/hook_test.go +++ b/hook_test.go @@ -1,209 +1,140 @@ package zerolog import ( - "testing" "bytes" "io/ioutil" + "testing" ) -type LevelNameHook struct{} - -func (h LevelNameHook) Run(e *Event, level Level, msg string) { - levelName := level.String() - if level == NoLevel { - levelName = "nolevel" - } - e.Str("level_name", levelName) -} - -type SimpleHook struct{} - -func (h SimpleHook) Run(e *Event, level Level, msg string) { - e.Bool("has_level", level != NoLevel) - e.Str("test", "logged") -} - -type CopyHook struct{} - -func (h CopyHook) Run(e *Event, level Level, msg string) { - hasLevel := level != NoLevel - e.Bool("copy_has_level", hasLevel) - if hasLevel { - e.Str("copy_level", level.String()) - } - e.Str("copy_msg", msg) -} - -type NopHook struct{} - -func (h NopHook) Run(e *Event, level Level, msg string) { -} - var ( - levelNameHook LevelNameHook - simpleHook SimpleHook - copyHook CopyHook - nopHook NopHook + levelNameHook = HookFunc(func(e *Event, level Level, msg string) { + levelName := level.String() + if level == NoLevel { + levelName = "nolevel" + } + e.Str("level_name", levelName) + }) + simpleHook = HookFunc(func(e *Event, level Level, msg string) { + e.Bool("has_level", level != NoLevel) + e.Str("test", "logged") + }) + copyHook = HookFunc(func(e *Event, level Level, msg string) { + hasLevel := level != NoLevel + e.Bool("copy_has_level", hasLevel) + if hasLevel { + e.Str("copy_level", level.String()) + } + e.Str("copy_msg", msg) + }) + nopHook = HookFunc(func(e *Event, level Level, message string) { + }) + discardHook = HookFunc(func(e *Event, level Level, message string) { + e.Discard() + }) ) func TestHook(t *testing.T) { - t.Run("Message", func(t *testing.T) { - out := &bytes.Buffer{} - log := New(out).Hook(levelNameHook) - log.Log().Msg("test message") - if got, want := out.String(), `{"level_name":"nolevel","message":"test message"}`+"\n"; got != want { - t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) - } - }) - t.Run("NoLevel", func(t *testing.T) { - out := &bytes.Buffer{} - log := New(out).Hook(levelNameHook) - log.Log().Msg("") - if got, want := out.String(), `{"level_name":"nolevel"}`+"\n"; got != want { - t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) - } - }) - t.Run("Print", func(t *testing.T) { - out := &bytes.Buffer{} - log := New(out).Hook(levelNameHook) - log.Print("") - if got, want := out.String(), `{"level":"debug","level_name":"debug"}`+"\n"; got != want { - t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) - } - }) - t.Run("Error", func(t *testing.T) { - out := &bytes.Buffer{} - log := New(out).Hook(levelNameHook) - log.Error().Msg("") - if got, want := out.String(), `{"level":"error","level_name":"error"}`+"\n"; got != want { - t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) - } - }) - t.Run("Copy/1", func(t *testing.T) { - out := &bytes.Buffer{} - log := New(out).Hook(copyHook) - log.Log().Msg("") - if got, want := out.String(), `{"copy_has_level":false,"copy_msg":""}`+"\n"; got != want { - t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) - } - }) - 
t.Run("Copy/2", func(t *testing.T) { - out := &bytes.Buffer{} - log := New(out).Hook(copyHook) - log.Info().Msg("a message") - if got, want := out.String(), `{"level":"info","copy_has_level":true,"copy_level":"info","copy_msg":"a message","message":"a message"}`+"\n"; got != want { - t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) - } - }) - t.Run("Multi", func(t *testing.T) { - out := &bytes.Buffer{} - log := New(out).Hook(levelNameHook).Hook(simpleHook) - log.Error().Msg("") - if got, want := out.String(), `{"level":"error","level_name":"error","has_level":true,"test":"logged"}`+"\n"; got != want { - t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) - } - }) - t.Run("Multi/Message", func(t *testing.T) { - out := &bytes.Buffer{} - log := New(out).Hook(levelNameHook).Hook(simpleHook) - log.Error().Msg("a message") - if got, want := out.String(), `{"level":"error","level_name":"error","has_level":true,"test":"logged","message":"a message"}`+"\n"; got != want { - t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) - } - }) - t.Run("Output/single/pre", func(t *testing.T) { - ignored := &bytes.Buffer{} - out := &bytes.Buffer{} - log := New(ignored).Hook(levelNameHook).Output(out) - log.Error().Msg("") - if got, want := out.String(), `{"level":"error","level_name":"error"}`+"\n"; got != want { - t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) - } - }) - t.Run("Output/single/post", func(t *testing.T) { - ignored := &bytes.Buffer{} - out := &bytes.Buffer{} - log := New(ignored).Output(out).Hook(levelNameHook) - log.Error().Msg("") - if got, want := out.String(), `{"level":"error","level_name":"error"}`+"\n"; got != want { - t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) - } - }) - t.Run("Output/multi/pre", func(t *testing.T) { - ignored := &bytes.Buffer{} - out := &bytes.Buffer{} - log := New(ignored).Hook(levelNameHook).Hook(simpleHook).Output(out) - log.Error().Msg("") - if got, want := out.String(), `{"level":"error","level_name":"error","has_level":true,"test":"logged"}`+"\n"; got != want { - t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) - } - }) - t.Run("Output/multi/post", func(t *testing.T) { - ignored := &bytes.Buffer{} - out := &bytes.Buffer{} - log := New(ignored).Output(out).Hook(levelNameHook).Hook(simpleHook) - log.Error().Msg("") - if got, want := out.String(), `{"level":"error","level_name":"error","has_level":true,"test":"logged"}`+"\n"; got != want { - t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) - } - }) - t.Run("Output/mixed", func(t *testing.T) { - ignored := &bytes.Buffer{} - out := &bytes.Buffer{} - log := New(ignored).Hook(levelNameHook).Output(out).Hook(simpleHook) - log.Error().Msg("") - if got, want := out.String(), `{"level":"error","level_name":"error","has_level":true,"test":"logged"}`+"\n"; got != want { - t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) - } - }) - t.Run("With/single/pre", func(t *testing.T) { - out := &bytes.Buffer{} - log := New(out).Hook(levelNameHook).With().Str("with", "pre").Logger() - log.Error().Msg("") - if got, want := out.String(), `{"level":"error","with":"pre","level_name":"error"}`+"\n"; got != want { - t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) - } - }) - t.Run("With/single/post", func(t *testing.T) { - out := &bytes.Buffer{} - log := New(out).With().Str("with", "post").Logger().Hook(levelNameHook) - log.Error().Msg("") - if got, want := out.String(), 
`{"level":"error","with":"post","level_name":"error"}`+"\n"; got != want { - t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) - } - }) - t.Run("With/multi/pre", func(t *testing.T) { - out := &bytes.Buffer{} - log := New(out).Hook(levelNameHook).Hook(simpleHook).With().Str("with", "pre").Logger() - log.Error().Msg("") - if got, want := out.String(), `{"level":"error","with":"pre","level_name":"error","has_level":true,"test":"logged"}`+"\n"; got != want { - t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) - } - }) - t.Run("With/multi/post", func(t *testing.T) { - out := &bytes.Buffer{} - log := New(out).With().Str("with", "post").Logger().Hook(levelNameHook).Hook(simpleHook) - log.Error().Msg("") - if got, want := out.String(), `{"level":"error","with":"post","level_name":"error","has_level":true,"test":"logged"}`+"\n"; got != want { - t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) - } - }) - t.Run("With/mixed", func(t *testing.T) { - out := &bytes.Buffer{} - log := New(out).Hook(levelNameHook).With().Str("with", "mixed").Logger().Hook(simpleHook) - log.Error().Msg("") - if got, want := out.String(), `{"level":"error","with":"mixed","level_name":"error","has_level":true,"test":"logged"}`+"\n"; got != want { - t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) - } - }) - t.Run("None", func(t *testing.T) { - out := &bytes.Buffer{} - log := New(out) - log.Error().Msg("") - if got, want := out.String(), `{"level":"error"}`+"\n"; got != want { - t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) - } - }) + tests := []struct { + name string + want string + test func(log Logger) + }{ + {"Message", `{"level_name":"nolevel","message":"test message"}` + "\n", func(log Logger) { + log = log.Hook(levelNameHook) + log.Log().Msg("test message") + }}, + {"NoLevel", `{"level_name":"nolevel"}` + "\n", func(log Logger) { + log = log.Hook(levelNameHook) + log.Log().Msg("") + }}, + {"Print", `{"level":"debug","level_name":"debug"}` + "\n", func(log Logger) { + log = log.Hook(levelNameHook) + log.Print("") + }}, + {"Error", `{"level":"error","level_name":"error"}` + "\n", func(log Logger) { + log = log.Hook(levelNameHook) + log.Error().Msg("") + }}, + {"Copy/1", `{"copy_has_level":false,"copy_msg":""}` + "\n", func(log Logger) { + log = log.Hook(copyHook) + log.Log().Msg("") + }}, + {"Copy/2", `{"level":"info","copy_has_level":true,"copy_level":"info","copy_msg":"a message","message":"a message"}` + "\n", func(log Logger) { + log = log.Hook(copyHook) + log.Info().Msg("a message") + }}, + {"Multi", `{"level":"error","level_name":"error","has_level":true,"test":"logged"}` + "\n", func(log Logger) { + log = log.Hook(levelNameHook).Hook(simpleHook) + log.Error().Msg("") + }}, + {"Multi/Message", `{"level":"error","level_name":"error","has_level":true,"test":"logged","message":"a message"}` + "\n", func(log Logger) { + log = log.Hook(levelNameHook).Hook(simpleHook) + log.Error().Msg("a message") + }}, + {"Output/single/pre", `{"level":"error","level_name":"error"}` + "\n", func(log Logger) { + ignored := &bytes.Buffer{} + log = New(ignored).Hook(levelNameHook).Output(log.w) + log.Error().Msg("") + }}, + {"Output/single/post", `{"level":"error","level_name":"error"}` + "\n", func(log Logger) { + ignored := &bytes.Buffer{} + log = New(ignored).Output(log.w).Hook(levelNameHook) + log.Error().Msg("") + }}, + {"Output/multi/pre", `{"level":"error","level_name":"error","has_level":true,"test":"logged"}` + "\n", func(log Logger) { + ignored := &bytes.Buffer{} + 
log = New(ignored).Hook(levelNameHook).Hook(simpleHook).Output(log.w) + log.Error().Msg("") + }}, + {"Output/multi/post", `{"level":"error","level_name":"error","has_level":true,"test":"logged"}` + "\n", func(log Logger) { + ignored := &bytes.Buffer{} + log = New(ignored).Output(log.w).Hook(levelNameHook).Hook(simpleHook) + log.Error().Msg("") + }}, + {"Output/mixed", `{"level":"error","level_name":"error","has_level":true,"test":"logged"}` + "\n", func(log Logger) { + ignored := &bytes.Buffer{} + log = New(ignored).Hook(levelNameHook).Output(log.w).Hook(simpleHook) + log.Error().Msg("") + }}, + {"With/single/pre", `{"level":"error","with":"pre","level_name":"error"}` + "\n", func(log Logger) { + log = log.Hook(levelNameHook).With().Str("with", "pre").Logger() + log.Error().Msg("") + }}, + {"With/single/post", `{"level":"error","with":"post","level_name":"error"}` + "\n", func(log Logger) { + log = log.With().Str("with", "post").Logger().Hook(levelNameHook) + log.Error().Msg("") + }}, + {"With/multi/pre", `{"level":"error","with":"pre","level_name":"error","has_level":true,"test":"logged"}` + "\n", func(log Logger) { + log = log.Hook(levelNameHook).Hook(simpleHook).With().Str("with", "pre").Logger() + log.Error().Msg("") + }}, + {"With/multi/post", `{"level":"error","with":"post","level_name":"error","has_level":true,"test":"logged"}` + "\n", func(log Logger) { + log = log.With().Str("with", "post").Logger().Hook(levelNameHook).Hook(simpleHook) + log.Error().Msg("") + }}, + {"With/mixed", `{"level":"error","with":"mixed","level_name":"error","has_level":true,"test":"logged"}` + "\n", func(log Logger) { + log = log.Hook(levelNameHook).With().Str("with", "mixed").Logger().Hook(simpleHook) + log.Error().Msg("") + }}, + {"Discard", "", func(log Logger) { + log = log.Hook(discardHook) + log.Log().Msg("test message") + }}, + {"None", `{"level":"error"}` + "\n", func(log Logger) { + log.Error().Msg("") + }}, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + out := &bytes.Buffer{} + log := New(out) + tt.test(log) + if got, want := decodeIfBinaryToString(out.Bytes()), tt.want; got != want { + t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) + } + }) + } } func BenchmarkHooks(b *testing.B) { diff --git a/internal/cbor/README.md b/internal/cbor/README.md new file mode 100644 index 0000000..92c2e8c --- /dev/null +++ b/internal/cbor/README.md @@ -0,0 +1,56 @@ +## Reference: + CBOR Encoding is described in [RFC7049](https://tools.ietf.org/html/rfc7049) + +## Comparison of JSON vs CBOR + +Two main areas of reduction are: + +1. CPU usage to write a log msg +2. Size (in bytes) of log messages. 
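The encoding is chosen at build time: as the makefile in `internal/cbor/examples` later in this diff shows, the same program emits JSON by default and CBOR when built with `-tags binary_log`. A trimmed-down sketch of such a generator, modeled on `examples/genLog.go` below:

```go
package main

import (
	"os"
	"time"

	"github.com/rs/zerolog"
)

func main() {
	// Round timestamps to the second, as the example generator in this diff does.
	zerolog.TimestampFunc = func() time.Time { return time.Now().Round(time.Second) }

	log := zerolog.New(os.Stdout).With().Timestamp().Logger()
	for i := 0; i < 10; i++ {
		log.Error().Int("Fault", 41650+i).Msg("Some Message")
	}
	// Built normally this prints JSON lines; `go build -tags binary_log`
	// makes the very same code emit CBOR instead.
}
```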
+ + +CPU Usage savings are below: +``` +name JSON time/op CBOR time/op delta +Info-32 15.3ns ± 1% 11.7ns ± 3% -23.78% (p=0.000 n=9+10) +ContextFields-32 16.2ns ± 2% 12.3ns ± 3% -23.97% (p=0.000 n=9+9) +ContextAppend-32 6.70ns ± 0% 6.20ns ± 0% -7.44% (p=0.000 n=9+9) +LogFields-32 66.4ns ± 0% 24.6ns ± 2% -62.89% (p=0.000 n=10+9) +LogArrayObject-32 911ns ±11% 768ns ± 6% -15.64% (p=0.000 n=10+10) +LogFieldType/Floats-32 70.3ns ± 2% 29.5ns ± 1% -57.98% (p=0.000 n=10+10) +LogFieldType/Err-32 14.0ns ± 3% 12.1ns ± 8% -13.20% (p=0.000 n=8+10) +LogFieldType/Dur-32 17.2ns ± 2% 13.1ns ± 1% -24.27% (p=0.000 n=10+9) +LogFieldType/Object-32 54.3ns ±11% 52.3ns ± 7% ~ (p=0.239 n=10+10) +LogFieldType/Ints-32 20.3ns ± 2% 15.1ns ± 2% -25.50% (p=0.000 n=9+10) +LogFieldType/Interfaces-32 642ns ±11% 621ns ± 9% ~ (p=0.118 n=10+10) +LogFieldType/Interface(Objects)-32 635ns ±13% 632ns ± 9% ~ (p=0.592 n=10+10) +LogFieldType/Times-32 294ns ± 0% 27ns ± 1% -90.71% (p=0.000 n=10+9) +LogFieldType/Durs-32 121ns ± 0% 33ns ± 2% -72.44% (p=0.000 n=9+9) +LogFieldType/Interface(Object)-32 56.6ns ± 8% 52.3ns ± 8% -7.54% (p=0.007 n=10+10) +LogFieldType/Errs-32 17.8ns ± 3% 16.1ns ± 2% -9.71% (p=0.000 n=10+9) +LogFieldType/Time-32 40.5ns ± 1% 12.7ns ± 6% -68.66% (p=0.000 n=8+9) +LogFieldType/Bool-32 12.0ns ± 5% 10.2ns ± 2% -15.18% (p=0.000 n=10+8) +LogFieldType/Bools-32 17.2ns ± 2% 12.6ns ± 4% -26.63% (p=0.000 n=10+10) +LogFieldType/Int-32 12.3ns ± 2% 11.2ns ± 4% -9.27% (p=0.000 n=9+10) +LogFieldType/Float-32 16.7ns ± 1% 12.6ns ± 2% -24.42% (p=0.000 n=7+9) +LogFieldType/Str-32 12.7ns ± 7% 11.3ns ± 7% -10.88% (p=0.000 n=10+9) +LogFieldType/Strs-32 20.3ns ± 3% 18.2ns ± 3% -10.25% (p=0.000 n=9+10) +LogFieldType/Interface-32 183ns ±12% 175ns ± 9% ~ (p=0.078 n=10+10) +``` + +Log message size savings is greatly dependent on the number and type of fields in the log message. +Assuming this log message (with an Integer, timestamp and string, in addition to level). + +`{"level":"error","Fault":41650,"time":"2018-04-01T15:18:19-07:00","message":"Some Message"}` + +Two measurements were done for the log file sizes - one without any compression, second +using [compress/zlib](https://golang.org/pkg/compress/zlib/). + +Results for 10,000 log messages: + +| Log Format | Plain File Size (in KB) | Compressed File Size (in KB) | +| :--- | :---: | :---: | +| JSON | 920 | 28 | +| CBOR | 550 | 28 | + +The example used to calculate the above data is available in [Examples](examples). diff --git a/internal/cbor/base.go b/internal/cbor/base.go new file mode 100644 index 0000000..58cd082 --- /dev/null +++ b/internal/cbor/base.go @@ -0,0 +1,11 @@ +package cbor + +type Encoder struct{} + +// AppendKey adds a key (string) to the binary encoded log message +func (e Encoder) AppendKey(dst []byte, key string) []byte { + if len(dst) < 1 { + dst = e.AppendBeginMarker(dst) + } + return e.AppendString(dst, key) +} \ No newline at end of file diff --git a/internal/cbor/cbor.go b/internal/cbor/cbor.go new file mode 100644 index 0000000..969f591 --- /dev/null +++ b/internal/cbor/cbor.go @@ -0,0 +1,100 @@ +// Package cbor provides primitives for storing different data +// in the CBOR (binary) format. CBOR is defined in RFC7049. +package cbor + +import "time" + +const ( + majorOffset = 5 + additionalMax = 23 + + // Non Values. + additionalTypeBoolFalse byte = 20 + additionalTypeBoolTrue byte = 21 + additionalTypeNull byte = 22 + + // Integer (+ve and -ve) Sub-types. 
+ additionalTypeIntUint8 byte = 24 + additionalTypeIntUint16 byte = 25 + additionalTypeIntUint32 byte = 26 + additionalTypeIntUint64 byte = 27 + + // Float Sub-types. + additionalTypeFloat16 byte = 25 + additionalTypeFloat32 byte = 26 + additionalTypeFloat64 byte = 27 + additionalTypeBreak byte = 31 + + // Tag Sub-types. + additionalTypeTimestamp byte = 01 + + // Extended Tags - from https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml + additionalTypeTagNetworkAddr uint16 = 260 + additionalTypeTagNetworkPrefix uint16 = 261 + additionalTypeEmbeddedJSON uint16 = 262 + additionalTypeTagHexString uint16 = 263 + + // Unspecified number of elements. + additionalTypeInfiniteCount byte = 31 +) +const ( + majorTypeUnsignedInt byte = iota << majorOffset // Major type 0 + majorTypeNegativeInt // Major type 1 + majorTypeByteString // Major type 2 + majorTypeUtf8String // Major type 3 + majorTypeArray // Major type 4 + majorTypeMap // Major type 5 + majorTypeTags // Major type 6 + majorTypeSimpleAndFloat // Major type 7 +) + +const ( + maskOutAdditionalType byte = (7 << majorOffset) + maskOutMajorType byte = 31 +) + +const ( + float32Nan = "\xfa\x7f\xc0\x00\x00" + float32PosInfinity = "\xfa\x7f\x80\x00\x00" + float32NegInfinity = "\xfa\xff\x80\x00\x00" + float64Nan = "\xfb\x7f\xf8\x00\x00\x00\x00\x00\x00" + float64PosInfinity = "\xfb\x7f\xf0\x00\x00\x00\x00\x00\x00" + float64NegInfinity = "\xfb\xff\xf0\x00\x00\x00\x00\x00\x00" +) + +// IntegerTimeFieldFormat indicates the format of timestamp decoded +// from an integer (time in seconds). +var IntegerTimeFieldFormat = time.RFC3339 + +// NanoTimeFieldFormat indicates the format of timestamp decoded +// from a float value (time in seconds and nano seconds). +var NanoTimeFieldFormat = time.RFC3339Nano + +func appendCborTypePrefix(dst []byte, major byte, number uint64) []byte { + byteCount := 8 + var minor byte + switch { + case number < 256: + byteCount = 1 + minor = additionalTypeIntUint8 + + case number < 65536: + byteCount = 2 + minor = additionalTypeIntUint16 + + case number < 4294967296: + byteCount = 4 + minor = additionalTypeIntUint32 + + default: + byteCount = 8 + minor = additionalTypeIntUint64 + + } + dst = append(dst, byte(major|minor)) + byteCount-- + for ; byteCount >= 0; byteCount-- { + dst = append(dst, byte(number>>(uint(byteCount)*8))) + } + return dst +} diff --git a/internal/cbor/decode_stream.go b/internal/cbor/decode_stream.go new file mode 100644 index 0000000..e3cf3b7 --- /dev/null +++ b/internal/cbor/decode_stream.go @@ -0,0 +1,614 @@ +package cbor + +// This file contains code to decode a stream of CBOR Data into JSON. + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "net" + "runtime" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +var decodeTimeZone *time.Location + +const hexTable = "0123456789abcdef" + +const isFloat32 = 4 +const isFloat64 = 8 + +func readNBytes(src *bufio.Reader, n int) []byte { + ret := make([]byte, n) + for i := 0; i < n; i++ { + ch, e := src.ReadByte() + if e != nil { + panic(fmt.Errorf("Tried to Read %d Bytes.. But hit end of file", n)) + } + ret[i] = ch + } + return ret +} + +func readByte(src *bufio.Reader) byte { + b, e := src.ReadByte() + if e != nil { + panic(fmt.Errorf("Tried to Read 1 Byte.. 
But hit end of file")) + } + return b +} + +func decodeIntAdditonalType(src *bufio.Reader, minor byte) int64 { + val := int64(0) + if minor <= 23 { + val = int64(minor) + } else { + bytesToRead := 0 + switch minor { + case additionalTypeIntUint8: + bytesToRead = 1 + case additionalTypeIntUint16: + bytesToRead = 2 + case additionalTypeIntUint32: + bytesToRead = 4 + case additionalTypeIntUint64: + bytesToRead = 8 + default: + panic(fmt.Errorf("Invalid Additional Type: %d in decodeInteger (expected <28)", minor)) + } + pb := readNBytes(src, bytesToRead) + for i := 0; i < bytesToRead; i++ { + val = val * 256 + val += int64(pb[i]) + } + } + return val +} + +func decodeInteger(src *bufio.Reader) int64 { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeUnsignedInt && major != majorTypeNegativeInt { + panic(fmt.Errorf("Major type is: %d in decodeInteger!! (expected 0 or 1)", major)) + } + val := decodeIntAdditonalType(src, minor) + if major == 0 { + return val + } + return (-1 - val) +} + +func decodeFloat(src *bufio.Reader) (float64, int) { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeSimpleAndFloat { + panic(fmt.Errorf("Incorrect Major type is: %d in decodeFloat", major)) + } + + switch minor { + case additionalTypeFloat16: + panic(fmt.Errorf("float16 is not suppported in decodeFloat")) + + case additionalTypeFloat32: + pb := readNBytes(src, 4) + switch string(pb) { + case float32Nan: + return math.NaN(), isFloat32 + case float32PosInfinity: + return math.Inf(0), isFloat32 + case float32NegInfinity: + return math.Inf(-1), isFloat32 + } + n := uint32(0) + for i := 0; i < 4; i++ { + n = n * 256 + n += uint32(pb[i]) + } + val := math.Float32frombits(n) + return float64(val), isFloat32 + case additionalTypeFloat64: + pb := readNBytes(src, 8) + switch string(pb) { + case float64Nan: + return math.NaN(), isFloat64 + case float64PosInfinity: + return math.Inf(0), isFloat64 + case float64NegInfinity: + return math.Inf(-1), isFloat64 + } + n := uint64(0) + for i := 0; i < 8; i++ { + n = n * 256 + n += uint64(pb[i]) + } + val := math.Float64frombits(n) + return val, isFloat64 + } + panic(fmt.Errorf("Invalid Additional Type: %d in decodeFloat", minor)) +} + +func decodeStringComplex(dst []byte, s string, pos uint) []byte { + i := int(pos) + start := 0 + + for i < len(s) { + b := s[i] + if b >= utf8.RuneSelf { + r, size := utf8.DecodeRuneInString(s[i:]) + if r == utf8.RuneError && size == 1 { + // In case of error, first append previous simple characters to + // the byte slice if any and append a replacement character code + // in place of the invalid sequence. + if start < i { + dst = append(dst, s[start:i]...) + } + dst = append(dst, `\ufffd`...) + i += size + start = i + continue + } + i += size + continue + } + if b >= 0x20 && b <= 0x7e && b != '\\' && b != '"' { + i++ + continue + } + // We encountered a character that needs to be encoded. + // Let's append the previous simple characters to the byte slice + // and switch our operation to read and encode the remainder + // characters byte-by-byte. + if start < i { + dst = append(dst, s[start:i]...) 
+ } + switch b { + case '"', '\\': + dst = append(dst, '\\', b) + case '\b': + dst = append(dst, '\\', 'b') + case '\f': + dst = append(dst, '\\', 'f') + case '\n': + dst = append(dst, '\\', 'n') + case '\r': + dst = append(dst, '\\', 'r') + case '\t': + dst = append(dst, '\\', 't') + default: + dst = append(dst, '\\', 'u', '0', '0', hexTable[b>>4], hexTable[b&0xF]) + } + i++ + start = i + } + if start < len(s) { + dst = append(dst, s[start:]...) + } + return dst +} + +func decodeString(src *bufio.Reader, noQuotes bool) []byte { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeByteString { + panic(fmt.Errorf("Major type is: %d in decodeString", major)) + } + result := []byte{} + if !noQuotes { + result = append(result, '"') + } + length := decodeIntAdditonalType(src, minor) + len := int(length) + pbs := readNBytes(src, len) + result = append(result, pbs...) + if noQuotes { + return result + } + return append(result, '"') +} + +func decodeUTF8String(src *bufio.Reader) []byte { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeUtf8String { + panic(fmt.Errorf("Major type is: %d in decodeUTF8String", major)) + } + result := []byte{'"'} + length := decodeIntAdditonalType(src, minor) + len := int(length) + pbs := readNBytes(src, len) + + for i := 0; i < len; i++ { + // Check if the character needs encoding. Control characters, slashes, + // and the double quote need json encoding. Bytes above the ascii + // boundary needs utf8 encoding. + if pbs[i] < 0x20 || pbs[i] > 0x7e || pbs[i] == '\\' || pbs[i] == '"' { + // We encountered a character that needs to be encoded. Switch + // to complex version of the algorithm. + dst := []byte{'"'} + dst = decodeStringComplex(dst, string(pbs), uint(i)) + return append(dst, '"') + } + } + // The string has no need for encoding an therefore is directly + // appended to the byte slice. + result = append(result, pbs...) 
+ return append(result, '"') +} + +func array2Json(src *bufio.Reader, dst io.Writer) { + dst.Write([]byte{'['}) + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeArray { + panic(fmt.Errorf("Major type is: %d in array2Json", major)) + } + len := 0 + unSpecifiedCount := false + if minor == additionalTypeInfiniteCount { + unSpecifiedCount = true + } else { + length := decodeIntAdditonalType(src, minor) + len = int(length) + } + for i := 0; unSpecifiedCount || i < len; i++ { + if unSpecifiedCount { + pb, e := src.Peek(1) + if e != nil { + panic(e) + } + if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) { + readByte(src) + break + } + } + cbor2JsonOneObject(src, dst) + if unSpecifiedCount { + pb, e := src.Peek(1) + if e != nil { + panic(e) + } + if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) { + readByte(src) + break + } + dst.Write([]byte{','}) + } else if i+1 < len { + dst.Write([]byte{','}) + } + } + dst.Write([]byte{']'}) +} + +func map2Json(src *bufio.Reader, dst io.Writer) { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeMap { + panic(fmt.Errorf("Major type is: %d in map2Json", major)) + } + len := 0 + unSpecifiedCount := false + if minor == additionalTypeInfiniteCount { + unSpecifiedCount = true + } else { + length := decodeIntAdditonalType(src, minor) + len = int(length) + } + dst.Write([]byte{'{'}) + for i := 0; unSpecifiedCount || i < len; i++ { + if unSpecifiedCount { + pb, e := src.Peek(1) + if e != nil { + panic(e) + } + if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) { + readByte(src) + break + } + } + cbor2JsonOneObject(src, dst) + if i%2 == 0 { + // Even position values are keys. + dst.Write([]byte{':'}) + } else { + if unSpecifiedCount { + pb, e := src.Peek(1) + if e != nil { + panic(e) + } + if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) { + readByte(src) + break + } + dst.Write([]byte{','}) + } else if i+1 < len { + dst.Write([]byte{','}) + } + } + } + dst.Write([]byte{'}'}) +} + +func decodeTagData(src *bufio.Reader) []byte { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeTags { + panic(fmt.Errorf("Major type is: %d in decodeTagData", major)) + } + switch minor { + case additionalTypeTimestamp: + return decodeTimeStamp(src) + + // Tag value is larger than 256 (so uint16). + case additionalTypeIntUint16: + val := decodeIntAdditonalType(src, minor) + + switch uint16(val) { + case additionalTypeEmbeddedJSON: + pb := readByte(src) + dataMajor := pb & maskOutAdditionalType + if dataMajor != majorTypeByteString { + panic(fmt.Errorf("Unsupported embedded Type: %d in decodeEmbeddedJSON", dataMajor)) + } + src.UnreadByte() + return decodeString(src, true) + + case additionalTypeTagNetworkAddr: + octets := decodeString(src, true) + ss := []byte{'"'} + switch len(octets) { + case 6: // MAC address. + ha := net.HardwareAddr(octets) + ss = append(append(ss, ha.String()...), '"') + case 4: // IPv4 address. + fallthrough + case 16: // IPv6 address. 
+ ip := net.IP(octets) + ss = append(append(ss, ip.String()...), '"') + default: + panic(fmt.Errorf("Unexpected Network Address length: %d (expected 4,6,16)", len(octets))) + } + return ss + + case additionalTypeTagNetworkPrefix: + pb := readByte(src) + if pb != byte(majorTypeMap|0x1) { + panic(fmt.Errorf("IP Prefix is NOT of MAP of 1 elements as expected")) + } + octets := decodeString(src, true) + val := decodeInteger(src) + ip := net.IP(octets) + var mask net.IPMask + pfxLen := int(val) + if len(octets) == 4 { + mask = net.CIDRMask(pfxLen, 32) + } else { + mask = net.CIDRMask(pfxLen, 128) + } + ipPfx := net.IPNet{IP: ip, Mask: mask} + ss := []byte{'"'} + ss = append(append(ss, ipPfx.String()...), '"') + return ss + + case additionalTypeTagHexString: + octets := decodeString(src, true) + ss := []byte{'"'} + for _, v := range octets { + ss = append(ss, hexTable[v>>4], hexTable[v&0x0f]) + } + return append(ss, '"') + + default: + panic(fmt.Errorf("Unsupported Additional Tag Type: %d in decodeTagData", val)) + } + } + panic(fmt.Errorf("Unsupported Additional Type: %d in decodeTagData", minor)) +} + +func decodeTimeStamp(src *bufio.Reader) []byte { + pb := readByte(src) + src.UnreadByte() + tsMajor := pb & maskOutAdditionalType + if tsMajor == majorTypeUnsignedInt || tsMajor == majorTypeNegativeInt { + n := decodeInteger(src) + t := time.Unix(n, 0) + if decodeTimeZone != nil { + t = t.In(decodeTimeZone) + } else { + t = t.In(time.UTC) + } + tsb := []byte{} + tsb = append(tsb, '"') + tsb = t.AppendFormat(tsb, IntegerTimeFieldFormat) + tsb = append(tsb, '"') + return tsb + } else if tsMajor == majorTypeSimpleAndFloat { + n, _ := decodeFloat(src) + secs := int64(n) + n -= float64(secs) + n *= float64(1e9) + t := time.Unix(secs, int64(n)) + if decodeTimeZone != nil { + t = t.In(decodeTimeZone) + } else { + t = t.In(time.UTC) + } + tsb := []byte{} + tsb = append(tsb, '"') + tsb = t.AppendFormat(tsb, NanoTimeFieldFormat) + tsb = append(tsb, '"') + return tsb + } + panic(fmt.Errorf("TS format is neigther int nor float: %d", tsMajor)) +} + +func decodeSimpleFloat(src *bufio.Reader) []byte { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeSimpleAndFloat { + panic(fmt.Errorf("Major type is: %d in decodeSimpleFloat", major)) + } + switch minor { + case additionalTypeBoolTrue: + return []byte("true") + case additionalTypeBoolFalse: + return []byte("false") + case additionalTypeNull: + return []byte("null") + case additionalTypeFloat16: + fallthrough + case additionalTypeFloat32: + fallthrough + case additionalTypeFloat64: + src.UnreadByte() + v, bc := decodeFloat(src) + ba := []byte{} + switch { + case math.IsNaN(v): + return []byte("\"NaN\"") + case math.IsInf(v, 1): + return []byte("\"+Inf\"") + case math.IsInf(v, -1): + return []byte("\"-Inf\"") + } + if bc == isFloat32 { + ba = strconv.AppendFloat(ba, v, 'f', -1, 32) + } else if bc == isFloat64 { + ba = strconv.AppendFloat(ba, v, 'f', -1, 64) + } else { + panic(fmt.Errorf("Invalid Float precision from decodeFloat: %d", bc)) + } + return ba + default: + panic(fmt.Errorf("Invalid Additional Type: %d in decodeSimpleFloat", minor)) + } +} + +func cbor2JsonOneObject(src *bufio.Reader, dst io.Writer) { + pb, e := src.Peek(1) + if e != nil { + panic(e) + } + major := (pb[0] & maskOutAdditionalType) + + switch major { + case majorTypeUnsignedInt: + fallthrough + case majorTypeNegativeInt: + n := decodeInteger(src) + dst.Write([]byte(strconv.Itoa(int(n)))) + + case majorTypeByteString: + s 
:= decodeString(src, false) + dst.Write(s) + + case majorTypeUtf8String: + s := decodeUTF8String(src) + dst.Write(s) + + case majorTypeArray: + array2Json(src, dst) + + case majorTypeMap: + map2Json(src, dst) + + case majorTypeTags: + s := decodeTagData(src) + dst.Write(s) + + case majorTypeSimpleAndFloat: + s := decodeSimpleFloat(src) + dst.Write(s) + } +} + +func moreBytesToRead(src *bufio.Reader) bool { + _, e := src.ReadByte() + if e == nil { + src.UnreadByte() + return true + } + return false +} + +// Cbor2JsonManyObjects decodes all the CBOR Objects read from src +// reader. It keeps on decoding until reader returns EOF (error when reading). +// Decoded string is written to the dst. At the end of every CBOR Object +// newline is written to the output stream. +// +// Returns error (if any) that was encountered during decode. +// The child functions will generate a panic when error is encountered and +// this function will recover non-runtime Errors and return the reason as error. +func Cbor2JsonManyObjects(src io.Reader, dst io.Writer) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = r.(error) + } + }() + bufRdr := bufio.NewReader(src) + for moreBytesToRead(bufRdr) { + cbor2JsonOneObject(bufRdr, dst) + dst.Write([]byte("\n")) + } + return nil +} + +// Detect if the bytes to be printed is Binary or not. +func binaryFmt(p []byte) bool { + if len(p) > 0 && p[0] > 0x7F { + return true + } + return false +} + +func getReader(str string) *bufio.Reader { + return bufio.NewReader(strings.NewReader(str)) +} + +// DecodeIfBinaryToString converts a binary formatted log msg to a +// JSON formatted String Log message - suitable for printing to Console/Syslog. +func DecodeIfBinaryToString(in []byte) string { + if binaryFmt(in) { + var b bytes.Buffer + Cbor2JsonManyObjects(strings.NewReader(string(in)), &b) + return b.String() + } + return string(in) +} + +// DecodeObjectToStr checks if the input is a binary format, if so, +// it will decode a single Object and return the decoded string. +func DecodeObjectToStr(in []byte) string { + if binaryFmt(in) { + var b bytes.Buffer + cbor2JsonOneObject(getReader(string(in)), &b) + return b.String() + } + return string(in) +} + +// DecodeIfBinaryToBytes checks if the input is a binary format, if so, +// it will decode all Objects and return the decoded string as byte array. 
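A minimal sketch of the round trip these decode helpers provide, written as it might look inside the `cbor` package itself; the input bytes and the expected JSON are lifted from the composite test cases in `decoder_test.go` further down.

```go
package cbor

import "fmt"

// Sketch of the decode round trip; bytes and output come from the composite
// test cases in decoder_test.go.
func Example_decodeIfBinaryToString() {
	in := []byte("\xbf\x64IETF\x20\x65Array\x9f\x20\x00\x18\xc8\x14\xff\xff")

	// The first byte (0xbf) is above 0x7F, so the input is treated as CBOR
	// and every object in the stream is converted to a JSON line.
	fmt.Print(DecodeIfBinaryToString(in))
	// Output: {"IETF":-1,"Array":[-1,0,200,20]}
}
```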
+func DecodeIfBinaryToBytes(in []byte) []byte { + if binaryFmt(in) { + var b bytes.Buffer + Cbor2JsonManyObjects(bytes.NewReader(in), &b) + return b.Bytes() + } + return in +} diff --git a/internal/cbor/decoder_test.go b/internal/cbor/decoder_test.go new file mode 100644 index 0000000..812e27f --- /dev/null +++ b/internal/cbor/decoder_test.go @@ -0,0 +1,205 @@ +package cbor + +import ( + "bytes" + "encoding/hex" + "testing" + "time" +) + +func TestDecodeInteger(t *testing.T) { + for _, tc := range integerTestCases { + gotv := decodeInteger(getReader(tc.binary)) + if gotv != int64(tc.val) { + t.Errorf("decodeInteger(0x%s)=0x%d, want: 0x%d", + hex.EncodeToString([]byte(tc.binary)), gotv, tc.val) + } + } +} + +func TestDecodeString(t *testing.T) { + for _, tt := range encodeStringTests { + got := decodeUTF8String(getReader(tt.binary)) + if string(got) != "\""+tt.json+"\"" { + t.Errorf("DecodeString(0x%s)=%s, want:\"%s\"\n", hex.EncodeToString([]byte(tt.binary)), string(got), + hex.EncodeToString([]byte(tt.json))) + } + } +} + +func TestDecodeArray(t *testing.T) { + for _, tc := range integerArrayTestCases { + buf := bytes.NewBuffer([]byte{}) + array2Json(getReader(tc.binary), buf) + if buf.String() != tc.json { + t.Errorf("array2Json(0x%s)=%s, want: %s", hex.EncodeToString([]byte(tc.binary)), buf.String(), tc.json) + } + } + //Unspecified Length Array + var infiniteArrayTestCases = []struct { + in string + out string + }{ + {"\x9f\x20\x00\x18\xc8\x14\xff", "[-1,0,200,20]"}, + {"\x9f\x38\xc7\x29\x18\xc8\x19\x01\x90\xff", "[-200,-10,200,400]"}, + {"\x9f\x01\x02\x03\xff", "[1,2,3]"}, + {"\x9f\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x18\x18\x19\xff", + "[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25]"}, + } + for _, tc := range infiniteArrayTestCases { + buf := bytes.NewBuffer([]byte{}) + array2Json(getReader(tc.in), buf) + if buf.String() != tc.out { + t.Errorf("array2Json(0x%s)=%s, want: %s", hex.EncodeToString([]byte(tc.out)), buf.String(), tc.out) + } + } + for _, tc := range booleanArrayTestCases { + buf := bytes.NewBuffer([]byte{}) + array2Json(getReader(tc.binary), buf) + if buf.String() != tc.json { + t.Errorf("array2Json(0x%s)=%s, want: %s", hex.EncodeToString([]byte(tc.binary)), buf.String(), tc.json) + } + } + //TODO add cases for arrays of other types +} + +var infiniteMapDecodeTestCases = []struct { + bin []byte + json string +}{ + {[]byte("\xbf\x64IETF\x20\xff"), "{\"IETF\":-1}"}, + {[]byte("\xbf\x65Array\x84\x20\x00\x18\xc8\x14\xff"), "{\"Array\":[-1,0,200,20]}"}, +} + +var mapDecodeTestCases = []struct { + bin []byte + json string +}{ + {[]byte("\xa2\x64IETF\x20"), "{\"IETF\":-1}"}, + {[]byte("\xa2\x65Array\x84\x20\x00\x18\xc8\x14"), "{\"Array\":[-1,0,200,20]}"}, +} + +func TestDecodeMap(t *testing.T) { + for _, tc := range mapDecodeTestCases { + buf := bytes.NewBuffer([]byte{}) + map2Json(getReader(string(tc.bin)), buf) + if buf.String() != tc.json { + t.Errorf("map2Json(0x%s)=%s, want: %s", hex.EncodeToString(tc.bin), buf.String(), tc.json) + } + } + for _, tc := range infiniteMapDecodeTestCases { + buf := bytes.NewBuffer([]byte{}) + map2Json(getReader(string(tc.bin)), buf) + if buf.String() != tc.json { + t.Errorf("map2Json(0x%s)=%s, want: %s", hex.EncodeToString(tc.bin), buf.String(), tc.json) + } + } +} + +func TestDecodeBool(t *testing.T) { + for _, tc := range booleanTestCases { + got := decodeSimpleFloat(getReader(tc.binary)) + if string(got) != tc.json { + t.Errorf("decodeSimpleFloat(0x%s)=%s, 
want:%s", hex.EncodeToString([]byte(tc.binary)), string(got), tc.json) + } + } +} + +func TestDecodeFloat(t *testing.T) { + for _, tc := range float32TestCases { + got, _ := decodeFloat(getReader(tc.binary)) + if got != float64(tc.val) { + t.Errorf("decodeFloat(0x%s)=%f, want:%f", hex.EncodeToString([]byte(tc.binary)), got, tc.val) + } + } +} + +func TestDecodeTimestamp(t *testing.T) { + decodeTimeZone, _ = time.LoadLocation("UTC") + for _, tc := range timeIntegerTestcases { + tm := decodeTagData(getReader(tc.binary)) + if string(tm) != "\""+tc.rfcStr+"\"" { + t.Errorf("decodeFloat(0x%s)=%s, want:%s", hex.EncodeToString([]byte(tc.binary)), tm, tc.rfcStr) + } + } + for _, tc := range timeFloatTestcases { + tm := decodeTagData(getReader(tc.out)) + //Since we convert to float and back - it may be slightly off - so + //we cannot check for exact equality instead, we'll check it is + //very close to each other Less than a Microsecond (lets not yet do nanosec) + + got, _ := time.Parse(string(tm), string(tm)) + want, _ := time.Parse(tc.rfcStr, tc.rfcStr) + if got.Sub(want) > time.Microsecond { + t.Errorf("decodeFloat(0x%s)=%s, want:%s", hex.EncodeToString([]byte(tc.out)), tm, tc.rfcStr) + } + } +} + +func TestDecodeNetworkAddr(t *testing.T) { + for _, tc := range ipAddrTestCases { + d1 := decodeTagData(getReader(tc.binary)) + if string(d1) != tc.text { + t.Errorf("decodeNetworkAddr(0x%s)=%s, want:%s", hex.EncodeToString([]byte(tc.binary)), d1, tc.text) + } + } +} + +func TestDecodeMACAddr(t *testing.T) { + for _, tc := range macAddrTestCases { + d1 := decodeTagData(getReader(tc.binary)) + if string(d1) != tc.text { + t.Errorf("decodeNetworkAddr(0x%s)=%s, want:%s", hex.EncodeToString([]byte(tc.binary)), d1, tc.text) + } + } +} + +func TestDecodeIPPrefix(t *testing.T) { + for _, tc := range IPPrefixTestCases { + d1 := decodeTagData(getReader(tc.binary)) + if string(d1) != tc.text { + t.Errorf("decodeIPPrefix(0x%s)=%s, want:%s", hex.EncodeToString([]byte(tc.binary)), d1, tc.text) + } + } +} + +var compositeCborTestCases = []struct { + binary []byte + json string +}{ + {[]byte("\xbf\x64IETF\x20\x65Array\x9f\x20\x00\x18\xc8\x14\xff\xff"), "{\"IETF\":-1,\"Array\":[-1,0,200,20]}\n"}, + {[]byte("\xbf\x64IETF\x64YES!\x65Array\x9f\x20\x00\x18\xc8\x14\xff\xff"), "{\"IETF\":\"YES!\",\"Array\":[-1,0,200,20]}\n"}, +} + +func TestDecodeCbor2Json(t *testing.T) { + for _, tc := range compositeCborTestCases { + buf := bytes.NewBuffer([]byte{}) + err := Cbor2JsonManyObjects(getReader(string(tc.binary)), buf) + if buf.String() != tc.json || err != nil { + t.Errorf("cbor2JsonManyObjects(0x%s)=%s, want: %s, err:%s", hex.EncodeToString(tc.binary), buf.String(), tc.json, err.Error()) + } + } +} + +var negativeCborTestCases = []struct { + binary []byte + errStr string +}{ + {[]byte("\xb9\x64IETF\x20\x65Array\x9f\x20\x00\x18\xc8\x14"), "Tried to Read 18 Bytes.. But hit end of file"}, + {[]byte("\xbf\x64IETF\x20\x65Array\x9f\x20\x00\x18\xc8\x14"), "EOF"}, + {[]byte("\xbf\x14IETF\x20\x65Array\x9f\x20\x00\x18\xc8\x14"), "Tried to Read 40736 Bytes.. But hit end of file"}, + {[]byte("\xbf\x64IETF"), "EOF"}, + {[]byte("\xbf\x64IETF\x20\x65Array\x9f\x20\x00\x18\xc8\xff\xff\xff"), "Invalid Additional Type: 31 in decodeSimpleFloat"}, + {[]byte("\xbf\x64IETF\x20\x65Array"), "EOF"}, + {[]byte("\xbf\x64"), "Tried to Read 4 Bytes.. 
But hit end of file"}, +} + +func TestDecodeNegativeCbor2Json(t *testing.T) { + for _, tc := range negativeCborTestCases { + buf := bytes.NewBuffer([]byte{}) + err := Cbor2JsonManyObjects(getReader(string(tc.binary)), buf) + if err == nil || err.Error() != tc.errStr { + t.Errorf("Expected error got:%s, want:%s", err, tc.errStr) + } + } +} diff --git a/internal/cbor/examples/genLog.go b/internal/cbor/examples/genLog.go new file mode 100644 index 0000000..43a2000 --- /dev/null +++ b/internal/cbor/examples/genLog.go @@ -0,0 +1,55 @@ +package main + +import ( + "compress/zlib" + "flag" + "io" + "log" + "os" + "time" + + "github.com/rs/zerolog" +) + +func writeLog(fname string, count int, useCompress bool) { + opFile := os.Stdout + if fname != "" { + fil, _ := os.Create(fname) + opFile = fil + defer func() { + if err := fil.Close(); err != nil { + log.Fatal(err) + } + }() + } + + var f io.WriteCloser = opFile + if useCompress { + f = zlib.NewWriter(f) + defer func() { + if err := f.Close(); err != nil { + log.Fatal(err) + } + }() + + } + + zerolog.TimestampFunc = func() time.Time { return time.Now().Round(time.Second) } + log := zerolog.New(f).With(). + Timestamp(). + Logger() + for i := 0; i < count; i++ { + log.Error(). + Int("Fault", 41650+i).Msg("Some Message") + } +} + +func main() { + outFile := flag.String("out", "", "Output File to which logs will be written to (WILL overwrite if already present).") + numLogs := flag.Int("num", 10, "Number of log messages to generate.") + doCompress := flag.Bool("compress", false, "Enable inline compressed writer") + + flag.Parse() + + writeLog(*outFile, *numLogs, *doCompress) +} diff --git a/internal/cbor/examples/makefile b/internal/cbor/examples/makefile new file mode 100644 index 0000000..d28075d --- /dev/null +++ b/internal/cbor/examples/makefile @@ -0,0 +1,10 @@ +all: genLogJSON genLogCBOR + +genLogJSON: genLog.go + go build -o genLogJSON genLog.go + +genLogCBOR: genLog.go + go build -tags binary_log -o genLogCBOR genLog.go + +clean: + rm -f genLogJSON genLogCBOR diff --git a/internal/cbor/string.go b/internal/cbor/string.go new file mode 100644 index 0000000..ff42afa --- /dev/null +++ b/internal/cbor/string.go @@ -0,0 +1,68 @@ +package cbor + +// AppendStrings encodes and adds an array of strings to the dst byte array. +func (e Encoder) AppendStrings(dst []byte, vals []string) []byte { + major := majorTypeArray + l := len(vals) + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendString(dst, v) + } + return dst +} + +// AppendString encodes and adds a string to the dst byte array. +func (Encoder) AppendString(dst []byte, s string) []byte { + major := majorTypeUtf8String + + l := len(s) + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, majorTypeUtf8String, uint64(l)) + } + return append(dst, s...) +} + +// AppendBytes encodes and adds an array of bytes to the dst byte array. +func (Encoder) AppendBytes(dst, s []byte) []byte { + major := majorTypeByteString + + l := len(s) + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + return append(dst, s...) +} + +// AppendEmbeddedJSON adds a tag and embeds input JSON as such. 
+func AppendEmbeddedJSON(dst, s []byte) []byte { + major := majorTypeTags + minor := additionalTypeEmbeddedJSON + + // Append the TAG to indicate this is Embedded JSON. + dst = append(dst, byte(major|additionalTypeIntUint16)) + dst = append(dst, byte(minor>>8)) + dst = append(dst, byte(minor&0xff)) + + // Append the JSON Object as Byte String. + major = majorTypeByteString + + l := len(s) + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + return append(dst, s...) +} diff --git a/internal/cbor/string_test.go b/internal/cbor/string_test.go new file mode 100644 index 0000000..0e3e9ab --- /dev/null +++ b/internal/cbor/string_test.go @@ -0,0 +1,118 @@ +package cbor + +import ( + "bytes" + "testing" +) + +var encodeStringTests = []struct { + plain string + binary string + json string //begin and end quotes are implied +}{ + {"", "\x60", ""}, + {"\\", "\x61\x5c", "\\\\"}, + {"\x00", "\x61\x00", "\\u0000"}, + {"\x01", "\x61\x01", "\\u0001"}, + {"\x02", "\x61\x02", "\\u0002"}, + {"\x03", "\x61\x03", "\\u0003"}, + {"\x04", "\x61\x04", "\\u0004"}, + {"*", "\x61*", "*"}, + {"a", "\x61a", "a"}, + {"IETF", "\x64IETF", "IETF"}, + {"abcdefghijklmnopqrstuvwxyzABCD", "\x78\x1eabcdefghijklmnopqrstuvwxyzABCD", "abcdefghijklmnopqrstuvwxyzABCD"}, + {"<------------------------------------ This is a 100 character string ----------------------------->" + + "<------------------------------------ This is a 100 character string ----------------------------->" + + "<------------------------------------ This is a 100 character string ----------------------------->", + "\x79\x01\x2c<------------------------------------ This is a 100 character string ----------------------------->" + + "<------------------------------------ This is a 100 character string ----------------------------->" + + "<------------------------------------ This is a 100 character string ----------------------------->", + "<------------------------------------ This is a 100 character string ----------------------------->" + + "<------------------------------------ This is a 100 character string ----------------------------->" + + "<------------------------------------ This is a 100 character string ----------------------------->"}, + {"emoji \u2764\ufe0f!", "\x6demoji ❤️!", "emoji \u2764\ufe0f!"}, +} + +var encodeByteTests = []struct { + plain []byte + binary string +}{ + {[]byte{}, "\x40"}, + {[]byte("\\"), "\x41\x5c"}, + {[]byte("\x00"), "\x41\x00"}, + {[]byte("\x01"), "\x41\x01"}, + {[]byte("\x02"), "\x41\x02"}, + {[]byte("\x03"), "\x41\x03"}, + {[]byte("\x04"), "\x41\x04"}, + {[]byte("*"), "\x41*"}, + {[]byte("a"), "\x41a"}, + {[]byte("IETF"), "\x44IETF"}, + {[]byte("abcdefghijklmnopqrstuvwxyzABCD"), "\x58\x1eabcdefghijklmnopqrstuvwxyzABCD"}, + {[]byte("<------------------------------------ This is a 100 character string ----------------------------->" + + "<------------------------------------ This is a 100 character string ----------------------------->" + + "<------------------------------------ This is a 100 character string ----------------------------->"), + "\x59\x01\x2c<------------------------------------ This is a 100 character string ----------------------------->" + + "<------------------------------------ This is a 100 character string ----------------------------->" + + "<------------------------------------ This is a 100 character string ----------------------------->"}, + {[]byte("emoji \u2764\ufe0f!"), "\x4demoji ❤️!"}, +} + +func 
TestAppendString(t *testing.T) { + for _, tt := range encodeStringTests { + b := enc.AppendString([]byte{}, tt.plain) + if got, want := string(b), tt.binary; got != want { + t.Errorf("appendString(%q) = %#q, want %#q", tt.plain, got, want) + } + } + //Test a large string > 65535 length + + var buffer bytes.Buffer + for i := 0; i < 0x00011170; i++ { //70,000 character string + buffer.WriteString("a") + } + inp := buffer.String() + want := "\x7a\x00\x01\x11\x70" + inp + b := enc.AppendString([]byte{}, inp) + if got := string(b); got != want { + t.Errorf("appendString(%q) = %#q, want %#q", inp, got, want) + } +} + +func TestAppendBytes(t *testing.T) { + for _, tt := range encodeByteTests { + b := enc.AppendBytes([]byte{}, tt.plain) + if got, want := string(b), tt.binary; got != want { + t.Errorf("appendString(%q) = %#q, want %#q", tt.plain, got, want) + } + } + //Test a large string > 65535 length + + inp := []byte{} + for i := 0; i < 0x00011170; i++ { //70,000 character string + inp = append(inp, byte('a')) + } + want := "\x5a\x00\x01\x11\x70" + string(inp) + b := enc.AppendBytes([]byte{}, inp) + if got := string(b); got != want { + t.Errorf("appendString(%q) = %#q, want %#q", inp, got, want) + } +} +func BenchmarkAppendString(b *testing.B) { + tests := map[string]string{ + "NoEncoding": `aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa`, + "EncodingFirst": `"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa`, + "EncodingMiddle": `aaaaaaaaaaaaaaaaaaaaaaaaa"aaaaaaaaaaaaaaaaaaaaaaaa`, + "EncodingLast": `aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"`, + "MultiBytesFirst": `❤️aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa`, + "MultiBytesMiddle": `aaaaaaaaaaaaaaaaaaaaaaaaa❤️aaaaaaaaaaaaaaaaaaaaaaaa`, + "MultiBytesLast": `aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa❤️`, + } + for name, str := range tests { + b.Run(name, func(b *testing.B) { + buf := make([]byte, 0, 120) + for i := 0; i < b.N; i++ { + _ = enc.AppendString(buf, str) + } + }) + } +} diff --git a/internal/cbor/time.go b/internal/cbor/time.go new file mode 100644 index 0000000..12f6a1d --- /dev/null +++ b/internal/cbor/time.go @@ -0,0 +1,93 @@ +package cbor + +import ( + "time" +) + +func appendIntegerTimestamp(dst []byte, t time.Time) []byte { + major := majorTypeTags + minor := additionalTypeTimestamp + dst = append(dst, byte(major|minor)) + secs := t.Unix() + var val uint64 + if secs < 0 { + major = majorTypeNegativeInt + val = uint64(-secs - 1) + } else { + major = majorTypeUnsignedInt + val = uint64(secs) + } + dst = appendCborTypePrefix(dst, major, uint64(val)) + return dst +} + +func (e Encoder) appendFloatTimestamp(dst []byte, t time.Time) []byte { + major := majorTypeTags + minor := additionalTypeTimestamp + dst = append(dst, byte(major|minor)) + secs := t.Unix() + nanos := t.Nanosecond() + var val float64 + val = float64(secs)*1.0 + float64(nanos)*1E-9 + return e.AppendFloat64(dst, val) +} + +// AppendTime encodes and adds a timestamp to the dst byte array. +func (e Encoder) AppendTime(dst []byte, t time.Time, unused string) []byte { + utc := t.UTC() + if utc.Nanosecond() == 0 { + return appendIntegerTimestamp(dst, utc) + } + return e.appendFloatTimestamp(dst, utc) +} + +// AppendTimes encodes and adds an array of timestamps to the dst byte array. 
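+// An empty slice is emitted as an empty indefinite-length array
+// (AppendArrayStart followed immediately by AppendArrayEnd); otherwise a
+// fixed-length array header is written, followed by each timestamp encoded
+// exactly as AppendTime does (epoch tag plus an integer or float payload).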
+func (e Encoder) AppendTimes(dst []byte, vals []time.Time, unused string) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + + for _, t := range vals { + dst = e.AppendTime(dst, t, unused) + } + return dst +} + +// AppendDuration encodes and adds a duration to the dst byte array. +// useInt field indicates whether to store the duration as seconds (integer) or +// as seconds+nanoseconds (float). +func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte { + if useInt { + return e.AppendInt64(dst, int64(d/unit)) + } + return e.AppendFloat64(dst, float64(d)/float64(unit)) +} + +// AppendDurations encodes and adds an array of durations to the dst byte array. +// useInt field indicates whether to store the duration as seconds (integer) or +// as seconds+nanoseconds (float). +func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, d := range vals { + dst = e.AppendDuration(dst, d, unit, useInt) + } + return dst +} diff --git a/internal/cbor/time_test.go b/internal/cbor/time_test.go new file mode 100644 index 0000000..d285e35 --- /dev/null +++ b/internal/cbor/time_test.go @@ -0,0 +1,99 @@ +package cbor + +import ( + "encoding/hex" + "fmt" + "math" + "testing" + "time" +) + +func TestAppendTimeNow(t *testing.T) { + tm := time.Now() + s := enc.AppendTime([]byte{}, tm, "unused") + got := string(s) + + tm1 := float64(tm.Unix()) + float64(tm.Nanosecond())*1E-9 + tm2 := math.Float64bits(tm1) + var tm3 [8]byte + for i := uint(0); i < 8; i++ { + tm3[i] = byte(tm2 >> ((8 - i - 1) * 8)) + } + want := append([]byte{0xc1, 0xfb}, tm3[:]...) + if got != string(want) { + t.Errorf("Appendtime(%s)=0x%s, want: 0x%s", + "time.Now()", hex.EncodeToString(s), + hex.EncodeToString(want)) + } +} + +var timeIntegerTestcases = []struct { + txt string + binary string + rfcStr string +}{ + {"2013-02-03T19:54:00-08:00", "\xc1\x1a\x51\x0f\x30\xd8", "2013-02-04T03:54:00Z"}, + {"1950-02-03T19:54:00-08:00", "\xc1\x3a\x25\x71\x93\xa7", "1950-02-04T03:54:00Z"}, +} + +func TestAppendTimePastPresentInteger(t *testing.T) { + for _, tt := range timeIntegerTestcases { + tin, err := time.Parse(time.RFC3339, tt.txt) + if err != nil { + fmt.Println("Cannot parse input", tt.txt, ".. Skipping!", err) + continue + } + b := enc.AppendTime([]byte{}, tin, "unused") + if got, want := string(b), tt.binary; got != want { + t.Errorf("appendString(%s) = 0x%s, want 0x%s", tt.txt, + hex.EncodeToString(b), + hex.EncodeToString([]byte(want))) + } + } +} + +var timeFloatTestcases = []struct { + rfcStr string + out string +}{ + {"2006-01-02T15:04:05.999999-08:00", "\xc1\xfb\x41\xd0\xee\x6c\x59\x7f\xff\xfc"}, + {"1956-01-02T15:04:05.999999-08:00", "\xc1\xfb\xc1\xba\x53\x81\x1a\x00\x00\x11"}, +} + +func TestAppendTimePastPresentFloat(t *testing.T) { + const timeFloatFmt = "2006-01-02T15:04:05.999999-07:00" + for _, tt := range timeFloatTestcases { + tin, err := time.Parse(timeFloatFmt, tt.rfcStr) + if err != nil { + fmt.Println("Cannot parse input", tt.rfcStr, ".. 
Skipping!") + continue + } + b := enc.AppendTime([]byte{}, tin, "unused") + if got, want := string(b), tt.out; got != want { + t.Errorf("appendString(%s) = 0x%s, want 0x%s", tt.rfcStr, + hex.EncodeToString(b), + hex.EncodeToString([]byte(want))) + } + } +} + +func BenchmarkAppendTime(b *testing.B) { + tests := map[string]string{ + "Integer": "Feb 3, 2013 at 7:54pm (PST)", + "Float": "2006-01-02T15:04:05.999999-08:00", + } + const timeFloatFmt = "2006-01-02T15:04:05.999999-07:00" + + for name, str := range tests { + t, err := time.Parse(time.RFC3339, str) + if err != nil { + t, _ = time.Parse(timeFloatFmt, str) + } + b.Run(name, func(b *testing.B) { + buf := make([]byte, 0, 100) + for i := 0; i < b.N; i++ { + _ = enc.AppendTime(buf, t, "unused") + } + }) + } +} diff --git a/internal/cbor/types.go b/internal/cbor/types.go new file mode 100644 index 0000000..eb4f697 --- /dev/null +++ b/internal/cbor/types.go @@ -0,0 +1,478 @@ +package cbor + +import ( + "encoding/json" + "fmt" + "math" + "net" +) + +// AppendNil inserts a 'Nil' object into the dst byte array. +func (Encoder) AppendNil(dst []byte) []byte { + return append(dst, byte(majorTypeSimpleAndFloat|additionalTypeNull)) +} + +// AppendBeginMarker inserts a map start into the dst byte array. +func (Encoder) AppendBeginMarker(dst []byte) []byte { + return append(dst, byte(majorTypeMap|additionalTypeInfiniteCount)) +} + +// AppendEndMarker inserts a map end into the dst byte array. +func (Encoder) AppendEndMarker(dst []byte) []byte { + return append(dst, byte(majorTypeSimpleAndFloat|additionalTypeBreak)) +} + +// AppendObjectData takes an object in form of a byte array and appends to dst. +func (Encoder) AppendObjectData(dst []byte, o []byte) []byte { + // BeginMarker is present in the dst, which + // should not be copied when appending to existing data. + return append(dst, o[1:]...) +} + +// AppendArrayStart adds markers to indicate the start of an array. +func (Encoder) AppendArrayStart(dst []byte) []byte { + return append(dst, byte(majorTypeArray|additionalTypeInfiniteCount)) +} + +// AppendArrayEnd adds markers to indicate the end of an array. +func (Encoder) AppendArrayEnd(dst []byte) []byte { + return append(dst, byte(majorTypeSimpleAndFloat|additionalTypeBreak)) +} + +// AppendArrayDelim adds markers to indicate end of a particular array element. +func (Encoder) AppendArrayDelim(dst []byte) []byte { + //No delimiters needed in cbor + return dst +} + +// AppendLineBreak is a noop that keep API compat with json encoder. +func (Encoder) AppendLineBreak(dst []byte) []byte { + // No line breaks needed in binary format. + return dst +} + +// AppendBool encodes and inserts a boolean value into the dst byte array. +func (Encoder) AppendBool(dst []byte, val bool) []byte { + b := additionalTypeBoolFalse + if val { + b = additionalTypeBoolTrue + } + return append(dst, byte(majorTypeSimpleAndFloat|b)) +} + +// AppendBools encodes and inserts an array of boolean values into the dst byte array. +func (e Encoder) AppendBools(dst []byte, vals []bool) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendBool(dst, v) + } + return dst +} + +// AppendInt encodes and inserts an integer value into the dst byte array. 
+func (Encoder) AppendInt(dst []byte, val int) []byte { + major := majorTypeUnsignedInt + contentVal := val + if val < 0 { + major = majorTypeNegativeInt + contentVal = -val - 1 + } + if contentVal <= additionalMax { + lb := byte(contentVal) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(contentVal)) + } + return dst +} + +// AppendInts encodes and inserts an array of integer values into the dst byte array. +func (e Encoder) AppendInts(dst []byte, vals []int) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendInt(dst, v) + } + return dst +} + +// AppendInt8 encodes and inserts an int8 value into the dst byte array. +func (e Encoder) AppendInt8(dst []byte, val int8) []byte { + return e.AppendInt(dst, int(val)) +} + +// AppendInts8 encodes and inserts an array of integer values into the dst byte array. +func (e Encoder) AppendInts8(dst []byte, vals []int8) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendInt(dst, int(v)) + } + return dst +} + +// AppendInt16 encodes and inserts a int16 value into the dst byte array. +func (e Encoder) AppendInt16(dst []byte, val int16) []byte { + return e.AppendInt(dst, int(val)) +} + +// AppendInts16 encodes and inserts an array of int16 values into the dst byte array. +func (e Encoder) AppendInts16(dst []byte, vals []int16) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendInt(dst, int(v)) + } + return dst +} + +// AppendInt32 encodes and inserts a int32 value into the dst byte array. +func (e Encoder) AppendInt32(dst []byte, val int32) []byte { + return e.AppendInt(dst, int(val)) +} + +// AppendInts32 encodes and inserts an array of int32 values into the dst byte array. +func (e Encoder) AppendInts32(dst []byte, vals []int32) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendInt(dst, int(v)) + } + return dst +} + +// AppendInt64 encodes and inserts a int64 value into the dst byte array. +func (Encoder) AppendInt64(dst []byte, val int64) []byte { + major := majorTypeUnsignedInt + contentVal := val + if val < 0 { + major = majorTypeNegativeInt + contentVal = -val - 1 + } + if contentVal <= additionalMax { + lb := byte(contentVal) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(contentVal)) + } + return dst +} + +// AppendInts64 encodes and inserts an array of int64 values into the dst byte array. 
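+// As with the other slice helpers in this file, the element count is packed
+// into the array header byte when it fits (up to 23 in standard CBOR) and
+// written as a length prefix via appendCborTypePrefix otherwise; an empty
+// slice degenerates to an array start marker immediately followed by a break.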
+func (e Encoder) AppendInts64(dst []byte, vals []int64) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendInt64(dst, v) + } + return dst +} + +// AppendUint encodes and inserts an unsigned integer value into the dst byte array. +func (e Encoder) AppendUint(dst []byte, val uint) []byte { + return e.AppendInt64(dst, int64(val)) +} + +// AppendUints encodes and inserts an array of unsigned integer values into the dst byte array. +func (e Encoder) AppendUints(dst []byte, vals []uint) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendUint(dst, v) + } + return dst +} + +// AppendUint8 encodes and inserts a unsigned int8 value into the dst byte array. +func (e Encoder) AppendUint8(dst []byte, val uint8) []byte { + return e.AppendUint(dst, uint(val)) +} + +// AppendUints8 encodes and inserts an array of uint8 values into the dst byte array. +func (e Encoder) AppendUints8(dst []byte, vals []uint8) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendUint8(dst, v) + } + return dst +} + +// AppendUint16 encodes and inserts a uint16 value into the dst byte array. +func (e Encoder) AppendUint16(dst []byte, val uint16) []byte { + return e.AppendUint(dst, uint(val)) +} + +// AppendUints16 encodes and inserts an array of uint16 values into the dst byte array. +func (e Encoder) AppendUints16(dst []byte, vals []uint16) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendUint16(dst, v) + } + return dst +} + +// AppendUint32 encodes and inserts a uint32 value into the dst byte array. +func (e Encoder) AppendUint32(dst []byte, val uint32) []byte { + return e.AppendUint(dst, uint(val)) +} + +// AppendUints32 encodes and inserts an array of uint32 values into the dst byte array. +func (e Encoder) AppendUints32(dst []byte, vals []uint32) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendUint32(dst, v) + } + return dst +} + +// AppendUint64 encodes and inserts a uint64 value into the dst byte array. 
+func (Encoder) AppendUint64(dst []byte, val uint64) []byte { + major := majorTypeUnsignedInt + contentVal := val + if contentVal <= additionalMax { + lb := byte(contentVal) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(contentVal)) + } + return dst +} + +// AppendUints64 encodes and inserts an array of uint64 values into the dst byte array. +func (e Encoder) AppendUints64(dst []byte, vals []uint64) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendUint64(dst, v) + } + return dst +} + +// AppendFloat32 encodes and inserts a single precision float value into the dst byte array. +func (Encoder) AppendFloat32(dst []byte, val float32) []byte { + switch { + case math.IsNaN(float64(val)): + return append(dst, "\xfa\x7f\xc0\x00\x00"...) + case math.IsInf(float64(val), 1): + return append(dst, "\xfa\x7f\x80\x00\x00"...) + case math.IsInf(float64(val), -1): + return append(dst, "\xfa\xff\x80\x00\x00"...) + } + major := majorTypeSimpleAndFloat + subType := additionalTypeFloat32 + n := math.Float32bits(val) + var buf [4]byte + for i := uint(0); i < 4; i++ { + buf[i] = byte(n >> ((3 - i) * 8)) + } + return append(append(dst, byte(major|subType)), buf[0], buf[1], buf[2], buf[3]) +} + +// AppendFloats32 encodes and inserts an array of single precision float value into the dst byte array. +func (e Encoder) AppendFloats32(dst []byte, vals []float32) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendFloat32(dst, v) + } + return dst +} + +// AppendFloat64 encodes and inserts a double precision float value into the dst byte array. +func (Encoder) AppendFloat64(dst []byte, val float64) []byte { + switch { + case math.IsNaN(val): + return append(dst, "\xfb\x7f\xf8\x00\x00\x00\x00\x00\x00"...) + case math.IsInf(val, 1): + return append(dst, "\xfb\x7f\xf0\x00\x00\x00\x00\x00\x00"...) + case math.IsInf(val, -1): + return append(dst, "\xfb\xff\xf0\x00\x00\x00\x00\x00\x00"...) + } + major := majorTypeSimpleAndFloat + subType := additionalTypeFloat64 + n := math.Float64bits(val) + dst = append(dst, byte(major|subType)) + for i := uint(1); i <= 8; i++ { + b := byte(n >> ((8 - i) * 8)) + dst = append(dst, b) + } + return dst +} + +// AppendFloats64 encodes and inserts an array of double precision float values into the dst byte array. +func (e Encoder) AppendFloats64(dst []byte, vals []float64) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendFloat64(dst, v) + } + return dst +} + +// AppendInterface takes an arbitrary object and converts it to JSON and embeds it dst. 
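+// For example, AppendInterface(dst, map[string]int{"a": 1}) marshals the map
+// with encoding/json and embeds the resulting {"a":1} via AppendEmbeddedJSON;
+// if marshaling fails, the error text itself is appended as a plain string
+// instead.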
+func (e Encoder) AppendInterface(dst []byte, i interface{}) []byte { + marshaled, err := json.Marshal(i) + if err != nil { + return e.AppendString(dst, fmt.Sprintf("marshaling error: %v", err)) + } + return AppendEmbeddedJSON(dst, marshaled) +} + +// AppendIPAddr encodes and inserts an IP Address (IPv4 or IPv6). +func (e Encoder) AppendIPAddr(dst []byte, ip net.IP) []byte { + dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16)) + dst = append(dst, byte(additionalTypeTagNetworkAddr>>8)) + dst = append(dst, byte(additionalTypeTagNetworkAddr&0xff)) + return e.AppendBytes(dst, ip) +} + +// AppendIPPrefix encodes and inserts an IP Address Prefix (Address + Mask Length). +func (e Encoder) AppendIPPrefix(dst []byte, pfx net.IPNet) []byte { + dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16)) + dst = append(dst, byte(additionalTypeTagNetworkPrefix>>8)) + dst = append(dst, byte(additionalTypeTagNetworkPrefix&0xff)) + + // Prefix is a tuple (aka MAP of 1 pair of elements) - + // first element is prefix, second is mask length. + dst = append(dst, byte(majorTypeMap|0x1)) + dst = e.AppendBytes(dst, pfx.IP) + maskLen, _ := pfx.Mask.Size() + return e.AppendUint8(dst, uint8(maskLen)) +} + +// AppendMACAddr encodes and inserts an Hardware (MAC) address. +func (e Encoder) AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte { + dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16)) + dst = append(dst, byte(additionalTypeTagNetworkAddr>>8)) + dst = append(dst, byte(additionalTypeTagNetworkAddr&0xff)) + return e.AppendBytes(dst, ha) +} + +// AppendHex adds a TAG and inserts a hex bytes as a string. +func (e Encoder) AppendHex(dst []byte, val []byte) []byte { + dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16)) + dst = append(dst, byte(additionalTypeTagHexString>>8)) + dst = append(dst, byte(additionalTypeTagHexString&0xff)) + return e.AppendBytes(dst, val) +} diff --git a/internal/cbor/types_test.go b/internal/cbor/types_test.go new file mode 100644 index 0000000..9c31429 --- /dev/null +++ b/internal/cbor/types_test.go @@ -0,0 +1,322 @@ +package cbor + +import ( + "encoding/hex" + "net" + "testing" +) + +var enc = Encoder{} + +func TestAppendNil(t *testing.T) { + s := enc.AppendNil([]byte{}) + got := string(s) + want := "\xf6" + if got != want { + t.Errorf("appendNull() = 0x%s, want: 0x%s", hex.EncodeToString(s), + hex.EncodeToString([]byte(want))) + } +} + +var booleanTestCases = []struct { + val bool + binary string + json string +}{ + {true, "\xf5", "true"}, + {false, "\xf4", "false"}, +} + +func TestAppendBool(t *testing.T) { + for _, tc := range booleanTestCases { + s := enc.AppendBool([]byte{}, tc.val) + got := string(s) + if got != tc.binary { + t.Errorf("AppendBool(%s)=0x%s, want: 0x%s", + tc.json, hex.EncodeToString(s), + hex.EncodeToString([]byte(tc.binary))) + } + } +} + +var booleanArrayTestCases = []struct { + val []bool + binary string + json string +}{ + {[]bool{true, false, true}, "\x83\xf5\xf4\xf5", "[true,false,true]"}, + {[]bool{true, false, false, true, false, true}, "\x86\xf5\xf4\xf4\xf5\xf4\xf5", "[true,false,false,true,false,true]"}, +} + +func TestAppendBoolArray(t *testing.T) { + for _, tc := range booleanArrayTestCases { + s := enc.AppendBools([]byte{}, tc.val) + got := string(s) + if got != tc.binary { + t.Errorf("AppendBools(%s)=0x%s, want: 0x%s", + tc.json, hex.EncodeToString(s), + hex.EncodeToString([]byte(tc.binary))) + } + } +} + +var integerTestCases = []struct { + val int + binary string +}{ + // Value included in the type. 
+ {0, "\x00"}, + {1, "\x01"}, + {2, "\x02"}, + {3, "\x03"}, + {8, "\x08"}, + {9, "\x09"}, + {10, "\x0a"}, + {22, "\x16"}, + {23, "\x17"}, + // Value in 1 byte. + {24, "\x18\x18"}, + {25, "\x18\x19"}, + {26, "\x18\x1a"}, + {100, "\x18\x64"}, + {254, "\x18\xfe"}, + {255, "\x18\xff"}, + // Value in 2 bytes. + {256, "\x19\x01\x00"}, + {257, "\x19\x01\x01"}, + {1000, "\x19\x03\xe8"}, + {0xFFFF, "\x19\xff\xff"}, + // Value in 4 bytes. + {0x10000, "\x1a\x00\x01\x00\x00"}, + {0xFFFFFFFE, "\x1a\xff\xff\xff\xfe"}, + {1000000, "\x1a\x00\x0f\x42\x40"}, + // Value in 8 bytes. + {0xabcd100000000, "\x1b\x00\x0a\xbc\xd1\x00\x00\x00\x00"}, + {1000000000000, "\x1b\x00\x00\x00\xe8\xd4\xa5\x10\x00"}, + // Negative number test cases. + // Value included in the type. + {-1, "\x20"}, + {-2, "\x21"}, + {-3, "\x22"}, + {-10, "\x29"}, + {-21, "\x34"}, + {-22, "\x35"}, + {-23, "\x36"}, + {-24, "\x37"}, + // Value in 1 byte. + {-25, "\x38\x18"}, + {-26, "\x38\x19"}, + {-100, "\x38\x63"}, + {-254, "\x38\xfd"}, + {-255, "\x38\xfe"}, + {-256, "\x38\xff"}, + // Value in 2 bytes. + {-257, "\x39\x01\x00"}, + {-258, "\x39\x01\x01"}, + {-1000, "\x39\x03\xe7"}, + // Value in 4 bytes. + {-0x10001, "\x3a\x00\x01\x00\x00"}, + {-0xFFFFFFFE, "\x3a\xff\xff\xff\xfd"}, + {-1000000, "\x3a\x00\x0f\x42\x3f"}, + // Value in 8 bytes. + {-0xabcd100000001, "\x3b\x00\x0a\xbc\xd1\x00\x00\x00\x00"}, + {-1000000000001, "\x3b\x00\x00\x00\xe8\xd4\xa5\x10\x00"}, +} + +func TestAppendInt(t *testing.T) { + for _, tc := range integerTestCases { + s := enc.AppendInt([]byte{}, tc.val) + got := string(s) + if got != tc.binary { + t.Errorf("AppendInt(0x%x)=0x%s, want: 0x%s", + tc.val, hex.EncodeToString(s), + hex.EncodeToString([]byte(tc.binary))) + } + } +} + +var integerArrayTestCases = []struct { + val []int + binary string + json string +}{ + {[]int{-1, 0, 200, 20}, "\x84\x20\x00\x18\xc8\x14", "[-1,0,200,20]"}, + {[]int{-200, -10, 200, 400}, "\x84\x38\xc7\x29\x18\xc8\x19\x01\x90", "[-200,-10,200,400]"}, + {[]int{1, 2, 3}, "\x83\x01\x02\x03", "[1,2,3]"}, + {[]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25}, + "\x98\x19\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x18\x18\x19", + "[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25]"}, +} + +func TestAppendIntArray(t *testing.T) { + for _, tc := range integerArrayTestCases { + s := enc.AppendInts([]byte{}, tc.val) + got := string(s) + if got != tc.binary { + t.Errorf("AppendInts(%s)=0x%s, want: 0x%s", + tc.json, hex.EncodeToString(s), + hex.EncodeToString([]byte(tc.binary))) + } + } +} + +var float32TestCases = []struct { + val float32 + binary string +}{ + {0.0, "\xfa\x00\x00\x00\x00"}, + {-0.0, "\xfa\x00\x00\x00\x00"}, + {1.0, "\xfa\x3f\x80\x00\x00"}, + {1.5, "\xfa\x3f\xc0\x00\x00"}, + {65504.0, "\xfa\x47\x7f\xe0\x00"}, + {-4.0, "\xfa\xc0\x80\x00\x00"}, + {0.00006103515625, "\xfa\x38\x80\x00\x00"}, +} + +func TestAppendFloat32(t *testing.T) { + for _, tc := range float32TestCases { + s := enc.AppendFloat32([]byte{}, tc.val) + got := string(s) + if got != tc.binary { + t.Errorf("AppendFloat32(%f)=0x%s, want: 0x%s", + tc.val, hex.EncodeToString(s), + hex.EncodeToString([]byte(tc.binary))) + } + } +} + +var ipAddrTestCases = []struct { + ipaddr net.IP + text string // ASCII representation of ipaddr + binary string // CBOR representation of ipaddr +}{ + {net.IP{10, 0, 0, 1}, "\"10.0.0.1\"", "\xd9\x01\x04\x44\x0a\x00\x00\x01"}, + {net.IP{0x20, 0x01, 0x0d, 0xb8, 0x85, 0xa3, 0x0, 0x0, 0x0, 0x0, 
0x8a, 0x2e, 0x03, 0x70, 0x73, 0x34}, + "\"2001:db8:85a3::8a2e:370:7334\"", + "\xd9\x01\x04\x50\x20\x01\x0d\xb8\x85\xa3\x00\x00\x00\x00\x8a\x2e\x03\x70\x73\x34"}, +} + +func TestAppendNetworkAddr(t *testing.T) { + for _, tc := range ipAddrTestCases { + s := enc.AppendIPAddr([]byte{}, tc.ipaddr) + got := string(s) + if got != tc.binary { + t.Errorf("AppendIPAddr(%s)=0x%s, want: 0x%s", + tc.ipaddr, hex.EncodeToString(s), + hex.EncodeToString([]byte(tc.binary))) + } + } +} + +var macAddrTestCases = []struct { + macaddr net.HardwareAddr + text string // ASCII representation of macaddr + binary string // CBOR representation of macaddr +}{ + {net.HardwareAddr{0x12, 0x34, 0x56, 0x78, 0x90, 0xab}, "\"12:34:56:78:90:ab\"", "\xd9\x01\x04\x46\x12\x34\x56\x78\x90\xab"}, + {net.HardwareAddr{0x20, 0x01, 0x0d, 0xb8, 0x85, 0xa3}, "\"20:01:0d:b8:85:a3\"", "\xd9\x01\x04\x46\x20\x01\x0d\xb8\x85\xa3"}, +} + +func TestAppendMacAddr(t *testing.T) { + for _, tc := range macAddrTestCases { + s := enc.AppendMACAddr([]byte{}, tc.macaddr) + got := string(s) + if got != tc.binary { + t.Errorf("AppendMACAddr(%s)=0x%s, want: 0x%s", + tc.macaddr.String(), hex.EncodeToString(s), + hex.EncodeToString([]byte(tc.binary))) + } + } +} + +var IPPrefixTestCases = []struct { + pfx net.IPNet + text string // ASCII representation of pfx + binary string // CBOR representation of pfx +}{ + {net.IPNet{IP: net.IP{0, 0, 0, 0}, Mask: net.CIDRMask(0, 32)}, "\"0.0.0.0/0\"", "\xd9\x01\x05\xa1\x44\x00\x00\x00\x00\x00"}, + {net.IPNet{IP: net.IP{192, 168, 0, 100}, Mask: net.CIDRMask(24, 32)}, "\"192.168.0.100/24\"", + "\xd9\x01\x05\xa1\x44\xc0\xa8\x00\x64\x18\x18"}, +} + +func TestAppendIPPrefix(t *testing.T) { + for _, tc := range IPPrefixTestCases { + s := enc.AppendIPPrefix([]byte{}, tc.pfx) + got := string(s) + if got != tc.binary { + t.Errorf("AppendIPPrefix(%s)=0x%s, want: 0x%s", + tc.pfx.String(), hex.EncodeToString(s), + hex.EncodeToString([]byte(tc.binary))) + } + } +} + +func BenchmarkAppendInt(b *testing.B) { + type st struct { + sz byte + val int64 + } + tests := map[string]st{ + "int-Positive": {sz: 0, val: 10000}, + "int-Negative": {sz: 0, val: -10000}, + "uint8": {sz: 1, val: 100}, + "uint16": {sz: 2, val: 0xfff}, + "uint32": {sz: 4, val: 0xffffff}, + "uint64": {sz: 8, val: 0xffffffffff}, + "int8": {sz: 21, val: -120}, + "int16": {sz: 22, val: -1200}, + "int32": {sz: 23, val: 32000}, + "int64": {sz: 24, val: 0xffffffffff}, + } + for name, str := range tests { + b.Run(name, func(b *testing.B) { + buf := make([]byte, 0, 100) + for i := 0; i < b.N; i++ { + switch str.sz { + case 0: + _ = enc.AppendInt(buf, int(str.val)) + case 1: + _ = enc.AppendUint8(buf, uint8(str.val)) + case 2: + _ = enc.AppendUint16(buf, uint16(str.val)) + case 4: + _ = enc.AppendUint32(buf, uint32(str.val)) + case 8: + _ = enc.AppendUint64(buf, uint64(str.val)) + case 21: + _ = enc.AppendInt8(buf, int8(str.val)) + case 22: + _ = enc.AppendInt16(buf, int16(str.val)) + case 23: + _ = enc.AppendInt32(buf, int32(str.val)) + case 24: + _ = enc.AppendInt64(buf, int64(str.val)) + } + } + }) + } +} + +func BenchmarkAppendFloat(b *testing.B) { + type st struct { + sz byte + val float64 + } + tests := map[string]st{ + "Float32": {sz: 4, val: 10000.12345}, + "Float64": {sz: 8, val: -10000.54321}, + } + for name, str := range tests { + b.Run(name, func(b *testing.B) { + buf := make([]byte, 0, 100) + for i := 0; i < b.N; i++ { + switch str.sz { + case 4: + _ = enc.AppendFloat32(buf, float32(str.val)) + case 8: + _ = enc.AppendFloat64(buf, str.val) + } + } + }) + } +} diff 
--git a/internal/json/base.go b/internal/json/base.go index 7baeec5..d6f8839 100644 --- a/internal/json/base.go +++ b/internal/json/base.go @@ -1,39 +1,12 @@ package json -func AppendKey(dst []byte, key string) []byte { - if len(dst) > 1 { +type Encoder struct{} + +// AppendKey appends a new key to the output JSON. +func (e Encoder) AppendKey(dst []byte, key string) []byte { + if len(dst) > 1 && dst[len(dst)-1] != '{' { dst = append(dst, ',') } - dst = AppendString(dst, key) + dst = e.AppendString(dst, key) return append(dst, ':') -} - -func AppendError(dst []byte, err error) []byte { - if err == nil { - return append(dst, `null`...) - } - return AppendString(dst, err.Error()) -} - -func AppendErrors(dst []byte, errs []error) []byte { - if len(errs) == 0 { - return append(dst, '[', ']') - } - dst = append(dst, '[') - if errs[0] != nil { - dst = AppendString(dst, errs[0].Error()) - } else { - dst = append(dst, "null"...) - } - if len(errs) > 1 { - for _, err := range errs[1:] { - if err == nil { - dst = append(dst, ",null"...) - continue - } - dst = AppendString(append(dst, ','), err.Error()) - } - } - dst = append(dst, ']') - return dst -} +} \ No newline at end of file diff --git a/internal/json/bytes.go b/internal/json/bytes.go new file mode 100644 index 0000000..de64120 --- /dev/null +++ b/internal/json/bytes.go @@ -0,0 +1,85 @@ +package json + +import "unicode/utf8" + +// AppendBytes is a mirror of appendString with []byte arg +func (Encoder) AppendBytes(dst, s []byte) []byte { + dst = append(dst, '"') + for i := 0; i < len(s); i++ { + if !noEscapeTable[s[i]] { + dst = appendBytesComplex(dst, s, i) + return append(dst, '"') + } + } + dst = append(dst, s...) + return append(dst, '"') +} + +// AppendHex encodes the input bytes to a hex string and appends +// the encoded string to the input byte slice. +// +// The operation loops though each byte and encodes it as hex using +// the hex lookup table. +func (Encoder) AppendHex(dst, s []byte) []byte { + dst = append(dst, '"') + for _, v := range s { + dst = append(dst, hex[v>>4], hex[v&0x0f]) + } + return append(dst, '"') +} + +// appendBytesComplex is a mirror of the appendStringComplex +// with []byte arg +func appendBytesComplex(dst, s []byte, i int) []byte { + start := 0 + for i < len(s) { + b := s[i] + if b >= utf8.RuneSelf { + r, size := utf8.DecodeRune(s[i:]) + if r == utf8.RuneError && size == 1 { + if start < i { + dst = append(dst, s[start:i]...) + } + dst = append(dst, `\ufffd`...) + i += size + start = i + continue + } + i += size + continue + } + if noEscapeTable[b] { + i++ + continue + } + // We encountered a character that needs to be encoded. + // Let's append the previous simple characters to the byte slice + // and switch our operation to read and encode the remainder + // characters byte-by-byte. + if start < i { + dst = append(dst, s[start:i]...) + } + switch b { + case '"', '\\': + dst = append(dst, '\\', b) + case '\b': + dst = append(dst, '\\', 'b') + case '\f': + dst = append(dst, '\\', 'f') + case '\n': + dst = append(dst, '\\', 'n') + case '\r': + dst = append(dst, '\\', 'r') + case '\t': + dst = append(dst, '\\', 't') + default: + dst = append(dst, '\\', 'u', '0', '0', hex[b>>4], hex[b&0xF]) + } + i++ + start = i + } + if start < len(s) { + dst = append(dst, s[start:]...) 
+ } + return dst +} diff --git a/internal/json/bytes_test.go b/internal/json/bytes_test.go new file mode 100644 index 0000000..d1a370a --- /dev/null +++ b/internal/json/bytes_test.go @@ -0,0 +1,84 @@ +package json + +import ( + "testing" + "unicode" +) + +var enc = Encoder{} + +func TestAppendBytes(t *testing.T) { + for _, tt := range encodeStringTests { + b := enc.AppendBytes([]byte{}, []byte(tt.in)) + if got, want := string(b), tt.out; got != want { + t.Errorf("appendBytes(%q) = %#q, want %#q", tt.in, got, want) + } + } +} + +func TestAppendHex(t *testing.T) { + for _, tt := range encodeHexTests { + b := enc.AppendHex([]byte{}, []byte{tt.in}) + if got, want := string(b), tt.out; got != want { + t.Errorf("appendHex(%x) = %s, want %s", tt.in, got, want) + } + } +} + +func TestStringBytes(t *testing.T) { + t.Parallel() + // Test that encodeState.stringBytes and encodeState.string use the same encoding. + var r []rune + for i := '\u0000'; i <= unicode.MaxRune; i++ { + r = append(r, i) + } + s := string(r) + "\xff\xff\xffhello" // some invalid UTF-8 too + + encStr := string(enc.AppendString([]byte{}, s)) + encBytes := string(enc.AppendBytes([]byte{}, []byte(s))) + + if encStr != encBytes { + i := 0 + for i < len(encStr) && i < len(encBytes) && encStr[i] == encBytes[i] { + i++ + } + encStr = encStr[i:] + encBytes = encBytes[i:] + i = 0 + for i < len(encStr) && i < len(encBytes) && encStr[len(encStr)-i-1] == encBytes[len(encBytes)-i-1] { + i++ + } + encStr = encStr[:len(encStr)-i] + encBytes = encBytes[:len(encBytes)-i] + + if len(encStr) > 20 { + encStr = encStr[:20] + "..." + } + if len(encBytes) > 20 { + encBytes = encBytes[:20] + "..." + } + + t.Errorf("encodings differ at %#q vs %#q", encStr, encBytes) + } +} + +func BenchmarkAppendBytes(b *testing.B) { + tests := map[string]string{ + "NoEncoding": `aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa`, + "EncodingFirst": `"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa`, + "EncodingMiddle": `aaaaaaaaaaaaaaaaaaaaaaaaa"aaaaaaaaaaaaaaaaaaaaaaaa`, + "EncodingLast": `aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"`, + "MultiBytesFirst": `❤️aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa`, + "MultiBytesMiddle": `aaaaaaaaaaaaaaaaaaaaaaaaa❤️aaaaaaaaaaaaaaaaaaaaaaaa`, + "MultiBytesLast": `aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa❤️`, + } + for name, str := range tests { + byt := []byte(str) + b.Run(name, func(b *testing.B) { + buf := make([]byte, 0, 100) + for i := 0; i < b.N; i++ { + _ = enc.AppendBytes(buf, byt) + } + }) + } +} diff --git a/internal/json/string.go b/internal/json/string.go index 8f8f4df..815906f 100644 --- a/internal/json/string.go +++ b/internal/json/string.go @@ -4,15 +4,25 @@ import "unicode/utf8" const hex = "0123456789abcdef" -func AppendStrings(dst []byte, vals []string) []byte { +var noEscapeTable = [256]bool{} + +func init() { + for i := 0; i <= 0x7e; i++ { + noEscapeTable[i] = i >= 0x20 && i != '\\' && i != '"' + } +} + +// AppendStrings encodes the input strings to json and +// appends the encoded string list to the input byte slice. 
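+// For example, given []string{"a", "b"} the appended output is ["a","b"],
+// with each element escaped through AppendString.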
+func (e Encoder) AppendStrings(dst []byte, vals []string) []byte { if len(vals) == 0 { return append(dst, '[', ']') } dst = append(dst, '[') - dst = AppendString(dst, vals[0]) + dst = e.AppendString(dst, vals[0]) if len(vals) > 1 { for _, val := range vals[1:] { - dst = AppendString(append(dst, ','), val) + dst = e.AppendString(append(dst, ','), val) } } dst = append(dst, ']') @@ -28,7 +38,7 @@ func AppendStrings(dst []byte, vals []string) []byte { // entirety to the byte slice. // If we encounter a byte that does need encoding, switch up // the operation and perform a byte-by-byte read-encode-append. -func AppendString(dst []byte, s string) []byte { +func (Encoder) AppendString(dst []byte, s string) []byte { // Start with a double quote. dst = append(dst, '"') // Loop through each character in the string. @@ -36,7 +46,7 @@ func AppendString(dst []byte, s string) []byte { // Check if the character needs encoding. Control characters, slashes, // and the double quote need json encoding. Bytes above the ascii // boundary needs utf8 encoding. - if s[i] < 0x20 || s[i] > 0x7e || s[i] == '\\' || s[i] == '"' { + if !noEscapeTable[s[i]] { // We encountered a character that needs to be encoded. Switch // to complex version of the algorithm. dst = appendStringComplex(dst, s, i) @@ -74,76 +84,7 @@ func appendStringComplex(dst []byte, s string, i int) []byte { i += size continue } - if b >= 0x20 && b <= 0x7e && b != '\\' && b != '"' { - i++ - continue - } - // We encountered a character that needs to be encoded. - // Let's append the previous simple characters to the byte slice - // and switch our operation to read and encode the remainder - // characters byte-by-byte. - if start < i { - dst = append(dst, s[start:i]...) - } - switch b { - case '"', '\\': - dst = append(dst, '\\', b) - case '\b': - dst = append(dst, '\\', 'b') - case '\f': - dst = append(dst, '\\', 'f') - case '\n': - dst = append(dst, '\\', 'n') - case '\r': - dst = append(dst, '\\', 'r') - case '\t': - dst = append(dst, '\\', 't') - default: - dst = append(dst, '\\', 'u', '0', '0', hex[b>>4], hex[b&0xF]) - } - i++ - start = i - } - if start < len(s) { - dst = append(dst, s[start:]...) - } - return dst -} - -// AppendBytes is a mirror of appendString with []byte arg -func AppendBytes(dst, s []byte) []byte { - dst = append(dst, '"') - for i := 0; i < len(s); i++ { - if s[i] < 0x20 || s[i] > 0x7e || s[i] == '\\' || s[i] == '"' { - dst = appendBytesComplex(dst, s, i) - return append(dst, '"') - } - } - dst = append(dst, s...) - return append(dst, '"') -} - -// appendBytesComplex is a mirror of the appendStringComplex -// with []byte arg -func appendBytesComplex(dst, s []byte, i int) []byte { - start := 0 - for i < len(s) { - b := s[i] - if b >= utf8.RuneSelf { - r, size := utf8.DecodeRune(s[i:]) - if r == utf8.RuneError && size == 1 { - if start < i { - dst = append(dst, s[start:i]...) - } - dst = append(dst, `\ufffd`...) 
- i += size - start = i - continue - } - i += size - continue - } - if b >= 0x20 && b <= 0x7e && b != '\\' && b != '"' { + if noEscapeTable[b] { i++ continue } diff --git a/internal/json/string_test.go b/internal/json/string_test.go index b0c2c32..10c1313 100644 --- a/internal/json/string_test.go +++ b/internal/json/string_test.go @@ -2,7 +2,6 @@ package json import ( "testing" - "unicode" ) var encodeStringTests = []struct { @@ -53,62 +52,27 @@ var encodeStringTests = []struct { {"emoji \u2764\ufe0f!", `"emoji ❤️!"`}, } -func TestappendString(t *testing.T) { +var encodeHexTests = []struct { + in byte + out string +}{ + {0x00, `"00"`}, + {0x0f, `"0f"`}, + {0x10, `"10"`}, + {0xf0, `"f0"`}, + {0xff, `"ff"`}, +} + +func TestAppendString(t *testing.T) { for _, tt := range encodeStringTests { - b := AppendString([]byte{}, tt.in) + b := enc.AppendString([]byte{}, tt.in) if got, want := string(b), tt.out; got != want { t.Errorf("appendString(%q) = %#q, want %#q", tt.in, got, want) } } } -func TestappendBytes(t *testing.T) { - for _, tt := range encodeStringTests { - b := AppendBytes([]byte{}, []byte(tt.in)) - if got, want := string(b), tt.out; got != want { - t.Errorf("appendBytes(%q) = %#q, want %#q", tt.in, got, want) - } - } -} - -func TestStringBytes(t *testing.T) { - t.Parallel() - // Test that encodeState.stringBytes and encodeState.string use the same encoding. - var r []rune - for i := '\u0000'; i <= unicode.MaxRune; i++ { - r = append(r, i) - } - s := string(r) + "\xff\xff\xffhello" // some invalid UTF-8 too - - enc := string(AppendString([]byte{}, s)) - encBytes := string(AppendBytes([]byte{}, []byte(s))) - - if enc != encBytes { - i := 0 - for i < len(enc) && i < len(encBytes) && enc[i] == encBytes[i] { - i++ - } - enc = enc[i:] - encBytes = encBytes[i:] - i = 0 - for i < len(enc) && i < len(encBytes) && enc[len(enc)-i-1] == encBytes[len(encBytes)-i-1] { - i++ - } - enc = enc[:len(enc)-i] - encBytes = encBytes[:len(encBytes)-i] - - if len(enc) > 20 { - enc = enc[:20] + "..." - } - if len(encBytes) > 20 { - encBytes = encBytes[:20] + "..." 
- } - - t.Errorf("encodings differ at %#q vs %#q", enc, encBytes) - } -} - -func BenchmarkappendString(b *testing.B) { +func BenchmarkAppendString(b *testing.B) { tests := map[string]string{ "NoEncoding": `aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa`, "EncodingFirst": `"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa`, @@ -122,28 +86,7 @@ func BenchmarkappendString(b *testing.B) { b.Run(name, func(b *testing.B) { buf := make([]byte, 0, 100) for i := 0; i < b.N; i++ { - _ = AppendString(buf, str) - } - }) - } -} - -func BenchmarkappendBytes(b *testing.B) { - tests := map[string]string{ - "NoEncoding": `aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa`, - "EncodingFirst": `"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa`, - "EncodingMiddle": `aaaaaaaaaaaaaaaaaaaaaaaaa"aaaaaaaaaaaaaaaaaaaaaaaa`, - "EncodingLast": `aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"`, - "MultiBytesFirst": `❤️aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa`, - "MultiBytesMiddle": `aaaaaaaaaaaaaaaaaaaaaaaaa❤️aaaaaaaaaaaaaaaaaaaaaaaa`, - "MultiBytesLast": `aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa❤️`, - } - for name, str := range tests { - byt := []byte(str) - b.Run(name, func(b *testing.B) { - buf := make([]byte, 0, 100) - for i := 0; i < b.N; i++ { - _ = AppendBytes(buf, byt) + _ = enc.AppendString(buf, str) } }) } diff --git a/internal/json/time.go b/internal/json/time.go index 612438d..739afff 100644 --- a/internal/json/time.go +++ b/internal/json/time.go @@ -5,14 +5,18 @@ import ( "time" ) -func AppendTime(dst []byte, t time.Time, format string) []byte { +// AppendTime formats the input time with the given format +// and appends the encoded string to the input byte slice. +func (e Encoder) AppendTime(dst []byte, t time.Time, format string) []byte { if format == "" { - return AppendInt64(dst, t.Unix()) + return e.AppendInt64(dst, t.Unix()) } return append(t.AppendFormat(append(dst, '"'), format), '"') } -func AppendTimes(dst []byte, vals []time.Time, format string) []byte { +// AppendTimes converts the input times with the given format +// and appends the encoded string list to the input byte slice. +func (Encoder) AppendTimes(dst []byte, vals []time.Time, format string) []byte { if format == "" { return appendUnixTimes(dst, vals) } @@ -38,29 +42,33 @@ func appendUnixTimes(dst []byte, vals []time.Time) []byte { dst = strconv.AppendInt(dst, vals[0].Unix(), 10) if len(vals) > 1 { for _, t := range vals[1:] { - dst = strconv.AppendInt(dst, t.Unix(), 10) + dst = strconv.AppendInt(append(dst, ','), t.Unix(), 10) } } dst = append(dst, ']') return dst } -func AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte { +// AppendDuration formats the input duration with the given unit & format +// and appends the encoded string to the input byte slice. +func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte { if useInt { return strconv.AppendInt(dst, int64(d/unit), 10) } - return AppendFloat64(dst, float64(d)/float64(unit)) + return e.AppendFloat64(dst, float64(d)/float64(unit)) } -func AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte { +// AppendDurations formats the input durations with the given unit & format +// and appends the encoded string list to the input byte slice. 
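+// For example, with unit == time.Second a slice of 1500ms and 500ms appends
+// [1,0] when useInt is true (integer division by the unit) and [1.5,0.5]
+// when it is false.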
+func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte { if len(vals) == 0 { return append(dst, '[', ']') } dst = append(dst, '[') - dst = AppendDuration(dst, vals[0], unit, useInt) + dst = e.AppendDuration(dst, vals[0], unit, useInt) if len(vals) > 1 { for _, d := range vals[1:] { - dst = AppendDuration(append(dst, ','), d, unit, useInt) + dst = e.AppendDuration(append(dst, ','), d, unit, useInt) } } dst = append(dst, ']') diff --git a/internal/json/types.go b/internal/json/types.go index bbc8e42..f343c86 100644 --- a/internal/json/types.go +++ b/internal/json/types.go @@ -4,14 +4,57 @@ import ( "encoding/json" "fmt" "math" + "net" "strconv" ) -func AppendBool(dst []byte, val bool) []byte { +// AppendNil inserts a 'Nil' object into the dst byte array. +func (Encoder) AppendNil(dst []byte) []byte { + return append(dst, "null"...) +} + +// AppendBeginMarker inserts a map start into the dst byte array. +func (Encoder) AppendBeginMarker(dst []byte) []byte { + return append(dst, '{') +} + +// AppendEndMarker inserts a map end into the dst byte array. +func (Encoder) AppendEndMarker(dst []byte) []byte { + return append(dst, '}') +} + +// AppendLineBreak appends a line break. +func (Encoder) AppendLineBreak(dst []byte) []byte { + return append(dst, '\n') +} + +// AppendArrayStart adds markers to indicate the start of an array. +func (Encoder) AppendArrayStart(dst []byte) []byte { + return append(dst, '[') +} + +// AppendArrayEnd adds markers to indicate the end of an array. +func (Encoder) AppendArrayEnd(dst []byte) []byte { + return append(dst, ']') +} + +// AppendArrayDelim adds markers to indicate end of a particular array element. +func (Encoder) AppendArrayDelim(dst []byte) []byte { + if len(dst) > 0 { + return append(dst, ',') + } + return dst +} + +// AppendBool converts the input bool to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendBool(dst []byte, val bool) []byte { return strconv.AppendBool(dst, val) } -func AppendBools(dst []byte, vals []bool) []byte { +// AppendBools encodes the input bools to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendBools(dst []byte, vals []bool) []byte { if len(vals) == 0 { return append(dst, '[', ']') } @@ -26,11 +69,15 @@ func AppendBools(dst []byte, vals []bool) []byte { return dst } -func AppendInt(dst []byte, val int) []byte { +// AppendInt converts the input int to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendInt(dst []byte, val int) []byte { return strconv.AppendInt(dst, int64(val), 10) } -func AppendInts(dst []byte, vals []int) []byte { +// AppendInts encodes the input ints to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendInts(dst []byte, vals []int) []byte { if len(vals) == 0 { return append(dst, '[', ']') } @@ -45,11 +92,15 @@ func AppendInts(dst []byte, vals []int) []byte { return dst } -func AppendInt8(dst []byte, val int8) []byte { +// AppendInt8 converts the input []int8 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendInt8(dst []byte, val int8) []byte { return strconv.AppendInt(dst, int64(val), 10) } -func AppendInts8(dst []byte, vals []int8) []byte { +// AppendInts8 encodes the input int8s to json and +// appends the encoded string list to the input byte slice. 
+func (Encoder) AppendInts8(dst []byte, vals []int8) []byte { if len(vals) == 0 { return append(dst, '[', ']') } @@ -64,11 +115,15 @@ func AppendInts8(dst []byte, vals []int8) []byte { return dst } -func AppendInt16(dst []byte, val int16) []byte { +// AppendInt16 converts the input int16 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendInt16(dst []byte, val int16) []byte { return strconv.AppendInt(dst, int64(val), 10) } -func AppendInts16(dst []byte, vals []int16) []byte { +// AppendInts16 encodes the input int16s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendInts16(dst []byte, vals []int16) []byte { if len(vals) == 0 { return append(dst, '[', ']') } @@ -83,11 +138,15 @@ func AppendInts16(dst []byte, vals []int16) []byte { return dst } -func AppendInt32(dst []byte, val int32) []byte { +// AppendInt32 converts the input int32 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendInt32(dst []byte, val int32) []byte { return strconv.AppendInt(dst, int64(val), 10) } -func AppendInts32(dst []byte, vals []int32) []byte { +// AppendInts32 encodes the input int32s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendInts32(dst []byte, vals []int32) []byte { if len(vals) == 0 { return append(dst, '[', ']') } @@ -102,11 +161,15 @@ func AppendInts32(dst []byte, vals []int32) []byte { return dst } -func AppendInt64(dst []byte, val int64) []byte { +// AppendInt64 converts the input int64 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendInt64(dst []byte, val int64) []byte { return strconv.AppendInt(dst, val, 10) } -func AppendInts64(dst []byte, vals []int64) []byte { +// AppendInts64 encodes the input int64s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendInts64(dst []byte, vals []int64) []byte { if len(vals) == 0 { return append(dst, '[', ']') } @@ -121,11 +184,15 @@ func AppendInts64(dst []byte, vals []int64) []byte { return dst } -func AppendUint(dst []byte, val uint) []byte { +// AppendUint converts the input uint to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendUint(dst []byte, val uint) []byte { return strconv.AppendUint(dst, uint64(val), 10) } -func AppendUints(dst []byte, vals []uint) []byte { +// AppendUints encodes the input uints to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendUints(dst []byte, vals []uint) []byte { if len(vals) == 0 { return append(dst, '[', ']') } @@ -140,11 +207,15 @@ func AppendUints(dst []byte, vals []uint) []byte { return dst } -func AppendUint8(dst []byte, val uint8) []byte { +// AppendUint8 converts the input uint8 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendUint8(dst []byte, val uint8) []byte { return strconv.AppendUint(dst, uint64(val), 10) } -func AppendUints8(dst []byte, vals []uint8) []byte { +// AppendUints8 encodes the input uint8s to json and +// appends the encoded string list to the input byte slice. 
+func (Encoder) AppendUints8(dst []byte, vals []uint8) []byte { if len(vals) == 0 { return append(dst, '[', ']') } @@ -159,11 +230,15 @@ func AppendUints8(dst []byte, vals []uint8) []byte { return dst } -func AppendUint16(dst []byte, val uint16) []byte { +// AppendUint16 converts the input uint16 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendUint16(dst []byte, val uint16) []byte { return strconv.AppendUint(dst, uint64(val), 10) } -func AppendUints16(dst []byte, vals []uint16) []byte { +// AppendUints16 encodes the input uint16s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendUints16(dst []byte, vals []uint16) []byte { if len(vals) == 0 { return append(dst, '[', ']') } @@ -178,11 +253,15 @@ func AppendUints16(dst []byte, vals []uint16) []byte { return dst } -func AppendUint32(dst []byte, val uint32) []byte { +// AppendUint32 converts the input uint32 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendUint32(dst []byte, val uint32) []byte { return strconv.AppendUint(dst, uint64(val), 10) } -func AppendUints32(dst []byte, vals []uint32) []byte { +// AppendUints32 encodes the input uint32s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendUints32(dst []byte, vals []uint32) []byte { if len(vals) == 0 { return append(dst, '[', ']') } @@ -197,11 +276,15 @@ func AppendUints32(dst []byte, vals []uint32) []byte { return dst } -func AppendUint64(dst []byte, val uint64) []byte { +// AppendUint64 converts the input uint64 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendUint64(dst []byte, val uint64) []byte { return strconv.AppendUint(dst, uint64(val), 10) } -func AppendUints64(dst []byte, vals []uint64) []byte { +// AppendUints64 encodes the input uint64s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendUints64(dst []byte, vals []uint64) []byte { if len(vals) == 0 { return append(dst, '[', ']') } @@ -216,7 +299,7 @@ func AppendUints64(dst []byte, vals []uint64) []byte { return dst } -func AppendFloat(dst []byte, val float64, bitSize int) []byte { +func appendFloat(dst []byte, val float64, bitSize int) []byte { // JSON does not permit NaN or Infinity. A typical JSON encoder would fail // with an error, but a logging library wants the data to get thru so we // make a tradeoff and store those types as string. @@ -231,48 +314,89 @@ func AppendFloat(dst []byte, val float64, bitSize int) []byte { return strconv.AppendFloat(dst, val, 'f', -1, bitSize) } -func AppendFloat32(dst []byte, val float32) []byte { - return AppendFloat(dst, float64(val), 32) +// AppendFloat32 converts the input float32 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendFloat32(dst []byte, val float32) []byte { + return appendFloat(dst, float64(val), 32) } -func AppendFloats32(dst []byte, vals []float32) []byte { +// AppendFloats32 encodes the input float32s to json and +// appends the encoded string list to the input byte slice. 
+func (Encoder) AppendFloats32(dst []byte, vals []float32) []byte { if len(vals) == 0 { return append(dst, '[', ']') } dst = append(dst, '[') - dst = AppendFloat(dst, float64(vals[0]), 32) + dst = appendFloat(dst, float64(vals[0]), 32) if len(vals) > 1 { for _, val := range vals[1:] { - dst = AppendFloat(append(dst, ','), float64(val), 32) + dst = appendFloat(append(dst, ','), float64(val), 32) } } dst = append(dst, ']') return dst } -func AppendFloat64(dst []byte, val float64) []byte { - return AppendFloat(dst, val, 64) +// AppendFloat64 converts the input float64 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendFloat64(dst []byte, val float64) []byte { + return appendFloat(dst, val, 64) } -func AppendFloats64(dst []byte, vals []float64) []byte { +// AppendFloats64 encodes the input float64s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendFloats64(dst []byte, vals []float64) []byte { if len(vals) == 0 { return append(dst, '[', ']') } dst = append(dst, '[') - dst = AppendFloat(dst, vals[0], 32) + dst = appendFloat(dst, vals[0], 64) if len(vals) > 1 { for _, val := range vals[1:] { - dst = AppendFloat(append(dst, ','), val, 64) + dst = appendFloat(append(dst, ','), val, 64) } } dst = append(dst, ']') return dst } -func AppendInterface(dst []byte, i interface{}) []byte { +// AppendInterface marshals the input interface to a string and +// appends the encoded string to the input byte slice. +func (e Encoder) AppendInterface(dst []byte, i interface{}) []byte { marshaled, err := json.Marshal(i) if err != nil { - return AppendString(dst, fmt.Sprintf("marshaling error: %v", err)) + return e.AppendString(dst, fmt.Sprintf("marshaling error: %v", err)) } return append(dst, marshaled...) } + +// AppendObjectData takes in an object that is already in a byte array +// and adds it to the dst. +func (Encoder) AppendObjectData(dst []byte, o []byte) []byte { + // Two conditions we want to put a ',' between existing content and + // new content: + // 1. new content starts with '{' - which should be dropped OR + // 2. existing content already has other fields + if o[0] == '{' { + o[0] = ',' + } else if len(dst) > 1 { + dst = append(dst, ',') + } + return append(dst, o...) +} + +// AppendIPAddr adds IPv4 or IPv6 address to dst. +func (e Encoder) AppendIPAddr(dst []byte, ip net.IP) []byte { + return e.AppendString(dst, ip.String()) +} + +// AppendIPPrefix adds IPv4 or IPv6 Prefix (address & mask) to dst. +func (e Encoder) AppendIPPrefix(dst []byte, pfx net.IPNet) []byte { + return e.AppendString(dst, pfx.String()) + +} + +// AppendMACAddr adds MAC address to dst.
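The comma handling in `AppendObjectData` above is what lets a logger splice its pre-encoded context bytes into a fresh event buffer. The following is a minimal, self-contained sketch of that merge rule only; the buffer contents and variable names are illustrative, not the package's internals:

```go
package main

import "fmt"

// appendObjectData mirrors the merge rule described above: when the appended
// object starts with '{', that brace is rewritten to ',' so its fields land
// inside the destination object that is still open; otherwise a ',' separator
// is added if the destination already holds fields.
func appendObjectData(dst, o []byte) []byte {
	if o[0] == '{' {
		o[0] = ','
	} else if len(dst) > 1 {
		dst = append(dst, ',')
	}
	return append(dst, o...)
}

func main() {
	event := []byte(`{"level":"info"`) // event buffer, object still open
	context := []byte(`{"foo":"bar"`)  // pre-encoded logger context, also open
	merged := appendObjectData(event, context)
	merged = append(merged, '}') // zerolog closes the event later, when Msg is called
	fmt.Println(string(merged))  // {"level":"info","foo":"bar"}
}
```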
+func (e Encoder) AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte { + return e.AppendString(dst, ha.String()) +} diff --git a/internal/json/types_test.go b/internal/json/types_test.go index 6a297a8..f396299 100644 --- a/internal/json/types_test.go +++ b/internal/json/types_test.go @@ -2,31 +2,165 @@ package json import ( "math" + "net" "reflect" "testing" ) -func Test_appendFloat64(t *testing.T) { +func TestAppendType(t *testing.T) { + w := map[string]func(interface{}) []byte{ + "AppendInt": func(v interface{}) []byte { return enc.AppendInt([]byte{}, v.(int)) }, + "AppendInt8": func(v interface{}) []byte { return enc.AppendInt8([]byte{}, v.(int8)) }, + "AppendInt16": func(v interface{}) []byte { return enc.AppendInt16([]byte{}, v.(int16)) }, + "AppendInt32": func(v interface{}) []byte { return enc.AppendInt32([]byte{}, v.(int32)) }, + "AppendInt64": func(v interface{}) []byte { return enc.AppendInt64([]byte{}, v.(int64)) }, + "AppendUint": func(v interface{}) []byte { return enc.AppendUint([]byte{}, v.(uint)) }, + "AppendUint8": func(v interface{}) []byte { return enc.AppendUint8([]byte{}, v.(uint8)) }, + "AppendUint16": func(v interface{}) []byte { return enc.AppendUint16([]byte{}, v.(uint16)) }, + "AppendUint32": func(v interface{}) []byte { return enc.AppendUint32([]byte{}, v.(uint32)) }, + "AppendUint64": func(v interface{}) []byte { return enc.AppendUint64([]byte{}, v.(uint64)) }, + "AppendFloat32": func(v interface{}) []byte { return enc.AppendFloat32([]byte{}, v.(float32)) }, + "AppendFloat64": func(v interface{}) []byte { return enc.AppendFloat64([]byte{}, v.(float64)) }, + } tests := []struct { name string - input float64 + fn string + input interface{} want []byte }{ - {"-Inf", math.Inf(-1), []byte(`"-Inf"`)}, - {"+Inf", math.Inf(1), []byte(`"+Inf"`)}, - {"NaN", math.NaN(), []byte(`"NaN"`)}, - {"0", 0, []byte(`0`)}, - {"-1.1", -1.1, []byte(`-1.1`)}, - {"1e20", 1e20, []byte(`100000000000000000000`)}, - {"1e21", 1e21, []byte(`1000000000000000000000`)}, + {"AppendInt8(math.MaxInt8)", "AppendInt8", int8(math.MaxInt8), []byte("127")}, + {"AppendInt16(math.MaxInt16)", "AppendInt16", int16(math.MaxInt16), []byte("32767")}, + {"AppendInt32(math.MaxInt32)", "AppendInt32", int32(math.MaxInt32), []byte("2147483647")}, + {"AppendInt64(math.MaxInt64)", "AppendInt64", int64(math.MaxInt64), []byte("9223372036854775807")}, + + {"AppendUint8(math.MaxUint8)", "AppendUint8", uint8(math.MaxUint8), []byte("255")}, + {"AppendUint16(math.MaxUint16)", "AppendUint16", uint16(math.MaxUint16), []byte("65535")}, + {"AppendUint32(math.MaxUint32)", "AppendUint32", uint32(math.MaxUint32), []byte("4294967295")}, + {"AppendUint64(math.MaxUint64)", "AppendUint64", uint64(math.MaxUint64), []byte("18446744073709551615")}, + + {"AppendFloat32(-Inf)", "AppendFloat32", float32(math.Inf(-1)), []byte(`"-Inf"`)}, + {"AppendFloat32(+Inf)", "AppendFloat32", float32(math.Inf(1)), []byte(`"+Inf"`)}, + {"AppendFloat32(NaN)", "AppendFloat32", float32(math.NaN()), []byte(`"NaN"`)}, + {"AppendFloat32(0)", "AppendFloat32", float32(0), []byte(`0`)}, + {"AppendFloat32(-1.1)", "AppendFloat32", float32(-1.1), []byte(`-1.1`)}, + {"AppendFloat32(1e20)", "AppendFloat32", float32(1e20), []byte(`100000000000000000000`)}, + {"AppendFloat32(1e21)", "AppendFloat32", float32(1e21), []byte(`1000000000000000000000`)}, + + {"AppendFloat64(-Inf)", "AppendFloat64", float64(math.Inf(-1)), []byte(`"-Inf"`)}, + {"AppendFloat64(+Inf)", "AppendFloat64", float64(math.Inf(1)), []byte(`"+Inf"`)}, + {"AppendFloat64(NaN)", "AppendFloat64", 
float64(math.NaN()), []byte(`"NaN"`)}, + {"AppendFloat64(0)", "AppendFloat64", float64(0), []byte(`0`)}, + {"AppendFloat64(-1.1)", "AppendFloat64", float64(-1.1), []byte(`-1.1`)}, + {"AppendFloat64(1e20)", "AppendFloat64", float64(1e20), []byte(`100000000000000000000`)}, + {"AppendFloat64(1e21)", "AppendFloat64", float64(1e21), []byte(`1000000000000000000000`)}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := AppendFloat32([]byte{}, float32(tt.input)); !reflect.DeepEqual(got, tt.want) { - t.Errorf("appendFloat32() = %s, want %s", got, tt.want) - } - if got := AppendFloat64([]byte{}, tt.input); !reflect.DeepEqual(got, tt.want) { - t.Errorf("appendFloat32() = %s, want %s", got, tt.want) + if got := w[tt.fn](tt.input); !reflect.DeepEqual(got, tt.want) { + t.Errorf("got %s, want %s", got, tt.want) + } + }) + } +} + +func Test_appendMAC(t *testing.T) { + MACtests := []struct { + input string + want []byte + }{ + {"01:23:45:67:89:ab", []byte(`"01:23:45:67:89:ab"`)}, + {"cd:ef:11:22:33:44", []byte(`"cd:ef:11:22:33:44"`)}, + } + for _, tt := range MACtests { + t.Run("MAC", func(t *testing.T) { + ha, _ := net.ParseMAC(tt.input) + if got := enc.AppendMACAddr([]byte{}, ha); !reflect.DeepEqual(got, tt.want) { + t.Errorf("appendMACAddr() = %s, want %s", got, tt.want) + } + }) + } +} + +func Test_appendIP(t *testing.T) { + IPv4tests := []struct { + input net.IP + want []byte + }{ + {net.IP{0, 0, 0, 0}, []byte(`"0.0.0.0"`)}, + {net.IP{192, 0, 2, 200}, []byte(`"192.0.2.200"`)}, + } + + for _, tt := range IPv4tests { + t.Run("IPv4", func(t *testing.T) { + if got := enc.AppendIPAddr([]byte{}, tt.input); !reflect.DeepEqual(got, tt.want) { + t.Errorf("appendIPAddr() = %s, want %s", got, tt.want) + } + }) + } + IPv6tests := []struct { + input net.IP + want []byte + }{ + {net.IPv6zero, []byte(`"::"`)}, + {net.IPv6linklocalallnodes, []byte(`"ff02::1"`)}, + {net.IP{0x20, 0x01, 0x0d, 0xb8, 0x85, 0xa3, 0x00, 0x00, 0x00, 0x00, 0x8a, 0x2e, 0x03, 0x70, 0x73, 0x34}, []byte(`"2001:db8:85a3::8a2e:370:7334"`)}, + } + for _, tt := range IPv6tests { + t.Run("IPv6", func(t *testing.T) { + if got := enc.AppendIPAddr([]byte{}, tt.input); !reflect.DeepEqual(got, tt.want) { + t.Errorf("appendIPAddr() = %s, want %s", got, tt.want) + } + }) + } +} + +func Test_appendIPPrefix(t *testing.T) { + IPv4Prefixtests := []struct { + input net.IPNet + want []byte + }{ + {net.IPNet{IP: net.IP{0, 0, 0, 0}, Mask: net.IPv4Mask(0, 0, 0, 0)}, []byte(`"0.0.0.0/0"`)}, + {net.IPNet{IP: net.IP{192, 0, 2, 200}, Mask: net.IPv4Mask(255, 255, 255, 0)}, []byte(`"192.0.2.200/24"`)}, + } + for _, tt := range IPv4Prefixtests { + t.Run("IPv4", func(t *testing.T) { + if got := enc.AppendIPPrefix([]byte{}, tt.input); !reflect.DeepEqual(got, tt.want) { + t.Errorf("appendIPPrefix() = %s, want %s", got, tt.want) + } + }) + } + IPv6Prefixtests := []struct { + input net.IPNet + want []byte + }{ + {net.IPNet{IP: net.IPv6zero, Mask: net.CIDRMask(0, 128)}, []byte(`"::/0"`)}, + {net.IPNet{IP: net.IPv6linklocalallnodes, Mask: net.CIDRMask(128, 128)}, []byte(`"ff02::1/128"`)}, + {net.IPNet{IP: net.IP{0x20, 0x01, 0x0d, 0xb8, 0x85, 0xa3, 0x00, 0x00, 0x00, 0x00, 0x8a, 0x2e, 0x03, 0x70, 0x73, 0x34}, + Mask: net.CIDRMask(64, 128)}, + []byte(`"2001:db8:85a3::8a2e:370:7334/64"`)}, + } + for _, tt := range IPv6Prefixtests { + t.Run("IPv6", func(t *testing.T) { + if got := enc.AppendIPPrefix([]byte{}, tt.input); !reflect.DeepEqual(got, tt.want) { + t.Errorf("appendIPPrefix() = %s, want %s", got, tt.want) + } + }) + } +} + +func Test_appendMac(t 
*testing.T) { + MACtests := []struct { + input net.HardwareAddr + want []byte + }{ + {net.HardwareAddr{0x12, 0x34, 0x56, 0x78, 0x90, 0xab}, []byte(`"12:34:56:78:90:ab"`)}, + {net.HardwareAddr{0x12, 0x34, 0x00, 0x00, 0x90, 0xab}, []byte(`"12:34:00:00:90:ab"`)}, + } + + for _, tt := range MACtests { + t.Run("MAC", func(t *testing.T) { + if got := enc.AppendMACAddr([]byte{}, tt.input); !reflect.DeepEqual(got, tt.want) { + t.Errorf("appendMAC() = %s, want %s", got, tt.want) + } + }) + } diff --git a/journald/journald.go b/journald/journald.go new file mode 100644 index 0000000..dde4e3e --- /dev/null +++ b/journald/journald.go @@ -0,0 +1,115 @@ +// +build !windows + +// Package journald provides an io.Writer to send the logs +// to the journald component of systemd. + +package journald + +// This file provides a zerolog writer so that logs printed +// using the zerolog library can be sent to journald. + +// Zerolog's top-level key/value pairs are translated to +// journald's args - all values are sent to journald as strings, +// and all key strings are converted to uppercase before sending +// to journald (as required by journald). + +// In addition, the entire log message (all key/value pairs) is also +// sent to journald under the key "JSON". + +import ( + "bytes" + "encoding/json" + "fmt" + "github.com/coreos/go-systemd/journal" + "github.com/rs/zerolog" + "github.com/rs/zerolog/internal/cbor" + "io" + "strings" ) + +const defaultJournalDPrio = journal.PriNotice + +// NewJournalDWriter returns a zerolog log destination +// to be used as a parameter to New() calls. Writing logs +// to this writer will send the log messages to journald +// running on this system. +func NewJournalDWriter() io.Writer { + return journalWriter{} +} + +type journalWriter struct { +} + +// levelToJPrio converts a zerolog level string into +// journald's priority values. Journald has more +// priorities than zerolog.
+func levelToJPrio(zLevel string) journal.Priority { + lvl, _ := zerolog.ParseLevel(zLevel) + + switch lvl { + case zerolog.DebugLevel: + return journal.PriDebug + case zerolog.InfoLevel: + return journal.PriInfo + case zerolog.WarnLevel: + return journal.PriWarning + case zerolog.ErrorLevel: + return journal.PriErr + case zerolog.FatalLevel: + return journal.PriCrit + case zerolog.PanicLevel: + return journal.PriEmerg + case zerolog.NoLevel: + return journal.PriNotice + } + return defaultJournalDPrio +} + +func (w journalWriter) Write(p []byte) (n int, err error) { + if !journal.Enabled() { + err = fmt.Errorf("Cannot connect to journalD!!") + return + } + var event map[string]interface{} + p = cbor.DecodeIfBinaryToBytes(p) + d := json.NewDecoder(bytes.NewReader(p)) + d.UseNumber() + err = d.Decode(&event) + jPrio := defaultJournalDPrio + args := make(map[string]string, 0) + if err != nil { + return + } + if l, ok := event[zerolog.LevelFieldName].(string); ok { + jPrio = levelToJPrio(l) + } + + msg := "" + for key, value := range event { + jKey := strings.ToUpper(key) + switch key { + case zerolog.LevelFieldName, zerolog.TimestampFieldName: + continue + case zerolog.MessageFieldName: + msg, _ = value.(string) + continue + } + + switch value.(type) { + case string: + args[jKey], _ = value.(string) + case json.Number: + args[jKey] = fmt.Sprint(value) + default: + b, err := json.Marshal(value) + if err != nil { + args[jKey] = fmt.Sprintf("[error: %v]", err) + } else { + args[jKey] = string(b) + } + } + } + args["JSON"] = string(p) + err = journal.Send(msg, jPrio, args) + return +} diff --git a/journald/journald_test.go b/journald/journald_test.go new file mode 100644 index 0000000..7ea40b5 --- /dev/null +++ b/journald/journald_test.go @@ -0,0 +1,44 @@ +// +build !windows + +package journald_test + +import "github.com/rs/zerolog" +import "github.com/rs/zerolog/journald" + +func ExampleNewJournalDWriter() { + log := zerolog.New(journald.NewJournalDWriter()) + log.Info().Str("foo", "bar").Uint64("small", 123).Float64("float", 3.14).Uint64("big", 1152921504606846976).Msg("Journal Test") + // Output: +} + +/* + +There is no automated way to verify the output - since the output is sent +to journald process and method to retrieve is journalctl. Will find a way +to automate the process and fix this test. + +$ journalctl -o verbose -f + +Thu 2018-04-26 22:30:20.768136 PDT [s=3284d695bde946e4b5017c77a399237f;i=329f0;b=98c0dca0debc4b98a5b9534e910e7dd6;m=7a702e35dd4;t=56acdccd2ed0a;x=4690034cf0348614] + PRIORITY=6 + _AUDIT_LOGINUID=1000 + _BOOT_ID=98c0dca0debc4b98a5b9534e910e7dd6 + _MACHINE_ID=926ed67eb4744580948de70fb474975e + _HOSTNAME=sprint + _UID=1000 + _GID=1000 + _CAP_EFFECTIVE=0 + _SYSTEMD_SLICE=-.slice + _TRANSPORT=journal + _SYSTEMD_CGROUP=/ + _AUDIT_SESSION=2945 + MESSAGE=Journal Test + FOO=bar + BIG=1152921504606846976 + _COMM=journald.test + SMALL=123 + FLOAT=3.14 + JSON={"level":"info","foo":"bar","small":123,"float":3.14,"big":1152921504606846976,"message":"Journal Test"} + _PID=27103 + _SOURCE_REALTIME_TIMESTAMP=1524807020768136 +*/ diff --git a/log.go b/log.go index 843a94b..8eb45a8 100644 --- a/log.go +++ b/log.go @@ -82,6 +82,20 @@ // log.Warn().Msg("") // // Output: {"level":"warn","severity":"warn"} // +// +// Caveats +// +// There is no fields deduplication out-of-the-box. +// Using the same key multiple times creates new key in final JSON each time. +// +// logger := zerolog.New(os.Stderr).With().Timestamp().Logger() +// logger.Info(). +// Timestamp(). 
+// Msg("dup") +// // Output: {"level":"info","time":1494567715,"time":1494567715,"message":"dup"} +// +// However, it’s not a big deal though as JSON accepts dup keys, +// the last one prevails. package zerolog import ( @@ -134,6 +148,28 @@ func (l Level) String() string { return "" } +// ParseLevel converts a level string into a zerolog Level value. +// returns an error if the input string does not match known values. +func ParseLevel(levelStr string) (Level, error) { + switch levelStr { + case DebugLevel.String(): + return DebugLevel, nil + case InfoLevel.String(): + return InfoLevel, nil + case WarnLevel.String(): + return WarnLevel, nil + case ErrorLevel.String(): + return ErrorLevel, nil + case FatalLevel.String(): + return FatalLevel, nil + case PanicLevel.String(): + return PanicLevel, nil + case NoLevel.String(): + return NoLevel, nil + } + return NoLevel, fmt.Errorf("Unknown Level String: '%s', defaulting to NoLevel", levelStr) +} + // A Logger represents an active logging object that generates lines // of JSON output to an io.Writer. Each logging operation makes a single // call to the Writer's Write method. There is no guaranty on access @@ -256,22 +292,24 @@ func (l *Logger) Error() *Event { } // Fatal starts a new message with fatal level. The os.Exit(1) function -// is called by the Msg method. +// is called by the Msg method, which terminates the program immediately. // // You must call Msg on the returned event in order to send the event. func (l *Logger) Fatal() *Event { return l.newEvent(FatalLevel, func(msg string) { os.Exit(1) }) } -// Panic starts a new message with panic level. The message is also sent -// to the panic function. +// Panic starts a new message with panic level. The panic() function +// is called by the Msg method, which stops the ordinary flow of a goroutine. // // You must call Msg on the returned event in order to send the event. func (l *Logger) Panic() *Event { return l.newEvent(PanicLevel, func(msg string) { panic(msg) }) } -// WithLevel starts a new message with level. +// WithLevel starts a new message with level. Unlike Fatal and Panic +// methods, WithLevel does not terminate the program or stop the ordinary +// flow of a gourotine when used with their respective levels. // // You must call Msg on the returned event in order to send the event. func (l *Logger) WithLevel(level Level) *Event { @@ -285,9 +323,9 @@ func (l *Logger) WithLevel(level Level) *Event { case ErrorLevel: return l.Error() case FatalLevel: - return l.Fatal() + return l.newEvent(FatalLevel, nil) case PanicLevel: - return l.Panic() + return l.newEvent(PanicLevel, nil) case NoLevel: return l.Log() case Disabled: @@ -338,24 +376,21 @@ func (l *Logger) newEvent(level Level, done func(string)) *Event { if !enabled { return nil } - e := newEvent(l.w, level, true) + e := newEvent(l.w, level) e.done = done e.ch = l.hooks if level != NoLevel { e.Str(LevelFieldName, level.String()) } - if len(l.context) > 0 { - if len(e.buf) > 1 { - e.buf = append(e.buf, ',') - } - e.buf = append(e.buf, l.context...) + if l.context != nil && len(l.context) > 0 { + e.buf = enc.AppendObjectData(e.buf, l.context) } return e } // should returns true if the log event should be logged. 
func (l *Logger) should(lvl Level) bool { - if lvl < l.level || lvl < globalLevel() { + if lvl < l.level || lvl < GlobalLevel() { return false } if l.sampler != nil && !samplingDisabled() { diff --git a/log/log.go b/log/log.go index ad61913..dd92ab9 100644 --- a/log/log.go +++ b/log/log.go @@ -22,7 +22,7 @@ func With() zerolog.Context { return Logger.With() } -// Level crestes a child logger with the minium accepted level set to level. +// Level creates a child logger with the minimum accepted level set to level. func Level(level zerolog.Level) zerolog.Logger { return Logger.Level(level) } @@ -89,7 +89,7 @@ func WithLevel(level zerolog.Level) *zerolog.Event { } // Log starts a new message with no level. Setting zerolog.GlobalLevel to -// zerlog.Disabled will still disable events produced by this method. +// zerolog.Disabled will still disable events produced by this method. // // You must call Msg on the returned event in order to send the event. func Log() *zerolog.Event { diff --git a/log/log_example_test.go b/log/log_example_test.go new file mode 100644 index 0000000..4938435 --- /dev/null +++ b/log/log_example_test.go @@ -0,0 +1,143 @@ +// +build !binary_log + +package log_test + +import ( + "errors" + "flag" + "os" + "time" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +// setup would normally be an init() function, however, there seems +// to be something awry with the testing framework when we set the +// global Logger from an init() +func setup() { + // UNIX Time is faster and smaller than most timestamps + // If you set zerolog.TimeFieldFormat to an empty string, + // logs will write with UNIX time + zerolog.TimeFieldFormat = "" + // In order to always output a static time to stdout for these + // examples to pass, we need to override zerolog.TimestampFunc + // and log.Logger globals -- you would not normally need to do this + zerolog.TimestampFunc = func() time.Time { + return time.Date(2008, 1, 8, 17, 5, 05, 0, time.UTC) + } + log.Logger = zerolog.New(os.Stdout).With().Timestamp().Logger() +} + +// Simple logging example using the Print function in the log package +// Note that both Print and Printf are at the debug log level by default +func ExamplePrint() { + setup() + + log.Print("hello world") + // Output: {"level":"debug","time":1199811905,"message":"hello world"} +} + +// Simple logging example using the Printf function in the log package +func ExamplePrintf() { + setup() + + log.Printf("hello %s", "world") + // Output: {"level":"debug","time":1199811905,"message":"hello world"} +} + +// Example of a log with no particular "level" +func ExampleLog() { + setup() + log.Log().Msg("hello world") + + // Output: {"time":1199811905,"message":"hello world"} +} + +// Example of a log at a particular "level" (in this case, "debug") +func ExampleDebug() { + setup() + log.Debug().Msg("hello world") + + // Output: {"level":"debug","time":1199811905,"message":"hello world"} +} + +// Example of a log at a particular "level" (in this case, "info") +func ExampleInfo() { + setup() + log.Info().Msg("hello world") + + // Output: {"level":"info","time":1199811905,"message":"hello world"} +} + +// Example of a log at a particular "level" (in this case, "warn") +func ExampleWarn() { + setup() + log.Warn().Msg("hello world") + + // Output: {"level":"warn","time":1199811905,"message":"hello world"} +} + +// Example of a log at a particular "level" (in this case, "error") +func ExampleError() { + setup() + log.Error().Msg("hello world") + + // Output: 
{"level":"error","time":1199811905,"message":"hello world"} +} + +// Example of a log at a particular "level" (in this case, "fatal") +func ExampleFatal() { + setup() + err := errors.New("A repo man spends his life getting into tense situations") + service := "myservice" + + log.Fatal(). + Err(err). + Str("service", service). + Msgf("Cannot start %s", service) + + // Outputs: {"level":"fatal","time":1199811905,"error":"A repo man spends his life getting into tense situations","service":"myservice","message":"Cannot start myservice"} +} + +// TODO: Panic + +// This example uses command-line flags to demonstrate various outputs +// depending on the chosen log level. +func Example() { + setup() + debug := flag.Bool("debug", false, "sets log level to debug") + + flag.Parse() + + // Default level for this example is info, unless debug flag is present + zerolog.SetGlobalLevel(zerolog.InfoLevel) + if *debug { + zerolog.SetGlobalLevel(zerolog.DebugLevel) + } + + log.Debug().Msg("This message appears only when log level set to Debug") + log.Info().Msg("This message appears when log level set to Debug or Info") + + if e := log.Debug(); e.Enabled() { + // Compute log output only if enabled. + value := "bar" + e.Str("foo", value).Msg("some debug message") + } + + // Output: {"level":"info","time":1199811905,"message":"This message appears when log level set to Debug or Info"} +} + +// TODO: Output + +// TODO: With + +// TODO: Level + +// TODO: Sample + +// TODO: Hook + +// TODO: WithLevel + +// TODO: Ctx diff --git a/log_example_test.go b/log_example_test.go index 0570ad5..7344745 100644 --- a/log_example_test.go +++ b/log_example_test.go @@ -1,8 +1,12 @@ +// +build !binary_log + package zerolog_test import ( "errors" + "fmt" stdlog "log" + "net" "os" "time" @@ -13,7 +17,6 @@ func ExampleNew() { log := zerolog.New(os.Stdout) log.Info().Msg("hello world") - // Output: {"level":"info","message":"hello world"} } @@ -45,8 +48,8 @@ func ExampleLogger_Sample() { log.Info().Msg("message 3") log.Info().Msg("message 4") - // Output: {"level":"info","message":"message 2"} - // {"level":"info","message":"message 4"} + // Output: {"level":"info","message":"message 1"} + // {"level":"info","message":"message 3"} } type LevelNameHook struct{} @@ -193,6 +196,22 @@ func (u User) MarshalZerologObject(e *zerolog.Event) { Time("created", u.Created) } +type Price struct { + val uint64 + prec int + unit string +} + +func (p Price) MarshalZerologObject(e *zerolog.Event) { + denom := uint64(1) + for i := 0; i < p.prec; i++ { + denom *= 10 + } + result := []byte(p.unit) + result = append(result, fmt.Sprintf("%d.%d", p.val/denom, p.val%denom)...) + e.Str("price", string(result)) +} + type Users []User func (uu Users) MarshalZerologArray(a *zerolog.Array) { @@ -246,6 +265,19 @@ func ExampleEvent_Object() { // Output: {"foo":"bar","user":{"name":"John","age":35,"created":"0001-01-01T00:00:00Z"},"message":"hello world"} } +func ExampleEvent_EmbedObject() { + log := zerolog.New(os.Stdout) + + price := Price{val: 6449, prec: 2, unit: "$"} + + log.Log(). + Str("foo", "bar"). + EmbedObject(price). 
+ Msg("hello world") + + // Output: {"foo":"bar","price":"$64.49","message":"hello world"} +} + func ExampleEvent_Interface() { log := zerolog.New(os.Stdout) @@ -349,6 +381,20 @@ func ExampleContext_Object() { // Output: {"foo":"bar","user":{"name":"John","age":35,"created":"0001-01-01T00:00:00Z"},"message":"hello world"} } +func ExampleContext_EmbedObject() { + + price := Price{val: 6449, prec: 2, unit: "$"} + + log := zerolog.New(os.Stdout).With(). + Str("foo", "bar"). + EmbedObject(price). + Logger() + + log.Log().Msg("hello world") + + // Output: {"foo":"bar","price":"$64.49","message":"hello world"} +} + func ExampleContext_Interface() { obj := struct { Name string `json:"name"` @@ -394,3 +440,36 @@ func ExampleContext_Durs() { // Output: {"foo":"bar","durs":[10000,20000],"message":"hello world"} } + +func ExampleContext_IPAddr() { + hostIP := net.IP{192, 168, 0, 100} + log := zerolog.New(os.Stdout).With(). + IPAddr("HostIP", hostIP). + Logger() + + log.Log().Msg("hello world") + + // Output: {"HostIP":"192.168.0.100","message":"hello world"} +} + +func ExampleContext_IPPrefix() { + route := net.IPNet{IP: net.IP{192, 168, 0, 0}, Mask: net.CIDRMask(24, 32)} + log := zerolog.New(os.Stdout).With(). + IPPrefix("Route", route). + Logger() + + log.Log().Msg("hello world") + + // Output: {"Route":"192.168.0.0/24","message":"hello world"} +} + +func ExampleContext_MacAddr() { + mac := net.HardwareAddr{0x00, 0x14, 0x22, 0x01, 0x23, 0x45} + log := zerolog.New(os.Stdout).With(). + MACAddr("hostMAC", mac). + Logger() + + log.Log().Msg("hello world") + + // Output: {"hostMAC":"00:14:22:01:23:45","message":"hello world"} +} diff --git a/log_test.go b/log_test.go index a0aeb03..50cd1a6 100644 --- a/log_test.go +++ b/log_test.go @@ -4,6 +4,7 @@ import ( "bytes" "errors" "fmt" + "net" "reflect" "runtime" "testing" @@ -15,7 +16,7 @@ func TestLog(t *testing.T) { out := &bytes.Buffer{} log := New(out) log.Log().Msg("") - if got, want := out.String(), "{}\n"; got != want { + if got, want := decodeIfBinaryToString(out.Bytes()), "{}\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } }) @@ -24,7 +25,7 @@ func TestLog(t *testing.T) { out := &bytes.Buffer{} log := New(out) log.Log().Str("foo", "bar").Msg("") - if got, want := out.String(), `{"foo":"bar"}`+"\n"; got != want { + if got, want := decodeIfBinaryToString(out.Bytes()), `{"foo":"bar"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } }) @@ -36,7 +37,7 @@ func TestLog(t *testing.T) { Str("foo", "bar"). Int("n", 123). 
Msg("") - if got, want := out.String(), `{"foo":"bar","n":123}`+"\n"; got != want { + if got, want := decodeIfBinaryToString(out.Bytes()), `{"foo":"bar","n":123}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } }) @@ -47,7 +48,7 @@ func TestInfo(t *testing.T) { out := &bytes.Buffer{} log := New(out) log.Info().Msg("") - if got, want := out.String(), `{"level":"info"}`+"\n"; got != want { + if got, want := decodeIfBinaryToString(out.Bytes()), `{"level":"info"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } }) @@ -56,7 +57,7 @@ func TestInfo(t *testing.T) { out := &bytes.Buffer{} log := New(out) log.Info().Str("foo", "bar").Msg("") - if got, want := out.String(), `{"level":"info","foo":"bar"}`+"\n"; got != want { + if got, want := decodeIfBinaryToString(out.Bytes()), `{"level":"info","foo":"bar"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } }) @@ -68,7 +69,7 @@ func TestInfo(t *testing.T) { Str("foo", "bar"). Int("n", 123). Msg("") - if got, want := out.String(), `{"level":"info","foo":"bar","n":123}`+"\n"; got != want { + if got, want := decodeIfBinaryToString(out.Bytes()), `{"level":"info","foo":"bar","n":123}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } }) @@ -77,7 +78,10 @@ func TestInfo(t *testing.T) { func TestWith(t *testing.T) { out := &bytes.Buffer{} ctx := New(out).With(). - Str("foo", "bar"). + Str("string", "foo"). + Bytes("bytes", []byte("bar")). + Hex("hex", []byte{0x12, 0xef}). + RawJSON("json", []byte(`{"some":"json"}`)). AnErr("some_err", nil). Err(errors.New("some error")). Bool("bool", true). @@ -91,14 +95,14 @@ func TestWith(t *testing.T) { Uint16("uint16", 8). Uint32("uint32", 9). Uint64("uint64", 10). - Float32("float32", 11). - Float64("float64", 12). + Float32("float32", 11.101). + Float64("float64", 12.30303). 
Time("time", time.Time{}) _, file, line, _ := runtime.Caller(0) caller := fmt.Sprintf("%s:%d", file, line+3) log := ctx.Caller().Logger() log.Log().Msg("") - if got, want := out.String(), `{"foo":"bar","error":"some error","bool":true,"int":1,"int8":2,"int16":3,"int32":4,"int64":5,"uint":6,"uint8":7,"uint16":8,"uint32":9,"uint64":10,"float32":11,"float64":12,"time":"0001-01-01T00:00:00Z","caller":"`+caller+`"}`+"\n"; got != want { + if got, want := decodeIfBinaryToString(out.Bytes()), `{"string":"foo","bytes":"bar","hex":"12ef","json":{"some":"json"},"error":"some error","bool":true,"int":1,"int8":2,"int16":3,"int32":4,"int64":5,"uint":6,"uint8":7,"uint16":8,"uint32":9,"uint64":10,"float32":11.101,"float64":12.30303,"time":"0001-01-01T00:00:00Z","caller":"`+caller+`"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } @@ -124,10 +128,84 @@ func TestFieldsMap(t *testing.T) { "uint64": uint64(10), "float32": float32(11), "float64": float64(12), + "ipv6": net.IP{0x20, 0x01, 0x0d, 0xb8, 0x85, 0xa3, 0x00, 0x00, 0x00, 0x00, 0x8a, 0x2e, 0x03, 0x70, 0x73, 0x34}, "dur": 1 * time.Second, "time": time.Time{}, + "obj": obj{"a", "b", 1}, }).Msg("") - if got, want := out.String(), `{"bool":true,"bytes":"bar","dur":1000,"error":"some error","float32":11,"float64":12,"int":1,"int16":3,"int32":4,"int64":5,"int8":2,"nil":null,"string":"foo","time":"0001-01-01T00:00:00Z","uint":6,"uint16":8,"uint32":9,"uint64":10,"uint8":7}`+"\n"; got != want { + if got, want := decodeIfBinaryToString(out.Bytes()), `{"bool":true,"bytes":"bar","dur":1000,"error":"some error","float32":11,"float64":12,"int":1,"int16":3,"int32":4,"int64":5,"int8":2,"ipv6":"2001:db8:85a3::8a2e:370:7334","nil":null,"obj":{"Pub":"a","Tag":"b","priv":1},"string":"foo","time":"0001-01-01T00:00:00Z","uint":6,"uint16":8,"uint32":9,"uint64":10,"uint8":7}`+"\n"; got != want { + t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) + } +} + +func TestFieldsMapPnt(t *testing.T) { + out := &bytes.Buffer{} + log := New(out) + log.Log().Fields(map[string]interface{}{ + "string": new(string), + "bool": new(bool), + "int": new(int), + "int8": new(int8), + "int16": new(int16), + "int32": new(int32), + "int64": new(int64), + "uint": new(uint), + "uint8": new(uint8), + "uint16": new(uint16), + "uint32": new(uint32), + "uint64": new(uint64), + "float32": new(float32), + "float64": new(float64), + "dur": new(time.Duration), + "time": new(time.Time), + }).Msg("") + if got, want := decodeIfBinaryToString(out.Bytes()), `{"bool":false,"dur":0,"float32":0,"float64":0,"int":0,"int16":0,"int32":0,"int64":0,"int8":0,"string":"","time":"0001-01-01T00:00:00Z","uint":0,"uint16":0,"uint32":0,"uint64":0,"uint8":0}`+"\n"; got != want { + t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) + } +} + +func TestFieldsMapNilPnt(t *testing.T) { + var ( + stringPnt *string + boolPnt *bool + intPnt *int + int8Pnt *int8 + int16Pnt *int16 + int32Pnt *int32 + int64Pnt *int64 + uintPnt *uint + uint8Pnt *uint8 + uint16Pnt *uint16 + uint32Pnt *uint32 + uint64Pnt *uint64 + float32Pnt *float32 + float64Pnt *float64 + durPnt *time.Duration + timePnt *time.Time + ) + out := &bytes.Buffer{} + log := New(out) + fields := map[string]interface{}{ + "string": stringPnt, + "bool": boolPnt, + "int": intPnt, + "int8": int8Pnt, + "int16": int16Pnt, + "int32": int32Pnt, + "int64": int64Pnt, + "uint": uintPnt, + "uint8": uint8Pnt, + "uint16": uint16Pnt, + "uint32": uint32Pnt, + "uint64": uint64Pnt, + "float32": float32Pnt, + "float64": float64Pnt, + 
"dur": durPnt, + "time": timePnt, + } + + log.Log().Fields(fields).Msg("") + if got, want := decodeIfBinaryToString(out.Bytes()), `{"bool":null,"dur":null,"float32":null,"float64":null,"int":null,"int16":null,"int32":null,"int64":null,"int8":null,"string":null,"time":null,"uint":null,"uint16":null,"uint32":null,"uint64":null,"uint8":null}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } @@ -142,6 +220,8 @@ func TestFields(t *testing.T) { Caller(). Str("string", "foo"). Bytes("bytes", []byte("bar")). + Hex("hex", []byte{0x12, 0xef}). + RawJSON("json", []byte(`{"some":"json"}`)). AnErr("some_err", nil). Err(errors.New("some error")). Bool("bool", true). @@ -155,13 +235,17 @@ func TestFields(t *testing.T) { Uint16("uint16", 8). Uint32("uint32", 9). Uint64("uint64", 10). - Float32("float32", 11). - Float64("float64", 12). + IPAddr("IPv4", net.IP{192, 168, 0, 100}). + IPAddr("IPv6", net.IP{0x20, 0x01, 0x0d, 0xb8, 0x85, 0xa3, 0x00, 0x00, 0x00, 0x00, 0x8a, 0x2e, 0x03, 0x70, 0x73, 0x34}). + MACAddr("Mac", net.HardwareAddr{0x00, 0x14, 0x22, 0x01, 0x23, 0x45}). + IPPrefix("Prefix", net.IPNet{IP: net.IP{192, 168, 0, 100}, Mask: net.CIDRMask(24, 32)}). + Float32("float32", 11.1234). + Float64("float64", 12.321321321). Dur("dur", 1*time.Second). Time("time", time.Time{}). TimeDiff("diff", now, now.Add(-10*time.Second)). Msg("") - if got, want := out.String(), `{"caller":"`+caller+`","string":"foo","bytes":"bar","error":"some error","bool":true,"int":1,"int8":2,"int16":3,"int32":4,"int64":5,"uint":6,"uint8":7,"uint16":8,"uint32":9,"uint64":10,"float32":11,"float64":12,"dur":1000,"time":"0001-01-01T00:00:00Z","diff":10000}`+"\n"; got != want { + if got, want := decodeIfBinaryToString(out.Bytes()), `{"caller":"`+caller+`","string":"foo","bytes":"bar","hex":"12ef","json":{"some":"json"},"error":"some error","bool":true,"int":1,"int8":2,"int16":3,"int32":4,"int64":5,"uint":6,"uint8":7,"uint16":8,"uint32":9,"uint64":10,"IPv4":"192.168.0.100","IPv6":"2001:db8:85a3::8a2e:370:7334","Mac":"00:14:22:01:23:45","Prefix":"192.168.0.100/24","float32":11.1234,"float64":12.321321321,"dur":1000,"time":"0001-01-01T00:00:00Z","diff":10000}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } @@ -188,7 +272,7 @@ func TestFieldsArrayEmpty(t *testing.T) { Durs("dur", []time.Duration{}). Times("time", []time.Time{}). Msg("") - if got, want := out.String(), `{"string":[],"err":[],"bool":[],"int":[],"int8":[],"int16":[],"int32":[],"int64":[],"uint":[],"uint8":[],"uint16":[],"uint32":[],"uint64":[],"float32":[],"float64":[],"dur":[],"time":[]}`+"\n"; got != want { + if got, want := decodeIfBinaryToString(out.Bytes()), `{"string":[],"err":[],"bool":[],"int":[],"int8":[],"int16":[],"int32":[],"int64":[],"uint":[],"uint8":[],"uint16":[],"uint32":[],"uint64":[],"float32":[],"float64":[],"dur":[],"time":[]}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } @@ -215,7 +299,7 @@ func TestFieldsArraySingleElement(t *testing.T) { Durs("dur", []time.Duration{1 * time.Second}). Times("time", []time.Time{time.Time{}}). 
Msg("") - if got, want := out.String(), `{"string":["foo"],"err":["some error"],"bool":[true],"int":[1],"int8":[2],"int16":[3],"int32":[4],"int64":[5],"uint":[6],"uint8":[7],"uint16":[8],"uint32":[9],"uint64":[10],"float32":[11],"float64":[12],"dur":[1000],"time":["0001-01-01T00:00:00Z"]}`+"\n"; got != want { + if got, want := decodeIfBinaryToString(out.Bytes()), `{"string":["foo"],"err":["some error"],"bool":[true],"int":[1],"int8":[2],"int16":[3],"int32":[4],"int64":[5],"uint":[6],"uint8":[7],"uint16":[8],"uint32":[9],"uint64":[10],"float32":[11],"float64":[12],"dur":[1000],"time":["0001-01-01T00:00:00Z"]}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } @@ -242,7 +326,7 @@ func TestFieldsArrayMultipleElement(t *testing.T) { Durs("dur", []time.Duration{1 * time.Second, 0}). Times("time", []time.Time{time.Time{}, time.Time{}}). Msg("") - if got, want := out.String(), `{"string":["foo","bar"],"err":["some error",null],"bool":[true,false],"int":[1,0],"int8":[2,0],"int16":[3,0],"int32":[4,0],"int64":[5,0],"uint":[6,0],"uint8":[7,0],"uint16":[8,0],"uint32":[9,0],"uint64":[10,0],"float32":[11,0],"float64":[12,0],"dur":[1000,0],"time":["0001-01-01T00:00:00Z","0001-01-01T00:00:00Z"]}`+"\n"; got != want { + if got, want := decodeIfBinaryToString(out.Bytes()), `{"string":["foo","bar"],"err":["some error",null],"bool":[true,false],"int":[1,0],"int8":[2,0],"int16":[3,0],"int32":[4,0],"int64":[5,0],"uint":[6,0],"uint8":[7,0],"uint16":[8,0],"uint32":[9,0],"uint64":[10,0],"float32":[11,0],"float64":[12,0],"dur":[1000,0],"time":["0001-01-01T00:00:00Z","0001-01-01T00:00:00Z"]}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } @@ -254,6 +338,7 @@ func TestFieldsDisabled(t *testing.T) { log.Debug(). Str("string", "foo"). Bytes("bytes", []byte("bar")). + Hex("hex", []byte{0x12, 0xef}). AnErr("some_err", nil). Err(errors.New("some error")). Bool("bool", true). @@ -273,7 +358,7 @@ func TestFieldsDisabled(t *testing.T) { Time("time", time.Time{}). TimeDiff("diff", now, now.Add(-10*time.Second)). 
Msg("") - if got, want := out.String(), ""; got != want { + if got, want := decodeIfBinaryToString(out.Bytes()), ""; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } @@ -282,7 +367,7 @@ func TestMsgf(t *testing.T) { out := &bytes.Buffer{} log := New(out) log.Log().Msgf("one %s %.1f %d %v", "two", 3.4, 5, errors.New("six")) - if got, want := out.String(), `{"message":"one two 3.4 5 six"}`+"\n"; got != want { + if got, want := decodeIfBinaryToString(out.Bytes()), `{"message":"one two 3.4 5 six"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } @@ -291,7 +376,7 @@ func TestWithAndFieldsCombined(t *testing.T) { out := &bytes.Buffer{} log := New(out).With().Str("f1", "val").Str("f2", "val").Logger() log.Log().Str("f3", "val").Msg("") - if got, want := out.String(), `{"f1":"val","f2":"val","f3":"val"}`+"\n"; got != want { + if got, want := decodeIfBinaryToString(out.Bytes()), `{"f1":"val","f2":"val","f3":"val"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } @@ -301,7 +386,7 @@ func TestLevel(t *testing.T) { out := &bytes.Buffer{} log := New(out).Level(Disabled) log.Info().Msg("test") - if got, want := out.String(), ""; got != want { + if got, want := decodeIfBinaryToString(out.Bytes()), ""; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } }) @@ -310,7 +395,7 @@ func TestLevel(t *testing.T) { out := &bytes.Buffer{} log := New(out).Level(Disabled) log.Log().Msg("test") - if got, want := out.String(), ""; got != want { + if got, want := decodeIfBinaryToString(out.Bytes()), ""; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } }) @@ -319,7 +404,7 @@ func TestLevel(t *testing.T) { out := &bytes.Buffer{} log := New(out).Level(InfoLevel) log.Log().Msg("test") - if got, want := out.String(), `{"message":"test"}`+"\n"; got != want { + if got, want := decodeIfBinaryToString(out.Bytes()), `{"message":"test"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } }) @@ -328,7 +413,7 @@ func TestLevel(t *testing.T) { out := &bytes.Buffer{} log := New(out).Level(PanicLevel) log.Log().Msg("test") - if got, want := out.String(), `{"message":"test"}`+"\n"; got != want { + if got, want := decodeIfBinaryToString(out.Bytes()), `{"message":"test"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } }) @@ -337,7 +422,7 @@ func TestLevel(t *testing.T) { out := &bytes.Buffer{} log := New(out).Level(InfoLevel) log.WithLevel(NoLevel).Msg("test") - if got, want := out.String(), `{"message":"test"}`+"\n"; got != want { + if got, want := decodeIfBinaryToString(out.Bytes()), `{"message":"test"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } }) @@ -346,7 +431,7 @@ func TestLevel(t *testing.T) { out := &bytes.Buffer{} log := New(out).Level(InfoLevel) log.Info().Msg("test") - if got, want := out.String(), `{"level":"info","message":"test"}`+"\n"; got != want { + if got, want := decodeIfBinaryToString(out.Bytes()), `{"level":"info","message":"test"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } }) @@ -359,7 +444,22 @@ func TestSampling(t *testing.T) { log.Log().Int("i", 2).Msg("") log.Log().Int("i", 3).Msg("") log.Log().Int("i", 4).Msg("") - if got, want := out.String(), "{\"i\":2}\n{\"i\":4}\n"; got != want { + if got, want := decodeIfBinaryToString(out.Bytes()), "{\"i\":1}\n{\"i\":3}\n"; got != want { + t.Errorf("invalid log 
output:\ngot: %v\nwant: %v", got, want) + } +} + +func TestDiscard(t *testing.T) { + out := &bytes.Buffer{} + log := New(out) + log.Log().Discard().Str("a", "b").Msgf("one %s %.1f %d %v", "two", 3.4, 5, errors.New("six")) + if got, want := decodeIfBinaryToString(out.Bytes()), ""; got != want { + t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) + } + + // Double call + log.Log().Discard().Discard().Str("a", "b").Msgf("one %s %.1f %d %v", "two", 3.4, 5, errors.New("six")) + if got, want := decodeIfBinaryToString(out.Bytes()), ""; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } @@ -376,6 +476,7 @@ func (lw *levelWriter) Write(p []byte) (int, error) { } func (lw *levelWriter) WriteLevel(lvl Level, p []byte) (int, error) { + p = decodeIfBinaryToBytes(p) lw.ops = append(lw.ops, struct { l Level p string @@ -433,7 +534,7 @@ func TestContextTimestamp(t *testing.T) { log := New(out).With().Timestamp().Str("foo", "bar").Logger() log.Log().Msg("hello world") - if got, want := out.String(), `{"foo":"bar","time":"2001-02-03T04:05:06Z","message":"hello world"}`+"\n"; got != want { + if got, want := decodeIfBinaryToString(out.Bytes()), `{"foo":"bar","time":"2001-02-03T04:05:06Z","message":"hello world"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } @@ -449,7 +550,7 @@ func TestEventTimestamp(t *testing.T) { log := New(out).With().Str("foo", "bar").Logger() log.Log().Timestamp().Msg("hello world") - if got, want := out.String(), `{"foo":"bar","time":"2001-02-03T04:05:06Z","message":"hello world"}`+"\n"; got != want { + if got, want := decodeIfBinaryToString(out.Bytes()), `{"foo":"bar","time":"2001-02-03T04:05:06Z","message":"hello world"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } @@ -460,7 +561,7 @@ func TestOutputWithoutTimestamp(t *testing.T) { log := New(ignoredOut).Output(out).With().Str("foo", "bar").Logger() log.Log().Msg("hello world") - if got, want := out.String(), `{"foo":"bar","message":"hello world"}`+"\n"; got != want { + if got, want := decodeIfBinaryToString(out.Bytes()), `{"foo":"bar","message":"hello world"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } @@ -477,7 +578,86 @@ func TestOutputWithTimestamp(t *testing.T) { log := New(ignoredOut).Output(out).With().Timestamp().Str("foo", "bar").Logger() log.Log().Msg("hello world") - if got, want := out.String(), `{"foo":"bar","time":"2001-02-03T04:05:06Z","message":"hello world"}`+"\n"; got != want { + if got, want := decodeIfBinaryToString(out.Bytes()), `{"foo":"bar","time":"2001-02-03T04:05:06Z","message":"hello world"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } + +type loggableError struct { + error +} + +func (l loggableError) MarshalZerologObject(e *Event) { + e.Str("message", l.error.Error()+": loggableError") +} + +func TestErrorMarshalFunc(t *testing.T) { + out := &bytes.Buffer{} + log := New(out) + + // test default behaviour + log.Log().Err(errors.New("err")).Msg("msg") + if got, want := decodeIfBinaryToString(out.Bytes()), `{"error":"err","message":"msg"}`+"\n"; got != want { + t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) + } + out.Reset() + + log.Log().Err(loggableError{errors.New("err")}).Msg("msg") + if got, want := decodeIfBinaryToString(out.Bytes()), `{"error":{"message":"err: loggableError"},"message":"msg"}`+"\n"; got != want { + t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) + } + 
out.Reset() + + // test overriding the ErrorMarshalFunc + originalErrorMarshalFunc := ErrorMarshalFunc + defer func() { + ErrorMarshalFunc = originalErrorMarshalFunc + }() + + ErrorMarshalFunc = func(err error) interface{} { + return err.Error() + ": marshaled string" + } + log.Log().Err(errors.New("err")).Msg("msg") + if got, want := decodeIfBinaryToString(out.Bytes()), `{"error":"err: marshaled string","message":"msg"}`+"\n"; got != want { + t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) + } + + out.Reset() + ErrorMarshalFunc = func(err error) interface{} { + return errors.New(err.Error() + ": new error") + } + log.Log().Err(errors.New("err")).Msg("msg") + if got, want := decodeIfBinaryToString(out.Bytes()), `{"error":"err: new error","message":"msg"}`+"\n"; got != want { + t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) + } + + out.Reset() + ErrorMarshalFunc = func(err error) interface{} { + return loggableError{err} + } + log.Log().Err(errors.New("err")).Msg("msg") + if got, want := decodeIfBinaryToString(out.Bytes()), `{"error":{"message":"err: loggableError"},"message":"msg"}`+"\n"; got != want { + t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) + } +} + +type errWriter struct { + error +} + +func (w errWriter) Write(p []byte) (n int, err error) { + return 0, w.error +} + +func TestErrorHandler(t *testing.T) { + var got error + want := errors.New("write error") + ErrorHandler = func(err error) { + got = err + } + log := New(errWriter{want}) + log.Log().Msg("test") + if got != want { + t.Errorf("ErrorHandler err = %#v, want %#v", got, want) + } +} diff --git a/pkgerrors/stacktrace.go b/pkgerrors/stacktrace.go index fff1c47..a868e56 100644 --- a/pkgerrors/stacktrace.go +++ b/pkgerrors/stacktrace.go @@ -1,11 +1,9 @@ package pkgerrors import ( - "bytes" "fmt" "github.com/pkg/errors" - "github.com/rs/zerolog/internal/json" ) var ( @@ -17,7 +15,7 @@ var ( // MarshalStack implements pkg/errors stack trace marshaling. 
// // zerolog.ErrorStackMarshaler = MarshalStack -func MarshalStack(err error) []byte { +func MarshalStack(err error) interface{} { type stackTracer interface { StackTrace() errors.StackTrace } @@ -27,40 +25,13 @@ func MarshalStack(err error) []byte { } else { return nil } - return appendJSONStack(make([]byte, 0, 500), st) -} - -func appendJSONStack(dst []byte, st errors.StackTrace) []byte { - buf := bytes.NewBuffer(make([]byte, 0, 100)) - dst = append(dst, '[') - for i, frame := range st { - if i > 0 { - dst = append(dst, ',') - } - - dst = append(dst, '{') - - fmt.Fprintf(buf, "%s", frame) - dst = json.AppendString(dst, StackSourceFileName) - dst = append(dst, ':') - dst = json.AppendBytes(dst, buf.Bytes()) - dst = append(dst, ',') - buf.Reset() - - fmt.Fprintf(buf, "%d", frame) - dst = json.AppendString(dst, StackSourceLineName) - dst = append(dst, ':') - dst = json.AppendBytes(dst, buf.Bytes()) - dst = append(dst, ',') - buf.Reset() - - fmt.Fprintf(buf, "%n", frame) - dst = json.AppendString(dst, StackSourceFunctionName) - dst = append(dst, ':') - dst = json.AppendBytes(dst, buf.Bytes()) - - dst = append(dst, '}') + out := make([]map[string]string, 0, len(st)) + for _, frame := range st { + out = append(out, map[string]string{ + StackSourceFileName: fmt.Sprintf("%s", frame), + StackSourceLineName: fmt.Sprintf("%d", frame), + StackSourceFunctionName: fmt.Sprintf("%n", frame), + }) } - dst = append(dst, ']') - return dst + return out } diff --git a/pkgerrors/stacktrace_test.go b/pkgerrors/stacktrace_test.go index bee1df0..a77428f 100644 --- a/pkgerrors/stacktrace_test.go +++ b/pkgerrors/stacktrace_test.go @@ -1,3 +1,5 @@ +// +build !binary_log + package pkgerrors import ( @@ -19,7 +21,7 @@ func TestLogStack(t *testing.T) { log.Log().Stack().Err(err).Msg("") got := out.String() - want := `\{"stack":\[\{"source":"stacktrace_test.go","line":"18","func":"TestLogStack"\},.*\],"error":"from error: error message"\}\n` + want := `\{"stack":\[\{"func":"TestLogStack","line":"20","source":"stacktrace_test.go"\},.*\],"error":"from error: error message"\}\n` if ok, _ := regexp.MatchString(want, got); !ok { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } diff --git a/pretty.png b/pretty.png index c181164..34e4308 100644 Binary files a/pretty.png and b/pretty.png differ diff --git a/sampler.go b/sampler.go index 3f00e1f..2360f0d 100644 --- a/sampler.go +++ b/sampler.go @@ -47,7 +47,7 @@ type BasicSampler struct { // Sample implements the Sampler interface. func (s *BasicSampler) Sample(lvl Level) bool { c := atomic.AddUint32(&s.counter, 1) - return c%s.N == 0 + return c%s.N == s.N-1 } // BurstSampler lets Burst events pass per Period then pass the decision to diff --git a/sampler_test.go b/sampler_test.go index 35323a2..e42ad3a 100644 --- a/sampler_test.go +++ b/sampler_test.go @@ -1,3 +1,5 @@ +// +build !binary_log + package zerolog import ( @@ -13,7 +15,14 @@ var samplers = []struct { wantMax int }{ { - "BasicSampler", + "BasicSampler_1", + func() Sampler { + return &BasicSampler{N: 1} + }, + 100, 100, 100, + }, + { + "BasicSampler_5", func() Sampler { return &BasicSampler{N: 5} }, diff --git a/syslog.go b/syslog.go index 03be1d5..82b470e 100644 --- a/syslog.go +++ b/syslog.go @@ -1,8 +1,11 @@ // +build !windows +// +build !binary_log package zerolog -import "io" +import ( + "io" +) // SyslogWriter is an interface matching a syslog.Writer struct. 
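The `BasicSampler` change above (`c%s.N == s.N-1`) shifts which event in each window of N passes: counter values N-1, 2N-1, ... are now logged instead of N, 2N, ..., which is why the sampled example and test expectations earlier in this diff moved from messages 2 and 4 to messages 1 and 3. A small sketch of the resulting behavior for N=2:

```go
package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	// Same construction as ExampleLogger_Sample in this diff.
	log := zerolog.New(os.Stdout).Sample(&zerolog.BasicSampler{N: 2})

	log.Info().Msg("message 1") // counter 1: 1%2 == N-1, logged
	log.Info().Msg("message 2") // counter 2: 2%2 == 0, dropped
	log.Info().Msg("message 3") // counter 3: logged
	log.Info().Msg("message 4") // counter 4: dropped
}
```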
type SyslogWriter interface { diff --git a/syslog_test.go b/syslog_test.go index 95aed65..94d15d9 100644 --- a/syslog_test.go +++ b/syslog_test.go @@ -1,3 +1,4 @@ +// +build !binary_log // +build !windows package zerolog diff --git a/writer_test.go b/writer_test.go index 63e26c7..7d06e4b 100644 --- a/writer_test.go +++ b/writer_test.go @@ -1,3 +1,4 @@ +// +build !binary_log // +build !windows package zerolog
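Finally, the reworked `pkgerrors.MarshalStack` above now hands zerolog a generic value (a slice of maps) rather than pre-rendered JSON bytes. Wiring it up follows the pattern in stacktrace_test.go; a minimal sketch, assuming `github.com/pkg/errors` is available, with the emitted line only indicative:

```go
package main

import (
	"os"

	"github.com/pkg/errors"
	"github.com/rs/zerolog"
	"github.com/rs/zerolog/pkgerrors"
)

func main() {
	// Install the stack marshaler, then request a stack on the event.
	zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack

	log := zerolog.New(os.Stderr)
	err := errors.Wrap(errors.New("error message"), "from error")
	log.Log().Stack().Err(err).Msg("")
	// Roughly: {"stack":[{"func":"main","line":"...","source":"main.go"}, ...],"error":"from error: error message"}
}
```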