From 37d888101d60d11d180a7daf8508674e436fd00f Mon Sep 17 00:00:00 2001 From: "Mark G." Date: Wed, 16 Jul 2025 16:04:55 -0700 Subject: [PATCH] [Housekeeping] Remove tunnel code #382 * Remove all tunnel handling code * `plumber-schemas` can now be updated to remove tunnel options --- README.md | 12 - actions/actions.go | 7 - actions/actionsfakes/fake_iactions.go | 403 -------- actions/tunnel.go | 243 ----- backends/activemq/tunnel.go | 71 -- backends/awskinesis/tunnel.go | 81 -- backends/awskinesis/tunnel_test.go | 114 --- backends/awssns/tunnel.go | 78 -- backends/awssns/tunnel_test.go | 193 ---- backends/awssqs/tunnel.go | 72 -- backends/awssqs/tunnel_test.go | 138 --- backends/azure-eventhub/tunnel.go | 71 -- backends/azure-servicebus/tunnel.go | 103 -- backends/backends.go | 10 +- backends/cdcmongo/tunnel.go | 15 - backends/cdcpostgres/tunnel.go | 15 - backends/gcppubsub/tunnel.go | 70 -- backends/kafka/tunnel.go | 119 --- backends/kubemq-queue/tunnel.go | 15 - backends/memphis/tunnel.go | 82 -- backends/mqtt/tunnel.go | 73 -- backends/mqtt/tunnel_test.go | 166 ---- backends/nats-jetstream/tunnel.go | 68 -- backends/nats-streaming/tunnel.go | 67 -- backends/nats-streaming/tunnel_test.go | 137 --- backends/nats-streaming/write_test.go | 5 - backends/nats/tunnel.go | 67 -- backends/nsq/tunnel.go | 15 - backends/pulsar/tunnel.go | 70 -- backends/pulsar/tunnel_test.go | 170 ---- backends/rabbit-streams/tunnel.go | 83 -- backends/rabbitmq/tunnel.go | 80 -- backends/rabbitmq/tunnel_test.go | 145 --- backends/rpubsub/tunnel.go | 68 -- backends/rpubsub/tunnel_test.go | 69 -- backends/rstreams/tunnel.go | 75 -- backends/rstreams/tunnel_test.go | 75 -- bus/broadcast_consumer.go | 12 - bus/broadcast_consumer_connection.go | 10 - bus/broadcast_consumer_tunnel.go | 114 --- bus/bus.go | 6 - bus/bus_noop.go | 20 - bus/busfakes/fake_ibus.go | 381 -------- bus/message.go | 7 - bus/publish_helpers.go | 44 - config/config.go | 34 - go.mod | 3 - go.sum | 6 - options/options.go | 88 
-- plumber/cli_manage.go | 14 - plumber/cli_manage_tunnel.go | 158 ---- plumber/cli_server.go | 27 - plumber/cli_tunnel.go | 47 - plumber/plumber.go | 2 - prometheus/prometheus.go | 1 - server/connections_handlers.go | 11 - server/connections_test.go | 1 - server/relay_test.go | 1 - server/tunnel_handlers.go | 205 +--- server/tunnel_test.go | 158 ---- server/types/dynamic.go | 136 --- server/types/relay.go | 4 +- tunnel/tunnel.go | 388 -------- tunnel/tunnelfakes/fake_itunnel.go | 326 ------- validate/backends.go | 4 - validate/server.go | 6 - .../github.com/dustin/go-humanize/.travis.yml | 21 - vendor/github.com/dustin/go-humanize/LICENSE | 21 - .../dustin/go-humanize/README.markdown | 124 --- vendor/github.com/dustin/go-humanize/big.go | 31 - .../github.com/dustin/go-humanize/bigbytes.go | 173 ---- vendor/github.com/dustin/go-humanize/bytes.go | 143 --- vendor/github.com/dustin/go-humanize/comma.go | 116 --- .../github.com/dustin/go-humanize/commaf.go | 40 - vendor/github.com/dustin/go-humanize/ftoa.go | 46 - .../github.com/dustin/go-humanize/humanize.go | 8 - .../github.com/dustin/go-humanize/number.go | 192 ---- .../github.com/dustin/go-humanize/ordinals.go | 25 - vendor/github.com/dustin/go-humanize/si.go | 123 --- vendor/github.com/dustin/go-humanize/times.go | 117 --- .../github.com/kataras/tablewriter/.gitignore | 15 - .../kataras/tablewriter/.travis.yml | 12 - .../github.com/kataras/tablewriter/LICENCE.md | 19 - .../github.com/kataras/tablewriter/README.md | 277 ------ vendor/github.com/kataras/tablewriter/csv.go | 52 -- .../github.com/kataras/tablewriter/table.go | 884 ------------------ .../kataras/tablewriter/table_with_color.go | 134 --- vendor/github.com/kataras/tablewriter/util.go | 93 -- vendor/github.com/kataras/tablewriter/wrap.go | 99 -- .../lensesio/tableprinter/.gitattributes | 2 - .../lensesio/tableprinter/.gitignore | 2 - .../lensesio/tableprinter/.travis.yml | 20 - .../lensesio/tableprinter/CODE_OF_CONDUCT.md | 74 -- 
.../lensesio/tableprinter/CONTRIBUTING.md | 10 - .../github.com/lensesio/tableprinter/LICENSE | 185 ---- .../github.com/lensesio/tableprinter/NOTICE | 5 - .../lensesio/tableprinter/README.md | 87 -- .../lensesio/tableprinter/color.png | Bin 10638 -> 0 bytes .../github.com/lensesio/tableprinter/doc.go | 1 - .../github.com/lensesio/tableprinter/json.go | 42 - .../github.com/lensesio/tableprinter/map.go | 159 ---- .../lensesio/tableprinter/parser.go | 65 -- .../lensesio/tableprinter/quanity_util.go | 134 --- .../github.com/lensesio/tableprinter/row.go | 391 -------- .../github.com/lensesio/tableprinter/slice.go | 65 -- .../lensesio/tableprinter/struct.go | 421 --------- .../lensesio/tableprinter/tableprinter.go | 467 --------- .../lensesio/tableprinter/terminal_util.go | 26 - .../tableprinter/terminal_util_stubs.go | 7 - vendor/modules.txt | 9 - 110 files changed, 14 insertions(+), 10482 deletions(-) delete mode 100644 actions/tunnel.go delete mode 100644 backends/activemq/tunnel.go delete mode 100644 backends/awskinesis/tunnel.go delete mode 100644 backends/awskinesis/tunnel_test.go delete mode 100644 backends/awssns/tunnel.go delete mode 100644 backends/awssns/tunnel_test.go delete mode 100644 backends/awssqs/tunnel.go delete mode 100644 backends/awssqs/tunnel_test.go delete mode 100644 backends/azure-eventhub/tunnel.go delete mode 100644 backends/azure-servicebus/tunnel.go delete mode 100644 backends/cdcmongo/tunnel.go delete mode 100644 backends/cdcpostgres/tunnel.go delete mode 100644 backends/gcppubsub/tunnel.go delete mode 100644 backends/kafka/tunnel.go delete mode 100644 backends/kubemq-queue/tunnel.go delete mode 100644 backends/memphis/tunnel.go delete mode 100644 backends/mqtt/tunnel.go delete mode 100644 backends/mqtt/tunnel_test.go delete mode 100644 backends/nats-jetstream/tunnel.go delete mode 100644 backends/nats-streaming/tunnel.go delete mode 100644 backends/nats-streaming/tunnel_test.go delete mode 100644 backends/nats/tunnel.go delete mode 100644 
backends/nsq/tunnel.go delete mode 100644 backends/pulsar/tunnel.go delete mode 100644 backends/pulsar/tunnel_test.go delete mode 100644 backends/rabbit-streams/tunnel.go delete mode 100644 backends/rabbitmq/tunnel.go delete mode 100644 backends/rabbitmq/tunnel_test.go delete mode 100644 backends/rpubsub/tunnel.go delete mode 100644 backends/rpubsub/tunnel_test.go delete mode 100644 backends/rstreams/tunnel.go delete mode 100644 backends/rstreams/tunnel_test.go delete mode 100644 bus/broadcast_consumer_tunnel.go delete mode 100644 plumber/cli_manage_tunnel.go delete mode 100644 plumber/cli_tunnel.go delete mode 100644 server/tunnel_test.go delete mode 100644 server/types/dynamic.go delete mode 100644 tunnel/tunnel.go delete mode 100644 tunnel/tunnelfakes/fake_itunnel.go delete mode 100644 vendor/github.com/dustin/go-humanize/.travis.yml delete mode 100644 vendor/github.com/dustin/go-humanize/LICENSE delete mode 100644 vendor/github.com/dustin/go-humanize/README.markdown delete mode 100644 vendor/github.com/dustin/go-humanize/big.go delete mode 100644 vendor/github.com/dustin/go-humanize/bigbytes.go delete mode 100644 vendor/github.com/dustin/go-humanize/bytes.go delete mode 100644 vendor/github.com/dustin/go-humanize/comma.go delete mode 100644 vendor/github.com/dustin/go-humanize/commaf.go delete mode 100644 vendor/github.com/dustin/go-humanize/ftoa.go delete mode 100644 vendor/github.com/dustin/go-humanize/humanize.go delete mode 100644 vendor/github.com/dustin/go-humanize/number.go delete mode 100644 vendor/github.com/dustin/go-humanize/ordinals.go delete mode 100644 vendor/github.com/dustin/go-humanize/si.go delete mode 100644 vendor/github.com/dustin/go-humanize/times.go delete mode 100644 vendor/github.com/kataras/tablewriter/.gitignore delete mode 100644 vendor/github.com/kataras/tablewriter/.travis.yml delete mode 100644 vendor/github.com/kataras/tablewriter/LICENCE.md delete mode 100644 vendor/github.com/kataras/tablewriter/README.md delete mode 100644 
vendor/github.com/kataras/tablewriter/csv.go delete mode 100644 vendor/github.com/kataras/tablewriter/table.go delete mode 100644 vendor/github.com/kataras/tablewriter/table_with_color.go delete mode 100644 vendor/github.com/kataras/tablewriter/util.go delete mode 100644 vendor/github.com/kataras/tablewriter/wrap.go delete mode 100644 vendor/github.com/lensesio/tableprinter/.gitattributes delete mode 100644 vendor/github.com/lensesio/tableprinter/.gitignore delete mode 100644 vendor/github.com/lensesio/tableprinter/.travis.yml delete mode 100644 vendor/github.com/lensesio/tableprinter/CODE_OF_CONDUCT.md delete mode 100644 vendor/github.com/lensesio/tableprinter/CONTRIBUTING.md delete mode 100644 vendor/github.com/lensesio/tableprinter/LICENSE delete mode 100644 vendor/github.com/lensesio/tableprinter/NOTICE delete mode 100644 vendor/github.com/lensesio/tableprinter/README.md delete mode 100644 vendor/github.com/lensesio/tableprinter/color.png delete mode 100644 vendor/github.com/lensesio/tableprinter/doc.go delete mode 100644 vendor/github.com/lensesio/tableprinter/json.go delete mode 100644 vendor/github.com/lensesio/tableprinter/map.go delete mode 100644 vendor/github.com/lensesio/tableprinter/parser.go delete mode 100644 vendor/github.com/lensesio/tableprinter/quanity_util.go delete mode 100644 vendor/github.com/lensesio/tableprinter/row.go delete mode 100644 vendor/github.com/lensesio/tableprinter/slice.go delete mode 100644 vendor/github.com/lensesio/tableprinter/struct.go delete mode 100644 vendor/github.com/lensesio/tableprinter/tableprinter.go delete mode 100644 vendor/github.com/lensesio/tableprinter/terminal_util.go delete mode 100644 vendor/github.com/lensesio/tableprinter/terminal_util_stubs.go diff --git a/README.md b/README.md index 1c48cb161..958f1ed35 100644 --- a/README.md +++ b/README.md @@ -215,18 +215,6 @@ In order to flip a boolean flag to `false`, prepend `--no` to the flag. ie. `--queue-declare` is `true` by default. 
To make it false, use `--no-queue-declare`. -## Tunnels - -`plumber` can now act as a replay destination (tunnel). Tunnel mode allows you to -run an instance of plumber, on your local network, which will then be available -in the Streamdal platform as a _replay destination_. - -This mitigates the need make firewall changes to replay messages from a Streamdal -collection back to your message bus. - -See https://docs.streamdal.com/what-are/what-are-destinations/plumber-as-a-destination -for full documentation. - ## High Performance & High Availability `plumber` comes with a "server" mode which will cause plumber to operate as a highly available cluster. diff --git a/actions/actions.go b/actions/actions.go index c105baf76..4ab13b659 100644 --- a/actions/actions.go +++ b/actions/actions.go @@ -38,13 +38,6 @@ type IActions interface { ResumeRelay(ctx context.Context, relayID string) (*types.Relay, error) UpdateRelay(ctx context.Context, relayID string, relayOpts *opts.RelayOptions) (*types.Relay, error) - // tunnel - CreateTunnel(reqCtx context.Context, tunnelOpts *opts.TunnelOptions) (*types.Tunnel, error) - ResumeTunnel(ctx context.Context, tunnelID string) (*types.Tunnel, error) - StopTunnel(ctx context.Context, tunnelID string) (*types.Tunnel, error) - UpdateTunnel(ctx context.Context, tunnelID string, tunnelOpts *opts.TunnelOptions) (*types.Tunnel, error) - DeleteTunnel(ctx context.Context, tunnelID string) error - UpdateConnection(ctx context.Context, connectionID string, connOpts *opts.ConnectionOptions) (*types.Connection, error) } diff --git a/actions/actionsfakes/fake_iactions.go b/actions/actionsfakes/fake_iactions.go index a7d9babb9..f4a0bb06e 100644 --- a/actions/actionsfakes/fake_iactions.go +++ b/actions/actionsfakes/fake_iactions.go @@ -6,7 +6,6 @@ import ( "sync" "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/streamdal/plumber/actions" "github.com/streamdal/plumber/server/types" ) @@ -26,20 +25,6 @@ type FakeIActions struct { 
result1 *types.Relay result2 error } - CreateTunnelStub func(context.Context, *opts.TunnelOptions) (*types.Tunnel, error) - createTunnelMutex sync.RWMutex - createTunnelArgsForCall []struct { - arg1 context.Context - arg2 *opts.TunnelOptions - } - createTunnelReturns struct { - result1 *types.Tunnel - result2 error - } - createTunnelReturnsOnCall map[int]struct { - result1 *types.Tunnel - result2 error - } DeleteRelayStub func(context.Context, string) (*types.Relay, error) deleteRelayMutex sync.RWMutex deleteRelayArgsForCall []struct { @@ -54,18 +39,6 @@ type FakeIActions struct { result1 *types.Relay result2 error } - DeleteTunnelStub func(context.Context, string) error - deleteTunnelMutex sync.RWMutex - deleteTunnelArgsForCall []struct { - arg1 context.Context - arg2 string - } - deleteTunnelReturns struct { - result1 error - } - deleteTunnelReturnsOnCall map[int]struct { - result1 error - } ResumeRelayStub func(context.Context, string) (*types.Relay, error) resumeRelayMutex sync.RWMutex resumeRelayArgsForCall []struct { @@ -80,20 +53,6 @@ type FakeIActions struct { result1 *types.Relay result2 error } - ResumeTunnelStub func(context.Context, string) (*types.Tunnel, error) - resumeTunnelMutex sync.RWMutex - resumeTunnelArgsForCall []struct { - arg1 context.Context - arg2 string - } - resumeTunnelReturns struct { - result1 *types.Tunnel - result2 error - } - resumeTunnelReturnsOnCall map[int]struct { - result1 *types.Tunnel - result2 error - } StopRelayStub func(context.Context, string) (*types.Relay, error) stopRelayMutex sync.RWMutex stopRelayArgsForCall []struct { @@ -108,20 +67,6 @@ type FakeIActions struct { result1 *types.Relay result2 error } - StopTunnelStub func(context.Context, string) (*types.Tunnel, error) - stopTunnelMutex sync.RWMutex - stopTunnelArgsForCall []struct { - arg1 context.Context - arg2 string - } - stopTunnelReturns struct { - result1 *types.Tunnel - result2 error - } - stopTunnelReturnsOnCall map[int]struct { - result1 *types.Tunnel - 
result2 error - } UpdateConnectionStub func(context.Context, string, *opts.ConnectionOptions) (*types.Connection, error) updateConnectionMutex sync.RWMutex updateConnectionArgsForCall []struct { @@ -152,21 +97,6 @@ type FakeIActions struct { result1 *types.Relay result2 error } - UpdateTunnelStub func(context.Context, string, *opts.TunnelOptions) (*types.Tunnel, error) - updateTunnelMutex sync.RWMutex - updateTunnelArgsForCall []struct { - arg1 context.Context - arg2 string - arg3 *opts.TunnelOptions - } - updateTunnelReturns struct { - result1 *types.Tunnel - result2 error - } - updateTunnelReturnsOnCall map[int]struct { - result1 *types.Tunnel - result2 error - } invocations map[string][][]interface{} invocationsMutex sync.RWMutex } @@ -236,71 +166,6 @@ func (fake *FakeIActions) CreateRelayReturnsOnCall(i int, result1 *types.Relay, }{result1, result2} } -func (fake *FakeIActions) CreateTunnel(arg1 context.Context, arg2 *opts.TunnelOptions) (*types.Tunnel, error) { - fake.createTunnelMutex.Lock() - ret, specificReturn := fake.createTunnelReturnsOnCall[len(fake.createTunnelArgsForCall)] - fake.createTunnelArgsForCall = append(fake.createTunnelArgsForCall, struct { - arg1 context.Context - arg2 *opts.TunnelOptions - }{arg1, arg2}) - stub := fake.CreateTunnelStub - fakeReturns := fake.createTunnelReturns - fake.recordInvocation("CreateTunnel", []interface{}{arg1, arg2}) - fake.createTunnelMutex.Unlock() - if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1, ret.result2 - } - return fakeReturns.result1, fakeReturns.result2 -} - -func (fake *FakeIActions) CreateTunnelCallCount() int { - fake.createTunnelMutex.RLock() - defer fake.createTunnelMutex.RUnlock() - return len(fake.createTunnelArgsForCall) -} - -func (fake *FakeIActions) CreateTunnelCalls(stub func(context.Context, *opts.TunnelOptions) (*types.Tunnel, error)) { - fake.createTunnelMutex.Lock() - defer fake.createTunnelMutex.Unlock() - fake.CreateTunnelStub = stub -} - 
-func (fake *FakeIActions) CreateTunnelArgsForCall(i int) (context.Context, *opts.TunnelOptions) { - fake.createTunnelMutex.RLock() - defer fake.createTunnelMutex.RUnlock() - argsForCall := fake.createTunnelArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 -} - -func (fake *FakeIActions) CreateTunnelReturns(result1 *types.Tunnel, result2 error) { - fake.createTunnelMutex.Lock() - defer fake.createTunnelMutex.Unlock() - fake.CreateTunnelStub = nil - fake.createTunnelReturns = struct { - result1 *types.Tunnel - result2 error - }{result1, result2} -} - -func (fake *FakeIActions) CreateTunnelReturnsOnCall(i int, result1 *types.Tunnel, result2 error) { - fake.createTunnelMutex.Lock() - defer fake.createTunnelMutex.Unlock() - fake.CreateTunnelStub = nil - if fake.createTunnelReturnsOnCall == nil { - fake.createTunnelReturnsOnCall = make(map[int]struct { - result1 *types.Tunnel - result2 error - }) - } - fake.createTunnelReturnsOnCall[i] = struct { - result1 *types.Tunnel - result2 error - }{result1, result2} -} - func (fake *FakeIActions) DeleteRelay(arg1 context.Context, arg2 string) (*types.Relay, error) { fake.deleteRelayMutex.Lock() ret, specificReturn := fake.deleteRelayReturnsOnCall[len(fake.deleteRelayArgsForCall)] @@ -366,68 +231,6 @@ func (fake *FakeIActions) DeleteRelayReturnsOnCall(i int, result1 *types.Relay, }{result1, result2} } -func (fake *FakeIActions) DeleteTunnel(arg1 context.Context, arg2 string) error { - fake.deleteTunnelMutex.Lock() - ret, specificReturn := fake.deleteTunnelReturnsOnCall[len(fake.deleteTunnelArgsForCall)] - fake.deleteTunnelArgsForCall = append(fake.deleteTunnelArgsForCall, struct { - arg1 context.Context - arg2 string - }{arg1, arg2}) - stub := fake.DeleteTunnelStub - fakeReturns := fake.deleteTunnelReturns - fake.recordInvocation("DeleteTunnel", []interface{}{arg1, arg2}) - fake.deleteTunnelMutex.Unlock() - if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1 - } - return 
fakeReturns.result1 -} - -func (fake *FakeIActions) DeleteTunnelCallCount() int { - fake.deleteTunnelMutex.RLock() - defer fake.deleteTunnelMutex.RUnlock() - return len(fake.deleteTunnelArgsForCall) -} - -func (fake *FakeIActions) DeleteTunnelCalls(stub func(context.Context, string) error) { - fake.deleteTunnelMutex.Lock() - defer fake.deleteTunnelMutex.Unlock() - fake.DeleteTunnelStub = stub -} - -func (fake *FakeIActions) DeleteTunnelArgsForCall(i int) (context.Context, string) { - fake.deleteTunnelMutex.RLock() - defer fake.deleteTunnelMutex.RUnlock() - argsForCall := fake.deleteTunnelArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 -} - -func (fake *FakeIActions) DeleteTunnelReturns(result1 error) { - fake.deleteTunnelMutex.Lock() - defer fake.deleteTunnelMutex.Unlock() - fake.DeleteTunnelStub = nil - fake.deleteTunnelReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeIActions) DeleteTunnelReturnsOnCall(i int, result1 error) { - fake.deleteTunnelMutex.Lock() - defer fake.deleteTunnelMutex.Unlock() - fake.DeleteTunnelStub = nil - if fake.deleteTunnelReturnsOnCall == nil { - fake.deleteTunnelReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.deleteTunnelReturnsOnCall[i] = struct { - result1 error - }{result1} -} - func (fake *FakeIActions) ResumeRelay(arg1 context.Context, arg2 string) (*types.Relay, error) { fake.resumeRelayMutex.Lock() ret, specificReturn := fake.resumeRelayReturnsOnCall[len(fake.resumeRelayArgsForCall)] @@ -493,71 +296,6 @@ func (fake *FakeIActions) ResumeRelayReturnsOnCall(i int, result1 *types.Relay, }{result1, result2} } -func (fake *FakeIActions) ResumeTunnel(arg1 context.Context, arg2 string) (*types.Tunnel, error) { - fake.resumeTunnelMutex.Lock() - ret, specificReturn := fake.resumeTunnelReturnsOnCall[len(fake.resumeTunnelArgsForCall)] - fake.resumeTunnelArgsForCall = append(fake.resumeTunnelArgsForCall, struct { - arg1 context.Context - arg2 string - }{arg1, arg2}) - stub := 
fake.ResumeTunnelStub - fakeReturns := fake.resumeTunnelReturns - fake.recordInvocation("ResumeTunnel", []interface{}{arg1, arg2}) - fake.resumeTunnelMutex.Unlock() - if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1, ret.result2 - } - return fakeReturns.result1, fakeReturns.result2 -} - -func (fake *FakeIActions) ResumeTunnelCallCount() int { - fake.resumeTunnelMutex.RLock() - defer fake.resumeTunnelMutex.RUnlock() - return len(fake.resumeTunnelArgsForCall) -} - -func (fake *FakeIActions) ResumeTunnelCalls(stub func(context.Context, string) (*types.Tunnel, error)) { - fake.resumeTunnelMutex.Lock() - defer fake.resumeTunnelMutex.Unlock() - fake.ResumeTunnelStub = stub -} - -func (fake *FakeIActions) ResumeTunnelArgsForCall(i int) (context.Context, string) { - fake.resumeTunnelMutex.RLock() - defer fake.resumeTunnelMutex.RUnlock() - argsForCall := fake.resumeTunnelArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 -} - -func (fake *FakeIActions) ResumeTunnelReturns(result1 *types.Tunnel, result2 error) { - fake.resumeTunnelMutex.Lock() - defer fake.resumeTunnelMutex.Unlock() - fake.ResumeTunnelStub = nil - fake.resumeTunnelReturns = struct { - result1 *types.Tunnel - result2 error - }{result1, result2} -} - -func (fake *FakeIActions) ResumeTunnelReturnsOnCall(i int, result1 *types.Tunnel, result2 error) { - fake.resumeTunnelMutex.Lock() - defer fake.resumeTunnelMutex.Unlock() - fake.ResumeTunnelStub = nil - if fake.resumeTunnelReturnsOnCall == nil { - fake.resumeTunnelReturnsOnCall = make(map[int]struct { - result1 *types.Tunnel - result2 error - }) - } - fake.resumeTunnelReturnsOnCall[i] = struct { - result1 *types.Tunnel - result2 error - }{result1, result2} -} - func (fake *FakeIActions) StopRelay(arg1 context.Context, arg2 string) (*types.Relay, error) { fake.stopRelayMutex.Lock() ret, specificReturn := fake.stopRelayReturnsOnCall[len(fake.stopRelayArgsForCall)] @@ -623,71 +361,6 @@ func (fake *FakeIActions) 
StopRelayReturnsOnCall(i int, result1 *types.Relay, re }{result1, result2} } -func (fake *FakeIActions) StopTunnel(arg1 context.Context, arg2 string) (*types.Tunnel, error) { - fake.stopTunnelMutex.Lock() - ret, specificReturn := fake.stopTunnelReturnsOnCall[len(fake.stopTunnelArgsForCall)] - fake.stopTunnelArgsForCall = append(fake.stopTunnelArgsForCall, struct { - arg1 context.Context - arg2 string - }{arg1, arg2}) - stub := fake.StopTunnelStub - fakeReturns := fake.stopTunnelReturns - fake.recordInvocation("StopTunnel", []interface{}{arg1, arg2}) - fake.stopTunnelMutex.Unlock() - if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1, ret.result2 - } - return fakeReturns.result1, fakeReturns.result2 -} - -func (fake *FakeIActions) StopTunnelCallCount() int { - fake.stopTunnelMutex.RLock() - defer fake.stopTunnelMutex.RUnlock() - return len(fake.stopTunnelArgsForCall) -} - -func (fake *FakeIActions) StopTunnelCalls(stub func(context.Context, string) (*types.Tunnel, error)) { - fake.stopTunnelMutex.Lock() - defer fake.stopTunnelMutex.Unlock() - fake.StopTunnelStub = stub -} - -func (fake *FakeIActions) StopTunnelArgsForCall(i int) (context.Context, string) { - fake.stopTunnelMutex.RLock() - defer fake.stopTunnelMutex.RUnlock() - argsForCall := fake.stopTunnelArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 -} - -func (fake *FakeIActions) StopTunnelReturns(result1 *types.Tunnel, result2 error) { - fake.stopTunnelMutex.Lock() - defer fake.stopTunnelMutex.Unlock() - fake.StopTunnelStub = nil - fake.stopTunnelReturns = struct { - result1 *types.Tunnel - result2 error - }{result1, result2} -} - -func (fake *FakeIActions) StopTunnelReturnsOnCall(i int, result1 *types.Tunnel, result2 error) { - fake.stopTunnelMutex.Lock() - defer fake.stopTunnelMutex.Unlock() - fake.StopTunnelStub = nil - if fake.stopTunnelReturnsOnCall == nil { - fake.stopTunnelReturnsOnCall = make(map[int]struct { - result1 *types.Tunnel - result2 error - }) 
- } - fake.stopTunnelReturnsOnCall[i] = struct { - result1 *types.Tunnel - result2 error - }{result1, result2} -} - func (fake *FakeIActions) UpdateConnection(arg1 context.Context, arg2 string, arg3 *opts.ConnectionOptions) (*types.Connection, error) { fake.updateConnectionMutex.Lock() ret, specificReturn := fake.updateConnectionReturnsOnCall[len(fake.updateConnectionArgsForCall)] @@ -820,97 +493,21 @@ func (fake *FakeIActions) UpdateRelayReturnsOnCall(i int, result1 *types.Relay, }{result1, result2} } -func (fake *FakeIActions) UpdateTunnel(arg1 context.Context, arg2 string, arg3 *opts.TunnelOptions) (*types.Tunnel, error) { - fake.updateTunnelMutex.Lock() - ret, specificReturn := fake.updateTunnelReturnsOnCall[len(fake.updateTunnelArgsForCall)] - fake.updateTunnelArgsForCall = append(fake.updateTunnelArgsForCall, struct { - arg1 context.Context - arg2 string - arg3 *opts.TunnelOptions - }{arg1, arg2, arg3}) - stub := fake.UpdateTunnelStub - fakeReturns := fake.updateTunnelReturns - fake.recordInvocation("UpdateTunnel", []interface{}{arg1, arg2, arg3}) - fake.updateTunnelMutex.Unlock() - if stub != nil { - return stub(arg1, arg2, arg3) - } - if specificReturn { - return ret.result1, ret.result2 - } - return fakeReturns.result1, fakeReturns.result2 -} - -func (fake *FakeIActions) UpdateTunnelCallCount() int { - fake.updateTunnelMutex.RLock() - defer fake.updateTunnelMutex.RUnlock() - return len(fake.updateTunnelArgsForCall) -} - -func (fake *FakeIActions) UpdateTunnelCalls(stub func(context.Context, string, *opts.TunnelOptions) (*types.Tunnel, error)) { - fake.updateTunnelMutex.Lock() - defer fake.updateTunnelMutex.Unlock() - fake.UpdateTunnelStub = stub -} - -func (fake *FakeIActions) UpdateTunnelArgsForCall(i int) (context.Context, string, *opts.TunnelOptions) { - fake.updateTunnelMutex.RLock() - defer fake.updateTunnelMutex.RUnlock() - argsForCall := fake.updateTunnelArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 -} - -func (fake 
*FakeIActions) UpdateTunnelReturns(result1 *types.Tunnel, result2 error) { - fake.updateTunnelMutex.Lock() - defer fake.updateTunnelMutex.Unlock() - fake.UpdateTunnelStub = nil - fake.updateTunnelReturns = struct { - result1 *types.Tunnel - result2 error - }{result1, result2} -} - -func (fake *FakeIActions) UpdateTunnelReturnsOnCall(i int, result1 *types.Tunnel, result2 error) { - fake.updateTunnelMutex.Lock() - defer fake.updateTunnelMutex.Unlock() - fake.UpdateTunnelStub = nil - if fake.updateTunnelReturnsOnCall == nil { - fake.updateTunnelReturnsOnCall = make(map[int]struct { - result1 *types.Tunnel - result2 error - }) - } - fake.updateTunnelReturnsOnCall[i] = struct { - result1 *types.Tunnel - result2 error - }{result1, result2} -} - func (fake *FakeIActions) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() fake.createRelayMutex.RLock() defer fake.createRelayMutex.RUnlock() - fake.createTunnelMutex.RLock() - defer fake.createTunnelMutex.RUnlock() fake.deleteRelayMutex.RLock() defer fake.deleteRelayMutex.RUnlock() - fake.deleteTunnelMutex.RLock() - defer fake.deleteTunnelMutex.RUnlock() fake.resumeRelayMutex.RLock() defer fake.resumeRelayMutex.RUnlock() - fake.resumeTunnelMutex.RLock() - defer fake.resumeTunnelMutex.RUnlock() fake.stopRelayMutex.RLock() defer fake.stopRelayMutex.RUnlock() - fake.stopTunnelMutex.RLock() - defer fake.stopTunnelMutex.RUnlock() fake.updateConnectionMutex.RLock() defer fake.updateConnectionMutex.RUnlock() fake.updateRelayMutex.RLock() defer fake.updateRelayMutex.RUnlock() - fake.updateTunnelMutex.RLock() - defer fake.updateTunnelMutex.RUnlock() copiedInvocations := map[string][][]interface{}{} for key, value := range fake.invocations { copiedInvocations[key] = value diff --git a/actions/tunnel.go b/actions/tunnel.go deleted file mode 100644 index 9e24c96d5..000000000 --- a/actions/tunnel.go +++ /dev/null @@ -1,243 +0,0 @@ -package actions - -import ( - "context" - "time" 
- - "github.com/pkg/errors" - - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - - "github.com/streamdal/plumber/backends" - "github.com/streamdal/plumber/options" - "github.com/streamdal/plumber/prometheus" - "github.com/streamdal/plumber/server/types" - "github.com/streamdal/plumber/tunnel" - "github.com/streamdal/plumber/validate" -) - -func (a *Actions) CreateTunnel(reqCtx context.Context, tunnelOpts *opts.TunnelOptions) (*types.Tunnel, error) { - if err := validate.TunnelOptionsForServer(tunnelOpts); err != nil { - return nil, errors.Wrap(err, "unable to validate tunnel options") - } - - // Get stored connection information - conn := a.cfg.PersistentConfig.GetConnection(tunnelOpts.ConnectionId) - if conn == nil { - return nil, validate.ErrConnectionNotFound - } - - // Try to create a backend from given connection options - be, err := backends.New(conn.Connection) - if err != nil { - return nil, errors.Wrap(err, "unable to create backend") - } - - // Used to shutdown tunnel on StopTunnel() gRPC call - shutdownCtx, shutdownFunc := context.WithCancel(context.Background()) - - d := &types.Tunnel{ - Active: false, - Id: tunnelOpts.XTunnelId, - CancelCtx: shutdownCtx, - CancelFunc: shutdownFunc, - Backend: be, - Options: tunnelOpts, - PlumberClusterID: a.cfg.PersistentConfig.ClusterID, - PlumberID: a.cfg.PersistentConfig.PlumberID, - PlumberVersion: options.VERSION, - } - - // If a tunnel is in the process of starting and it gets deleted, we must have - // CancelFunc and CancelCtx set so that DeleteTunnel() can trigger - a.cfg.PersistentConfig.SetTunnel(tunnelOpts.XTunnelId, d) - - // Run the tunnel if it's active on other plumber instances - if tunnelOpts.XActive { - // This will block for 5 seconds - if err := d.StartTunnel(5 * time.Second); err != nil { - return nil, errors.Wrap(err, "unable to start tunnel") - } - - d.Active = true - d.Options.XActive = true - - // Update metrics - prometheus.IncrPromGauge(prometheus.PlumberTunnels) - } - - 
a.cfg.PersistentConfig.SetTunnel(tunnelOpts.XTunnelId, d) - a.cfg.PersistentConfig.Save() - - return d, nil -} - -func (a *Actions) ResumeTunnel(ctx context.Context, tunnelID string) (*types.Tunnel, error) { - d := a.cfg.PersistentConfig.GetTunnel(tunnelID) - if d == nil { - return nil, errors.New("tunnel does not exist") - } - - // New contexts - ctx, cancelFunc := context.WithCancel(context.Background()) - d.CancelCtx = ctx - d.CancelFunc = cancelFunc - - // New backend connection - conn := a.cfg.PersistentConfig.GetConnection(d.Options.ConnectionId) - if conn == nil { - return nil, validate.ErrConnectionNotFound - } - - be, err := backends.New(conn.Connection) - if err != nil { - return nil, errors.Wrap(err, "unable to create backend connection") - } - d.Backend = be - - if err := d.StartTunnel(5 * time.Second); err != nil { - return nil, errors.Wrap(err, "unable to start tunnel") - } - - d.Active = true - d.Options.XActive = true - - // Update metrics - prometheus.IncrPromGauge(prometheus.PlumberTunnels) - - a.cfg.PersistentConfig.SetTunnel(tunnelID, d) - a.cfg.PersistentConfig.Save() - - return d, nil -} - -func (a *Actions) StopTunnel(ctx context.Context, tunnelID string) (*types.Tunnel, error) { - d := a.cfg.PersistentConfig.GetTunnel(tunnelID) - if d == nil { - return nil, errors.New("Tunnel replay does not exist") - } - - if !d.Active { - return nil, errors.New("Tunnel replay is not active") - } - - // Stop grpc client connection so we no longer receive messages from dProxy - d.CancelFunc() - d.Active = false - d.Options.XActive = false - - // Give it a sec - time.Sleep(time.Second) - - // Close gRPC connection to dProxy and backend connection to user's message bus - d.Close() - - // Update metrics - prometheus.DecrPromGauge(prometheus.PlumberTunnels) - - a.cfg.PersistentConfig.SetTunnel(tunnelID, d) - a.cfg.PersistentConfig.Save() - - return d, nil -} - -func (a *Actions) UpdateTunnel(ctx context.Context, tunnelID string, tunnelOpts *opts.TunnelOptions) 
(*types.Tunnel, error) { - d := a.cfg.PersistentConfig.GetTunnel(tunnelID) - if d == nil { - return nil, errors.New("tunnel does not exist") - } - - if d.Active { - // Close existing tunnel - d.CancelFunc() - d.Active = false - d.Options.XActive = false - d.Close() - - // Give it a sec to close out connections and goroutines - time.Sleep(time.Second) - } - - d.Options = tunnelOpts - - // New contexts - ctx, cancelFunc := context.WithCancel(context.Background()) - d.CancelCtx = ctx - d.CancelFunc = cancelFunc - - // New backend connection - conn := a.cfg.PersistentConfig.GetConnection(d.Options.ConnectionId) - if conn == nil { - return nil, validate.ErrConnectionNotFound - } - - be, err := backends.New(conn.Connection) - if err != nil { - return nil, errors.Wrap(err, "unable to create backend connection") - } - d.Backend = be - - if tunnelOpts.XActive { - if err := d.StartTunnel(5 * time.Second); err != nil { - d.Options.XActive = true - return nil, errors.Wrap(err, "unable to start tunnel") - } - - d.Active = true - d.Options.XActive = true - } - - // Update in-memory config - a.cfg.PersistentConfig.SetTunnel(tunnelID, d) - a.cfg.PersistentConfig.Save() - - // Update metrics - prometheus.IncrPromGauge(prometheus.PlumberTunnels) - - return d, nil -} - -func (a *Actions) DeleteTunnel(ctx context.Context, tunnelID string) error { - tunnelCfg := a.cfg.PersistentConfig.GetTunnel(tunnelID) - if tunnelCfg == nil { - return errors.New("tunnel does not exist") - } - - // Stop grpc client connection so we no longer receive messages from dProxy - if tunnelCfg.Active { - // Cancel reader worker - tunnelCfg.CancelFunc() - - // Give it a sec to finish - time.Sleep(time.Second) - - // Clean up gRPC connection to dProxy and connection to client's backend message bus - tunnelCfg.Close() - } - - // Delete in memory - a.cfg.PersistentConfig.DeleteTunnel(tunnelID) - a.cfg.PersistentConfig.Save() - - // Update metrics - prometheus.DecrPromGauge(prometheus.PlumberTunnels) - - t, err := 
tunnel.New(tunnelCfg.Options, &tunnel.Config{ - PlumberVersion: options.VERSION, - PlumberClusterID: a.cfg.PersistentConfig.ClusterID, - PlumberID: a.cfg.PersistentConfig.PlumberID, - }) - if err != nil { - // Don't pass the error up stack here since the user can't act on it - a.log.Error(errors.Wrap(err, "unable to delete tunnel in dProxy service")) - return nil - } - - if err := t.Delete(ctx, tunnelID); err != nil { - // Don't pass the error up stack here since the user can't act on it - a.log.Error(err) - return nil - } - - return nil -} diff --git a/backends/activemq/tunnel.go b/backends/activemq/tunnel.go deleted file mode 100644 index 2d54e37f1..000000000 --- a/backends/activemq/tunnel.go +++ /dev/null @@ -1,71 +0,0 @@ -package activemq - -import ( - "context" - - "github.com/pkg/errors" - - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - - "github.com/streamdal/plumber/tunnel" - "github.com/streamdal/plumber/validate" -) - -func (a *ActiveMQ) Tunnel(ctx context.Context, tunnelOpts *opts.TunnelOptions, tunnelSvc tunnel.ITunnel, errorCh chan<- *records.ErrorRecord) error { - if err := validateTunnelOptions(tunnelOpts); err != nil { - return errors.Wrap(err, "invalid tunnel options") - } - - llog := a.log.WithField("pkg", "activemq/tunnel") - - if err := tunnelSvc.Start(ctx, "ActiveMQ", errorCh); err != nil { - return errors.Wrap(err, "unable to create tunnel") - } - - destination := getDestinationWrite(tunnelOpts.Activemq.Args) - - outboundCh := tunnelSvc.Read() - - for { - select { - case outbound := <-outboundCh: - if err := a.client.Send(destination, "", outbound.Blob, nil); err != nil { - llog.Errorf("Unable to replay message: %s", err) - break - } - - llog.Debugf("Replayed message to ActiveMQ '%s' for replay '%s'", destination, outbound.ReplayId) - case <-ctx.Done(): - a.log.Debug("context cancelled") - return nil - } - } - - return nil -} - -func 
validateTunnelOptions(tunnelOpts *opts.TunnelOptions) error { - if tunnelOpts == nil { - return validate.ErrEmptyTunnelOpts - } - - if tunnelOpts.Activemq == nil { - return validate.ErrEmptyBackendGroup - } - - args := tunnelOpts.Activemq.Args - if args == nil { - return validate.ErrEmptyBackendArgs - } - - if args.Queue == "" && args.Topic == "" { - return ErrTopicOrQueue - } - - if args.Queue != "" && args.Topic != "" { - return ErrTopicAndQueue - } - - return nil -} diff --git a/backends/awskinesis/tunnel.go b/backends/awskinesis/tunnel.go deleted file mode 100644 index 33caee118..000000000 --- a/backends/awskinesis/tunnel.go +++ /dev/null @@ -1,81 +0,0 @@ -package awskinesis - -import ( - "context" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kinesis" - "github.com/pkg/errors" - - "github.com/streamdal/plumber/validate" - - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - - "github.com/streamdal/plumber/tunnel" -) - -func (k *Kinesis) Tunnel(ctx context.Context, tunnelOpts *opts.TunnelOptions, tunnelSvc tunnel.ITunnel, errorCh chan<- *records.ErrorRecord) error { - if err := validateTunnelOptions(tunnelOpts); err != nil { - return errors.Wrap(err, "unable to validate tunnel options") - } - - llog := k.log.WithField("pkg", "kinesis/tunnel") - - if err := tunnelSvc.Start(ctx, "AWS Kinesis", errorCh); err != nil { - return errors.Wrap(err, "unable to create tunnel") - } - - outboundCh := tunnelSvc.Read() - - args := tunnelOpts.AwsKinesis.Args - - // Continually loop looking for messages on the channel. 
- for { - select { - case outbound := <-outboundCh: - putOpts := &kinesis.PutRecordInput{ - Data: outbound.Blob, - PartitionKey: aws.String(args.PartitionKey), - StreamName: aws.String(args.Stream), - } - - if _, err := k.client.PutRecord(putOpts); err != nil { - k.log.Errorf("Unable to replay message: %s", err) - break - } - - k.log.Debugf("Replayed message to Kinesis stream '%s' for replay '%s'", args.Stream, outbound.ReplayId) - case <-ctx.Done(): - llog.Debug("context cancelled") - return nil - } - } - - return nil -} - -func validateTunnelOptions(tunnelOpts *opts.TunnelOptions) error { - if tunnelOpts == nil { - return validate.ErrEmptyTunnelOpts - } - - if tunnelOpts.AwsKinesis == nil { - return validate.ErrEmptyBackendGroup - } - - args := tunnelOpts.AwsKinesis.Args - if args == nil { - return validate.ErrEmptyBackendArgs - } - - if args.Stream == "" { - return ErrEmptyStream - } - - if args.PartitionKey == "" { - return ErrEmptyPartitionKey - } - - return nil -} diff --git a/backends/awskinesis/tunnel_test.go b/backends/awskinesis/tunnel_test.go deleted file mode 100644 index ecc2936c0..000000000 --- a/backends/awskinesis/tunnel_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package awskinesis - -import ( - "context" - "io/ioutil" - - "github.com/aws/aws-sdk-go/service/kinesis" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "github.com/sirupsen/logrus" - - "github.com/batchcorp/collector-schemas/build/go/protos/events" - - "github.com/batchcorp/plumber-schemas/build/go/protos/args" - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - - "github.com/streamdal/plumber/backends/awskinesis/kinesisfakes" - "github.com/streamdal/plumber/tunnel/tunnelfakes" - "github.com/streamdal/plumber/validate" -) - -var _ = Describe("AWS Kinesis Backend", func() { - var tunnelOpts *opts.TunnelOptions - - BeforeEach(func() { - tunnelOpts = &opts.TunnelOptions{ - AwsKinesis: &opts.TunnelGroupAWSKinesisOptions{ - Args: &args.AWSKinesisWriteArgs{ - Stream: "test", - PartitionKey: "test", - SequenceNumber: "1", - }, - }, - } - }) - - Context("validateTunnelOptions", func() { - It("validates nil tunnel options", func() { - err := validateTunnelOptions(nil) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(validate.ErrEmptyTunnelOpts)) - }) - It("validates nil backend group", func() { - tunnelOpts.AwsKinesis = nil - err := validateTunnelOptions(tunnelOpts) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(validate.ErrEmptyBackendGroup)) - }) - It("validates empty backend args", func() { - tunnelOpts.AwsKinesis.Args = nil - err := validateTunnelOptions(tunnelOpts) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(validate.ErrEmptyBackendArgs)) - }) - It("validates empty topic", func() { - tunnelOpts.AwsKinesis.Args.Stream = "" - err := validateTunnelOptions(tunnelOpts) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(ErrEmptyStream)) - }) - It("validates empty partition key", func() { - tunnelOpts.AwsKinesis.Args.PartitionKey = "" - err := validateTunnelOptions(tunnelOpts) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(ErrEmptyPartitionKey)) - }) - It("passes validation", func() { - err := validateTunnelOptions(tunnelOpts) - Expect(err).ToNot(HaveOccurred()) - }) - }) - - 
Context("Tunnel", func() { - var fakeTunnel *tunnelfakes.FakeITunnel - - BeforeEach(func() { - fakeTunnel = &tunnelfakes.FakeITunnel{} - fakeTunnel.ReadStub = func() chan *events.Outbound { - ch := make(chan *events.Outbound, 1) - ch <- &events.Outbound{Blob: []byte(`testing`)} - return ch - } - }) - - It("validates tunnel options", func() { - errorCh := make(chan *records.ErrorRecord) - err := (&Kinesis{}).Tunnel(context.Background(), nil, nil, errorCh) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring(validate.ErrEmptyTunnelOpts.Error())) - }) - - It("replays a message", func() { - ctx, cancel := context.WithCancel(context.Background()) - - fakeKinesis := &kinesisfakes.FakeKinesisAPI{} - fakeKinesis.PutRecordStub = func(*kinesis.PutRecordInput) (*kinesis.PutRecordOutput, error) { - defer cancel() - return &kinesis.PutRecordOutput{}, nil - } - - p := &Kinesis{ - client: fakeKinesis, - log: logrus.NewEntry(&logrus.Logger{Out: ioutil.Discard}), - } - - errorCh := make(chan *records.ErrorRecord) - err := p.Tunnel(ctx, tunnelOpts, fakeTunnel, errorCh) - Expect(err).ToNot(HaveOccurred()) - Expect(fakeTunnel.ReadCallCount()).To(Equal(1)) - Expect(fakeKinesis.PutRecordCallCount()).To(Equal(1)) - }) - }) -}) diff --git a/backends/awssns/tunnel.go b/backends/awssns/tunnel.go deleted file mode 100644 index 374578d03..000000000 --- a/backends/awssns/tunnel.go +++ /dev/null @@ -1,78 +0,0 @@ -package awssns - -import ( - "context" - "fmt" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/service/sns" - "github.com/pkg/errors" - - "github.com/streamdal/plumber/tunnel" - "github.com/streamdal/plumber/validate" - - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" -) - -func (a *AWSSNS) Tunnel(ctx context.Context, tunnelOpts *opts.TunnelOptions, tunnelSvc tunnel.ITunnel, errorCh chan<- *records.ErrorRecord) error { - if err := 
validateTunnelOptions(tunnelOpts); err != nil { - return errors.Wrap(err, "unable to validate tunnel options") - } - - llog := a.log.WithField("pkg", "activemq/tunnel") - - if err := tunnelSvc.Start(ctx, "AWS SNS", errorCh); err != nil { - return errors.Wrap(err, "unable to create tunnel") - } - - topic := tunnelOpts.AwsSns.Args.Topic - - outboundCh := tunnelSvc.Read() - - // Continually loop looking for messages on the channel. - for { - select { - case outbound := <-outboundCh: - _, err := a.Service.Publish(&sns.PublishInput{ - Message: aws.String(string(outbound.Blob)), - TopicArn: aws.String(topic), - }) - if err != nil { - llog.Errorf("Unable to replay message: %s", err) - break - } - - llog.Debugf("Replayed message to AWSSNS topic '%s' for replay '%s'", topic, outbound.ReplayId) - case <-ctx.Done(): - llog.Debug("context cancelled") - return nil - } - } -} - -func validateTunnelOptions(tunnelOpts *opts.TunnelOptions) error { - if tunnelOpts == nil { - return validate.ErrEmptyTunnelOpts - } - - if tunnelOpts.AwsSns == nil { - return validate.ErrEmptyBackendGroup - } - - if tunnelOpts.AwsSns.Args == nil { - return validate.ErrEmptyBackendArgs - } - - topic := tunnelOpts.AwsSns.Args.Topic - - if topic == "" { - return ErrMissingTopicARN - } - - if arn.IsARN(topic) == false { - return fmt.Errorf("'%s' is not a valid ARN", topic) - } - return nil -} diff --git a/backends/awssns/tunnel_test.go b/backends/awssns/tunnel_test.go deleted file mode 100644 index 330d0b3c9..000000000 --- a/backends/awssns/tunnel_test.go +++ /dev/null @@ -1,193 +0,0 @@ -package awssns - -import ( - "context" - "errors" - "io/ioutil" - - "github.com/aws/aws-sdk-go/service/sns" - "github.com/batchcorp/collector-schemas/build/go/protos/events" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "github.com/sirupsen/logrus" - - "github.com/batchcorp/plumber-schemas/build/go/protos/args" - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - - "github.com/streamdal/plumber/backends/awssns/snsfakes" - "github.com/streamdal/plumber/tunnel/tunnelfakes" - "github.com/streamdal/plumber/validate" -) - -var _ = Describe("", func() { - defer GinkgoRecover() - - Context("validateTunnelOptions", func() { - It("validtes nil tunnel options", func() { - err := validateTunnelOptions(nil) - - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(validate.ErrEmptyTunnelOpts)) - }) - - It("validates nil backend group", func() { - tunnelOpts := &opts.TunnelOptions{} - - err := validateTunnelOptions(tunnelOpts) - - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(validate.ErrEmptyBackendGroup)) - }) - - It("validates nil backend args", func() { - tunnelOpts := &opts.TunnelOptions{ - AwsSns: &opts.TunnelGroupAWSSNSOptions{}, - } - - err := validateTunnelOptions(tunnelOpts) - - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(validate.ErrEmptyBackendArgs)) - }) - - It("validates empty topic", func() { - tunnelOpts := &opts.TunnelOptions{ - AwsSns: &opts.TunnelGroupAWSSNSOptions{ - Args: &args.AWSSNSWriteArgs{ - Topic: "", - }, - }, - } - - err := validateTunnelOptions(tunnelOpts) - - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(ErrMissingTopicARN)) - }) - - It("validates topic value", func() { - tunnelOpts := &opts.TunnelOptions{ - AwsSns: &opts.TunnelGroupAWSSNSOptions{ - Args: &args.AWSSNSWriteArgs{ - Topic: "invalidtopic", - }, - }, - } - - err := validateTunnelOptions(tunnelOpts) - - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(Equal("'invalidtopic' is not a valid ARN")) - }) - - It("passes validation", func() { - tunnelOpts := &opts.TunnelOptions{ - AwsSns: &opts.TunnelGroupAWSSNSOptions{ - Args: &args.AWSSNSWriteArgs{ - Topic: 
"arn:aws:sns:us-east-1:123456789012:topic", - }, - }, - } - - err := validateTunnelOptions(tunnelOpts) - - Expect(err).To(BeNil()) - }) - }) - - Context("Tunnel", func() { - var fakeSNS *snsfakes.FakeSNSAPI - var fakeTunnel *tunnelfakes.FakeITunnel - var errorCh chan *records.ErrorRecord - tunnelOpts := &opts.TunnelOptions{ - AwsSns: &opts.TunnelGroupAWSSNSOptions{ - Args: &args.AWSSNSWriteArgs{ - Topic: "arn:aws:sns:us-east-1:123456789012:topic", - }, - }, - } - - BeforeEach(func() { - errorCh = make(chan *records.ErrorRecord) - - fakeSNS = &snsfakes.FakeSNSAPI{} - - fakeTunnel = &tunnelfakes.FakeITunnel{} - fakeTunnel.StartStub = func(context.Context, string, chan<- *records.ErrorRecord) error { - return nil - } - fakeTunnel.ReadStub = func() chan *events.Outbound { - ch := make(chan *events.Outbound, 1) - ch <- &events.Outbound{Blob: []byte(`testing`)} - return ch - } - - }) - - It("validates tunnel options", func() { - err := (&AWSSNS{}).Tunnel(context.Background(), nil, nil, nil) - - Expect(err.Error()).To(ContainSubstring("unable to validate tunnel options")) - Expect(fakeTunnel.StartCallCount()).To(Equal(0)) - Expect(fakeTunnel.ReadCallCount()).To(Equal(0)) - }) - - It("validates creating of tunnel", func() { - fakeTunnel.StartStub = func(context.Context, string, chan<- *records.ErrorRecord) error { - return errors.New("start error") - } - - p := &AWSSNS{ - log: logrus.NewEntry(&logrus.Logger{Out: ioutil.Discard}), - } - - err := p.Tunnel(context.Background(), tunnelOpts, fakeTunnel, errorCh) - Expect(err.Error()).To(ContainSubstring("unable to create tunnel")) - Expect(fakeTunnel.StartCallCount()).To(Equal(1)) - Expect(fakeTunnel.ReadCallCount()).To(Equal(0)) - - }) - - It("returns an error on failure to write a message", func() { - ctx, cancel := context.WithCancel(context.Background()) - - fakeSNS.PublishStub = func(pi *sns.PublishInput) (*sns.PublishOutput, error) { - defer cancel() - return &sns.PublishOutput{}, errors.New("publish error") - } - - p := 
&AWSSNS{ - Service: fakeSNS, - log: logrus.NewEntry(&logrus.Logger{Out: ioutil.Discard}), - } - - err := p.Tunnel(ctx, tunnelOpts, fakeTunnel, errorCh) - Expect(err).ToNot(HaveOccurred()) - Expect(fakeTunnel.StartCallCount()).To(Equal(1)) - Expect(fakeTunnel.ReadCallCount()).To(Equal(1)) - Expect(fakeSNS.PublishCallCount()).To(Equal(1)) - - }) - - It("replays a message", func() { - ctx, cancel := context.WithCancel(context.Background()) - - fakeSNS.PublishStub = func(pi *sns.PublishInput) (*sns.PublishOutput, error) { - defer cancel() - return &sns.PublishOutput{}, nil - } - - p := &AWSSNS{ - Service: fakeSNS, - log: logrus.NewEntry(&logrus.Logger{Out: ioutil.Discard}), - } - - err := p.Tunnel(ctx, tunnelOpts, fakeTunnel, errorCh) - Expect(err).ToNot(HaveOccurred()) - Expect(fakeTunnel.StartCallCount()).To(Equal(1)) - Expect(fakeTunnel.ReadCallCount()).To(Equal(1)) - Expect(fakeSNS.PublishCallCount()).To(Equal(1)) - }) - }) -}) diff --git a/backends/awssqs/tunnel.go b/backends/awssqs/tunnel.go deleted file mode 100644 index 572f65db7..000000000 --- a/backends/awssqs/tunnel.go +++ /dev/null @@ -1,72 +0,0 @@ -package awssqs - -import ( - "context" - "fmt" - - "github.com/pkg/errors" - - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - - "github.com/streamdal/plumber/tunnel" - "github.com/streamdal/plumber/validate" -) - -func (a *AWSSQS) Tunnel(ctx context.Context, opts *opts.TunnelOptions, tunnelSvc tunnel.ITunnel, errorCh chan<- *records.ErrorRecord) error { - if err := validateTunnelOptions(opts); err != nil { - return errors.Wrap(err, "unable to validate tunnel options") - } - - llog := a.log.WithField("pkg", "awssqs/tunnel") - - args := opts.AwsSqs.Args - - queueURL, err := a.getQueueURL(args.QueueName, args.RemoteAccountId) - if err != nil { - return errors.Wrap(err, "unable to get queue url") - } - - if err := tunnelSvc.Start(ctx, "AWS SQS", errorCh); err != nil { - return 
errors.Wrap(err, "unable to create tunnel") - } - - outboundCh := tunnelSvc.Read() - - for { - select { - case outbound := <-outboundCh: - // write - if err := a.writeMsg(args, string(outbound.Blob), queueURL); err != nil { - err = fmt.Errorf("unable to replay message: %s", err) - llog.Error(err) - return err - } - case <-ctx.Done(): - llog.Debug("context cancelled") - return nil - } - } - - return nil -} - -func validateTunnelOptions(tunnelOpts *opts.TunnelOptions) error { - if tunnelOpts == nil { - return validate.ErrEmptyTunnelOpts - } - - if tunnelOpts.AwsSqs == nil { - return validate.ErrEmptyBackendGroup - } - - if tunnelOpts.AwsSqs.Args == nil { - return validate.ErrEmptyBackendArgs - } - - if tunnelOpts.AwsSqs.Args.QueueName == "" { - return ErrMissingQueue - } - - return nil -} diff --git a/backends/awssqs/tunnel_test.go b/backends/awssqs/tunnel_test.go deleted file mode 100644 index cb38df207..000000000 --- a/backends/awssqs/tunnel_test.go +++ /dev/null @@ -1,138 +0,0 @@ -package awssqs - -import ( - "context" - "errors" - "io/ioutil" - - "github.com/aws/aws-sdk-go/service/sqs" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "github.com/sirupsen/logrus" - - "github.com/batchcorp/collector-schemas/build/go/protos/events" - - "github.com/batchcorp/plumber-schemas/build/go/protos/args" - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - - "github.com/streamdal/plumber/backends/awssqs/sqsfakes" - "github.com/streamdal/plumber/tunnel/tunnelfakes" - "github.com/streamdal/plumber/validate" -) - -var _ = Describe("AWSSQS Backend", func() { - var tunnelOpts *opts.TunnelOptions - - BeforeEach(func() { - tunnelOpts = &opts.TunnelOptions{ - AwsSqs: &opts.TunnelGroupAWSSQSOptions{ - Args: &args.AWSSQSWriteArgs{ - QueueName: "testing.fifo", - MessageDeduplicationId: "test", - RemoteAccountId: "test", - Attributes: map[string]string{ - "test": "test", - }, - }, - }, - } - }) - - Context("validateTunnelOptions", func() { - It("validates nil tunnel options", func() { - err := validateTunnelOptions(nil) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(validate.ErrEmptyTunnelOpts)) - }) - It("validates nil backend group", func() { - tunnelOpts.AwsSqs = nil - err := validateTunnelOptions(tunnelOpts) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(validate.ErrEmptyBackendGroup)) - }) - It("validates empty backend args", func() { - tunnelOpts.AwsSqs.Args = nil - err := validateTunnelOptions(tunnelOpts) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(validate.ErrEmptyBackendArgs)) - }) - It("validates empty topic", func() { - tunnelOpts.AwsSqs.Args.QueueName = "" - err := validateTunnelOptions(tunnelOpts) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(ErrMissingQueue)) - }) - It("passes validation", func() { - err := validateTunnelOptions(tunnelOpts) - Expect(err).ToNot(HaveOccurred()) - }) - }) - - Context("Tunnel", func() { - var fakeTunnel *tunnelfakes.FakeITunnel - - BeforeEach(func() { - fakeTunnel = &tunnelfakes.FakeITunnel{} - fakeTunnel.ReadStub = func() chan 
*events.Outbound { - ch := make(chan *events.Outbound, 1) - ch <- &events.Outbound{Blob: []byte(`testing`)} - return ch - } - }) - - It("validates tunnel options", func() { - errorCh := make(chan *records.ErrorRecord) - err := (&AWSSQS{}).Tunnel(context.Background(), nil, nil, errorCh) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring(validate.ErrEmptyTunnelOpts.Error())) - }) - - It("returns an error on failure to write a message", func() { - fakeSQS := &sqsfakes.FakeSQSAPI{} - fakeSQS.GetQueueUrlStub = func(*sqs.GetQueueUrlInput) (*sqs.GetQueueUrlOutput, error) { - return &sqs.GetQueueUrlOutput{}, nil - } - fakeSQS.SendMessageStub = func(*sqs.SendMessageInput) (*sqs.SendMessageOutput, error) { - return nil, errors.New("test err") - } - - p := &AWSSQS{ - client: fakeSQS, - log: logrus.NewEntry(&logrus.Logger{Out: ioutil.Discard}), - } - - errorCh := make(chan *records.ErrorRecord) - err := p.Tunnel(context.Background(), tunnelOpts, fakeTunnel, errorCh) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("unable to replay message")) - Expect(fakeTunnel.ReadCallCount()).To(Equal(1)) - Expect(fakeSQS.SendMessageCallCount()).To(Equal(1)) - }) - - It("replays a message", func() { - ctx, cancel := context.WithCancel(context.Background()) - - fakeSQS := &sqsfakes.FakeSQSAPI{} - fakeSQS.GetQueueUrlStub = func(*sqs.GetQueueUrlInput) (*sqs.GetQueueUrlOutput, error) { - return &sqs.GetQueueUrlOutput{}, nil - } - fakeSQS.SendMessageStub = func(*sqs.SendMessageInput) (*sqs.SendMessageOutput, error) { - defer cancel() - return &sqs.SendMessageOutput{}, nil - } - - p := &AWSSQS{ - client: fakeSQS, - log: logrus.NewEntry(&logrus.Logger{Out: ioutil.Discard}), - } - - errorCh := make(chan *records.ErrorRecord) - err := p.Tunnel(ctx, tunnelOpts, fakeTunnel, errorCh) - Expect(err).ToNot(HaveOccurred()) - Expect(fakeTunnel.ReadCallCount()).To(Equal(1)) - Expect(fakeSQS.SendMessageCallCount()).To(Equal(1)) - }) - }) - -}) diff --git 
a/backends/azure-eventhub/tunnel.go b/backends/azure-eventhub/tunnel.go deleted file mode 100644 index 10372caf0..000000000 --- a/backends/azure-eventhub/tunnel.go +++ /dev/null @@ -1,71 +0,0 @@ -package azure_eventhub - -import ( - "context" - - eventhub "github.com/Azure/azure-event-hubs-go/v3" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - - "github.com/streamdal/plumber/tunnel" - "github.com/streamdal/plumber/validate" -) - -func (a *AzureEventHub) Tunnel(ctx context.Context, tunnelOpts *opts.TunnelOptions, tunnelSvc tunnel.ITunnel, errorCh chan<- *records.ErrorRecord) error { - if err := validateTunnelOptions(tunnelOpts); err != nil { - return errors.Wrap(err, "invalid tunnel options") - } - - llog := logrus.WithField("pkg", "azure-eventhub/tunnel") - - if err := tunnelSvc.Start(ctx, "Azure Event Hub", errorCh); err != nil { - return errors.Wrap(err, "unable to create tunnel") - } - - sendOpts := make([]eventhub.SendOption, 0) - - outboundCh := tunnelSvc.Read() - - // Continually loop looking for messages on the channel. 
- for { - select { - case outbound := <-outboundCh: - - event := eventhub.NewEvent(outbound.Blob) - if tunnelOpts.AzureEventHub.Args.PartitionKey != "" { - event.PartitionKey = &tunnelOpts.AzureEventHub.Args.PartitionKey - } - - if err := a.client.Send(ctx, event, sendOpts...); err != nil { - llog.Errorf("Unable to replay message: %s", err) - break - } - - llog.Debugf("Replayed message to Azure Event Hub for replay '%s'", outbound.ReplayId) - case <-ctx.Done(): - llog.Debug("context cancelled") - return nil - } - } - - return nil -} - -func validateTunnelOptions(tunnelOpts *opts.TunnelOptions) error { - if tunnelOpts == nil { - return validate.ErrEmptyTunnelOpts - } - - if tunnelOpts.AzureEventHub == nil { - return validate.ErrEmptyBackendGroup - } - - if tunnelOpts.AzureEventHub.Args == nil { - return validate.ErrEmptyBackendArgs - } - - return nil -} diff --git a/backends/azure-servicebus/tunnel.go b/backends/azure-servicebus/tunnel.go deleted file mode 100644 index 9e281ff47..000000000 --- a/backends/azure-servicebus/tunnel.go +++ /dev/null @@ -1,103 +0,0 @@ -package azure_servicebus - -import ( - "context" - - serviceBus "github.com/Azure/azure-service-bus-go" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - - "github.com/streamdal/plumber/tunnel" - "github.com/streamdal/plumber/validate" - - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" -) - -func (a *AzureServiceBus) Tunnel(ctx context.Context, tunnelOpts *opts.TunnelOptions, tunnelSvc tunnel.ITunnel, errorCh chan<- *records.ErrorRecord) error { - if err := validateTunnelOptions(tunnelOpts); err != nil { - return errors.Wrap(err, "invalid tunnel options") - } - - llog := logrus.WithField("pkg", "azure/tunnel") - - var queue *serviceBus.Queue - var topic *serviceBus.Topic - var err error - - if tunnelOpts.AzureServiceBus.Args.Queue != "" { - queue, err = a.client.NewQueue(tunnelOpts.AzureServiceBus.Args.Queue) - if err != 
nil { - return errors.Wrap(err, "unable to create new azure service bus queue client") - } - - defer queue.Close(ctx) - } else { - topic, err = a.client.NewTopic(tunnelOpts.AzureServiceBus.Args.Topic) - if err != nil { - return errors.Wrap(err, "unable to create new azure service bus topic client") - } - - defer topic.Close(ctx) - } - - if err := tunnelSvc.Start(ctx, "Azure Service Bus", errorCh); err != nil { - return errors.Wrap(err, "unable to create tunnel") - } - - outboundCh := tunnelSvc.Read() - - // Continually loop looking for messages on the channel. - for { - select { - case outbound := <-outboundCh: - msg := serviceBus.NewMessage(outbound.Blob) - - if queue != nil { - // Publishing to queue - if err := queue.Send(ctx, msg); err != nil { - llog.Errorf("Unable to replay message: %s", err) - break - } - } else { - // Publishing to topic - if err := topic.Send(ctx, msg); err != nil { - llog.Errorf("Unable to replay message: %s", err) - break - } - } - - llog.Debugf("Replayed message to Azure Service Bus for replay '%s'", outbound.ReplayId) - case <-ctx.Done(): - llog.Debug("context cancelled") - return nil - } - } - - return nil -} - -func validateTunnelOptions(tunnelOpts *opts.TunnelOptions) error { - if tunnelOpts == nil { - return validate.ErrEmptyTunnelOpts - } - - if tunnelOpts.AzureServiceBus == nil { - return validate.ErrEmptyBackendGroup - } - - args := tunnelOpts.AzureServiceBus.Args - if tunnelOpts.AzureServiceBus.Args == nil { - return validate.ErrEmptyBackendArgs - } - - if args.Queue == "" && args.Topic == "" { - return ErrQueueOrTopic - } - - if args.Queue != "" && args.Topic != "" { - return ErrQueueAndTopic - } - - return nil -} diff --git a/backends/backends.go b/backends/backends.go index 8e3acceb5..d2b7d232e 100644 --- a/backends/backends.go +++ b/backends/backends.go @@ -5,6 +5,8 @@ import ( "github.com/pkg/errors" + "github.com/batchcorp/plumber-schemas/build/go/protos/opts" + 
"github.com/batchcorp/plumber-schemas/build/go/protos/records" "github.com/streamdal/plumber/backends/activemq" "github.com/streamdal/plumber/backends/awskinesis" "github.com/streamdal/plumber/backends/awssns" @@ -27,10 +29,6 @@ import ( "github.com/streamdal/plumber/backends/rabbitmq" "github.com/streamdal/plumber/backends/rpubsub" "github.com/streamdal/plumber/backends/rstreams" - "github.com/streamdal/plumber/tunnel" - - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" ) // Backend is the interface that all backends implement; the interface is used @@ -61,10 +59,6 @@ type Backend interface { // put/get sample data). Test(ctx context.Context) error - // Tunnel creates a tunnel to Batch and exposes the connected backend as a - // destination. This is a blocking call. - Tunnel(ctx context.Context, tunnelOpts *opts.TunnelOptions, tunnelSvc tunnel.ITunnel, errorCh chan<- *records.ErrorRecord) error - // Relay will hook into a message bus as a consumer and relay all messages // to the relayCh; if an error channel is provided, any errors will be piped // to the channel as well. This method _usually_ blocks. 
diff --git a/backends/cdcmongo/tunnel.go b/backends/cdcmongo/tunnel.go deleted file mode 100644 index 88b009ce7..000000000 --- a/backends/cdcmongo/tunnel.go +++ /dev/null @@ -1,15 +0,0 @@ -package cdcmongo - -import ( - "context" - - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - - "github.com/streamdal/plumber/tunnel" - "github.com/streamdal/plumber/types" -) - -func (m *Mongo) Tunnel(ctx context.Context, opts *opts.TunnelOptions, tunnelSvc tunnel.ITunnel, errorCh chan<- *records.ErrorRecord) error { - return types.NotImplementedErr -} diff --git a/backends/cdcpostgres/tunnel.go b/backends/cdcpostgres/tunnel.go deleted file mode 100644 index 6de356341..000000000 --- a/backends/cdcpostgres/tunnel.go +++ /dev/null @@ -1,15 +0,0 @@ -package cdcpostgres - -import ( - "context" - - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - - "github.com/streamdal/plumber/tunnel" - "github.com/streamdal/plumber/types" -) - -func (c *CDCPostgres) Tunnel(ctx context.Context, tunnelOpts *opts.TunnelOptions, tunnelSvc tunnel.ITunnel, errorCh chan<- *records.ErrorRecord) error { - return types.NotImplementedErr -} diff --git a/backends/gcppubsub/tunnel.go b/backends/gcppubsub/tunnel.go deleted file mode 100644 index 19aadd271..000000000 --- a/backends/gcppubsub/tunnel.go +++ /dev/null @@ -1,70 +0,0 @@ -package gcppubsub - -import ( - "context" - - "cloud.google.com/go/pubsub" - "github.com/pkg/errors" - - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - - "github.com/streamdal/plumber/tunnel" - "github.com/streamdal/plumber/validate" -) - -func (g *GCPPubSub) Tunnel(ctx context.Context, tunnelOpts *opts.TunnelOptions, tunnelSvc tunnel.ITunnel, errorCh chan<- *records.ErrorRecord) error { - if err := validateTunnelOptions(tunnelOpts); err != nil { - 
return errors.New("unable to validate write options") - } - - if err := tunnelSvc.Start(ctx, "GCP PubSub", errorCh); err != nil { - return errors.Wrap(err, "unable to create tunnel") - } - - t := g.client.Topic(tunnelOpts.GcpPubsub.Args.TopicId) - - outboundCh := tunnelSvc.Read() - - for { - select { - case outbound := <-outboundCh: - result := t.Publish(ctx, &pubsub.Message{ - Data: outbound.Blob, - }) - - if _, err := result.Get(ctx); err != nil { - g.log.Errorf("Unable to replay message: %s", err) - continue - } - - g.log.Debugf("Replayed message to GCP Pubsub topic '%s' for replay '%s'", tunnelOpts.GcpPubsub.Args.TopicId, outbound.ReplayId) - - case <-ctx.Done(): - g.log.Debug("context cancelled") - return nil - } - } - - return nil -} - -func validateTunnelOptions(tunnelOpts *opts.TunnelOptions) error { - if tunnelOpts == nil { - return validate.ErrEmptyTunnelOpts - } - - if tunnelOpts.GcpPubsub == nil { - return validate.ErrEmptyBackendGroup - } - - if tunnelOpts.GcpPubsub.Args == nil { - return validate.ErrEmptyBackendArgs - } - - if tunnelOpts.GcpPubsub.Args.TopicId == "" { - return errors.New("Topic ID cannot be empty") - } - - return nil -} diff --git a/backends/kafka/tunnel.go b/backends/kafka/tunnel.go deleted file mode 100644 index 2873e43b2..000000000 --- a/backends/kafka/tunnel.go +++ /dev/null @@ -1,119 +0,0 @@ -package kafka - -import ( - "context" - "encoding/base64" - - "github.com/pkg/errors" - "github.com/segmentio/kafka-go" - skafka "github.com/segmentio/kafka-go" - "github.com/sirupsen/logrus" - - "github.com/batchcorp/collector-schemas/build/go/protos/events" - - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - - "github.com/streamdal/plumber/tunnel" - "github.com/streamdal/plumber/util" - "github.com/streamdal/plumber/validate" -) - -// Tunnels starts up a new GRPC client connected to the dProxy service and receives a stream of outbound replay messages -// which 
are then written to the message bus. -func (k *Kafka) Tunnel(ctx context.Context, opts *opts.TunnelOptions, tunnelSvc tunnel.ITunnel, errorCh chan<- *records.ErrorRecord) error { - llog := logrus.WithField("pkg", "kafka/tunnel") - - if err := validateTunnelOptions(opts); err != nil { - return errors.Wrap(err, "unable to validate tunnel options") - } - - // Start up writer - writer, err := NewWriter(k.dialer, k.connArgs) - if err != nil { - return errors.Wrap(err, "unable to create new writer") - } - - defer writer.Close() - - if err := tunnelSvc.Start(ctx, "Kafka", errorCh); err != nil { - return errors.Wrap(err, "unable to create tunnel") - } - - outboundCh := tunnelSvc.Read() - - // Continually loop looking for messages on the channel. -MAIN: - for { - select { - case outbound := <-outboundCh: - headers := make([]kafka.Header, 0) - - if len(outbound.Metadata) > 0 { - headers = k.generateKafkaHeaders(outbound) - } - - for _, topic := range opts.Kafka.Args.Topics { - if err := writer.WriteMessages(ctx, skafka.Message{ - Topic: topic, - Key: []byte(opts.Kafka.Args.Key), - Value: outbound.Blob, - Headers: headers, - }); err != nil { - llog.Errorf("Unable to replay message: %s", err) - break MAIN - } - } - - case <-ctx.Done(): - k.log.Debug("context cancelled") - break MAIN - } - } - - k.log.Debug("tunnel exiting") - - return nil -} - -func (k *Kafka) generateKafkaHeaders(o *events.Outbound) []kafka.Header { - headers := make([]kafka.Header, 0) - - for mdKey, mdVal := range o.Metadata { - var value []byte - var err error - if util.IsBase64(mdVal) { - value, err = base64.StdEncoding.DecodeString(mdVal) - if err != nil { - k.log.Errorf("Unable to decode header '%s' with value '%s' for replay '%s'", mdKey, mdVal, o.ReplayId) - continue - } - } else { - value = []byte(mdVal) - } - - headers = append(headers, kafka.Header{Key: mdKey, Value: value}) - } - - return headers -} - -func validateTunnelOptions(tunnelOpts *opts.TunnelOptions) error { - if tunnelOpts == nil { - 
return validate.ErrEmptyTunnelOpts - } - - if tunnelOpts.Kafka == nil { - return validate.ErrEmptyBackendGroup - } - - if tunnelOpts.Kafka.Args == nil { - return validate.ErrEmptyBackendArgs - } - - if len(tunnelOpts.Kafka.Args.Topics) == 0 { - return ErrMissingTopic - } - - return nil -} diff --git a/backends/kubemq-queue/tunnel.go b/backends/kubemq-queue/tunnel.go deleted file mode 100644 index 60b8727e9..000000000 --- a/backends/kubemq-queue/tunnel.go +++ /dev/null @@ -1,15 +0,0 @@ -package kubemq_queue - -import ( - "context" - - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - - "github.com/streamdal/plumber/tunnel" - "github.com/streamdal/plumber/types" -) - -func (k *KubeMQ) Tunnel(ctx context.Context, tunnelOpts *opts.TunnelOptions, tunnelSvc tunnel.ITunnel, errorCh chan<- *records.ErrorRecord) error { - return types.NotImplementedErr -} diff --git a/backends/memphis/tunnel.go b/backends/memphis/tunnel.go deleted file mode 100644 index 4d4adbf97..000000000 --- a/backends/memphis/tunnel.go +++ /dev/null @@ -1,82 +0,0 @@ -package memphis - -import ( - "context" - "fmt" - - "github.com/memphisdev/memphis.go" - "github.com/pkg/errors" - - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - - "github.com/streamdal/plumber/tunnel" - "github.com/streamdal/plumber/util" - "github.com/streamdal/plumber/validate" -) - -func (m *Memphis) Tunnel(ctx context.Context, opts *opts.TunnelOptions, tunnelSvc tunnel.ITunnel, errorCh chan<- *records.ErrorRecord) error { - if err := validateTunnelOptions(opts); err != nil { - return errors.Wrap(err, "invalid tunnel options") - } - - llog := m.log.WithField("pkg", "memphis/tunnel") - - args := opts.GetMemphis().Args - - producer, err := m.client.CreateProducer(args.Station, args.ProducerName) - if err != nil { - return errors.Wrap(err, "unable to create Memphis producer") - } - - 
if err := tunnelSvc.Start(ctx, "Memphis", errorCh); err != nil { - return errors.Wrap(err, "unable to create tunnel") - } - - headers := genHeaders(args.Headers) - - po := make([]memphis.ProduceOpt, 0) - po = append(po, memphis.MsgHeaders(headers)) - - if args.MessageId != "" { - po = append(po, memphis.MsgId(args.MessageId)) - } - - outboundCh := tunnelSvc.Read() - - for { - select { - case outbound := <-outboundCh: - if err := producer.Produce(outbound.Blob, po...); err != nil { - util.WriteError(m.log, errorCh, fmt.Errorf("unable to write message to station '%s': %s", args.Station, err)) - } - case <-ctx.Done(): - llog.Debug("context cancelled") - return nil - } - } - - return nil - -} - -func validateTunnelOptions(opts *opts.TunnelOptions) error { - if opts == nil { - return validate.ErrEmptyTunnelOpts - } - - if opts.Memphis == nil { - return validate.ErrEmptyBackendGroup - } - - args := opts.Memphis.Args - if args == nil { - return validate.ErrEmptyBackendArgs - } - - if args.Station == "" { - return ErrEmptyStation - } - - return nil -} diff --git a/backends/mqtt/tunnel.go b/backends/mqtt/tunnel.go deleted file mode 100644 index 5af106b1f..000000000 --- a/backends/mqtt/tunnel.go +++ /dev/null @@ -1,73 +0,0 @@ -package mqtt - -import ( - "context" - "fmt" - - "github.com/pkg/errors" - - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - - "github.com/streamdal/plumber/tunnel" - "github.com/streamdal/plumber/util" - "github.com/streamdal/plumber/validate" -) - -func (m *MQTT) Tunnel(ctx context.Context, tunnelOpts *opts.TunnelOptions, tunnelSvc tunnel.ITunnel, errorCh chan<- *records.ErrorRecord) error { - if err := validateTunnelOptions(tunnelOpts); err != nil { - return errors.Wrap(err, "invalid tunnel options") - } - - llog := m.log.WithField("pkg", "mqtt/tunnel") - - if err := tunnelSvc.Start(ctx, "MQTT", errorCh); err != nil { - return errors.Wrap(err, "unable to create tunnel") - 
} - - timeout := util.DurationSec(tunnelOpts.Mqtt.Args.WriteTimeoutSeconds) - topic := tunnelOpts.Mqtt.Args.Topic - - outboundCh := tunnelSvc.Read() - - for { - select { - case outbound := <-outboundCh: - token := m.client.Publish(topic, byte(int(m.connArgs.QosLevel)), false, outbound.Blob) - - if !token.WaitTimeout(timeout) { - return fmt.Errorf("timed out attempting to publish message after %d seconds", - tunnelOpts.Mqtt.Args.WriteTimeoutSeconds) - } - - if token.Error() != nil { - return errors.Wrap(token.Error(), "unable to replay message") - } - - llog.Debugf("Replayed message to MQTT topic '%s' for replay '%s'", topic, outbound.ReplayId) - case <-ctx.Done(): - m.log.Debug("context cancelled") - return nil - } - } -} - -func validateTunnelOptions(tunnelOpts *opts.TunnelOptions) error { - if tunnelOpts == nil { - return validate.ErrEmptyTunnelOpts - } - - if tunnelOpts.Mqtt == nil { - return validate.ErrEmptyBackendGroup - } - - if tunnelOpts.Mqtt.Args == nil { - return validate.ErrEmptyBackendArgs - } - - if tunnelOpts.Mqtt.Args.Topic == "" { - return ErrEmptyTopic - } - - return nil -} diff --git a/backends/mqtt/tunnel_test.go b/backends/mqtt/tunnel_test.go deleted file mode 100644 index 898b4e55f..000000000 --- a/backends/mqtt/tunnel_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package mqtt - -import ( - "context" - "io/ioutil" - "time" - - mqtt "github.com/eclipse/paho.mqtt.golang" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - - "github.com/batchcorp/collector-schemas/build/go/protos/events" - - "github.com/batchcorp/plumber-schemas/build/go/protos/args" - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - - "github.com/streamdal/plumber/tools/mqttfakes" - - "github.com/streamdal/plumber/tunnel/tunnelfakes" - "github.com/streamdal/plumber/validate" -) - -var _ = Describe("MQTT Backend", func() { - var tunnelOpts *opts.TunnelOptions - - BeforeEach(func() { - tunnelOpts = &opts.TunnelOptions{ - Mqtt: &opts.TunnelGroupMQTTOptions{ - Args: &args.MQTTWriteArgs{ - Topic: "test", - WriteTimeoutSeconds: 1, - }, - }, - } - }) - - Context("validateTunnelOptions", func() { - It("validates nil tunnel options", func() { - err := validateTunnelOptions(nil) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(validate.ErrEmptyTunnelOpts)) - }) - It("validates nil backend group", func() { - tunnelOpts.Mqtt = nil - err := validateTunnelOptions(tunnelOpts) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(validate.ErrEmptyBackendGroup)) - }) - It("validates empty backend args", func() { - tunnelOpts.Mqtt.Args = nil - err := validateTunnelOptions(tunnelOpts) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(validate.ErrEmptyBackendArgs)) - }) - It("validates empty topic", func() { - tunnelOpts.Mqtt.Args.Topic = "" - err := validateTunnelOptions(tunnelOpts) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(ErrEmptyTopic)) - }) - It("passes validation", func() { - err := validateTunnelOptions(tunnelOpts) - Expect(err).ToNot(HaveOccurred()) - }) - }) - - Context("Tunnel", func() { - var fakeTunnel *tunnelfakes.FakeITunnel - - BeforeEach(func() { - fakeTunnel = &tunnelfakes.FakeITunnel{} - fakeTunnel.ReadStub = func() chan *events.Outbound { - ch := make(chan *events.Outbound, 1) - ch <- &events.Outbound{Blob: []byte(`testing`)} - 
return ch - } - }) - - It("validates tunnel options", func() { - errorCh := make(chan *records.ErrorRecord) - err := (&MQTT{}).Tunnel(context.Background(), nil, nil, errorCh) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring(validate.ErrEmptyTunnelOpts.Error())) - }) - - It("returns an error on publish timeout", func() { - fakeMQTT := &mqttfakes.FakeClient{} - fakeMQTT.PublishStub = func(string, byte, bool, interface{}) mqtt.Token { - return &mqttfakes.FakeToken{ - WaitTimeoutStub: func(_ time.Duration) bool { - return false - }, - } - } - - m := &MQTT{ - client: fakeMQTT, - connArgs: &args.MQTTConn{}, - log: logrus.NewEntry(&logrus.Logger{Out: ioutil.Discard}), - } - - errorCh := make(chan *records.ErrorRecord) - err := m.Tunnel(context.Background(), tunnelOpts, fakeTunnel, errorCh) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("timed out")) - }) - - It("returns an error when publish fails", func() { - fakeMQTT := &mqttfakes.FakeClient{} - fakeMQTT.PublishStub = func(string, byte, bool, interface{}) mqtt.Token { - return &mqttfakes.FakeToken{ - ErrorStub: func() error { - return errors.New("test error") - }, - WaitTimeoutStub: func(_ time.Duration) bool { - return true - }, - } - } - - m := &MQTT{ - client: fakeMQTT, - connArgs: &args.MQTTConn{}, - log: logrus.NewEntry(&logrus.Logger{Out: ioutil.Discard}), - } - - errorCh := make(chan *records.ErrorRecord) - err := m.Tunnel(context.Background(), tunnelOpts, fakeTunnel, errorCh) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("unable to replay message")) - }) - - It("replays a message", func() { - fakeMQTT := &mqttfakes.FakeClient{} - fakeMQTT.PublishStub = func(string, byte, bool, interface{}) mqtt.Token { - return &mqttfakes.FakeToken{ - ErrorStub: func() error { return nil }, - WaitTimeoutStub: func(_ time.Duration) bool { return true }, - } - } - - m := &MQTT{ - client: fakeMQTT, - connArgs: &args.MQTTConn{}, - log: 
logrus.NewEntry(&logrus.Logger{Out: ioutil.Discard}), - } - - ctx, cancel := context.WithCancel(context.Background()) - go func() { - time.Sleep(time.Second) - cancel() - }() - - errorCh := make(chan *records.ErrorRecord) - err := m.Tunnel(ctx, tunnelOpts, fakeTunnel, errorCh) - Expect(err).ToNot(HaveOccurred()) - Expect(fakeTunnel.StartCallCount()).To(Equal(1)) - Expect(fakeTunnel.ReadCallCount()).To(Equal(1)) - Expect(fakeMQTT.PublishCallCount()).To(Equal(1)) - }) - - }) -}) diff --git a/backends/nats-jetstream/tunnel.go b/backends/nats-jetstream/tunnel.go deleted file mode 100644 index a31ce9018..000000000 --- a/backends/nats-jetstream/tunnel.go +++ /dev/null @@ -1,68 +0,0 @@ -package nats_jetstream - -import ( - "context" - - "github.com/pkg/errors" - - "github.com/streamdal/plumber/validate" - - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - - "github.com/streamdal/plumber/tunnel" -) - -func (n *NatsJetstream) Tunnel(ctx context.Context, tunnelOpts *opts.TunnelOptions, tunnelSvc tunnel.ITunnel, errorCh chan<- *records.ErrorRecord) error { - if err := validateTunnelOptions(tunnelOpts); err != nil { - return errors.Wrap(err, "invalid tunnel options") - } - - llog := n.log.WithField("pkg", "nats-jetstream/tunnel") - - if err := tunnelSvc.Start(ctx, "Nats", errorCh); err != nil { - return errors.Wrap(err, "unable to create tunnel") - } - - stream := tunnelOpts.NatsJetstream.Args.Subject - - outboundCh := tunnelSvc.Read() - - // Continually loop looking for messages on the channel. 
- for { - select { - case outbound := <-outboundCh: - if err := n.client.Publish(stream, outbound.Blob); err != nil { - n.log.Errorf("Unable to replay message: %s", err) - break - } - - n.log.Debugf("Replayed message to Nats stream '%s' for replay '%s'", stream, outbound.ReplayId) - case <-ctx.Done(): - llog.Debug("context cancelled") - return nil - } - } - - return nil -} - -func validateTunnelOptions(tunnelOpts *opts.TunnelOptions) error { - if tunnelOpts == nil { - return validate.ErrEmptyTunnelOpts - } - - if tunnelOpts.NatsJetstream == nil { - return validate.ErrEmptyBackendGroup - } - - if tunnelOpts.NatsJetstream.Args == nil { - return validate.ErrEmptyBackendArgs - } - - if tunnelOpts.NatsJetstream.Args.Subject == "" { - return ErrMissingSubject - } - - return nil -} diff --git a/backends/nats-streaming/tunnel.go b/backends/nats-streaming/tunnel.go deleted file mode 100644 index c4fc4c0e4..000000000 --- a/backends/nats-streaming/tunnel.go +++ /dev/null @@ -1,67 +0,0 @@ -package nats_streaming - -import ( - "context" - "fmt" - - "github.com/streamdal/plumber/tunnel" - - "github.com/pkg/errors" - - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - - "github.com/streamdal/plumber/validate" -) - -func (n *NatsStreaming) Tunnel(ctx context.Context, tunnelOpts *opts.TunnelOptions, tunnelSvc tunnel.ITunnel, errorCh chan<- *records.ErrorRecord) error { - if err := validateTunnelOptions(tunnelOpts); err != nil { - return errors.Wrap(err, "invalid tunnel options") - } - - llog := n.log.WithField("pkg", "nats-streaming/tunnel") - - if err := tunnelSvc.Start(ctx, "Nats Streaming", errorCh); err != nil { - return errors.Wrap(err, "unable to create tunnel") - } - - outboundCh := tunnelSvc.Read() - - // Continually loop looking for messages on the channel. 
- for { - select { - case outbound := <-outboundCh: - if err := n.stanClient.Publish(tunnelOpts.NatsStreaming.Args.Channel, outbound.Blob); err != nil { - err = fmt.Errorf("unable to replay message: %s", err) - llog.Error(err) - return err - } - - llog.Debugf("Replayed message to NATS streaming channel '%s' for replay '%s'", - tunnelOpts.NatsStreaming.Args.Channel, outbound.ReplayId) - case <-ctx.Done(): - llog.Debug("context cancelled") - return nil - } - } -} - -func validateTunnelOptions(tunnelOpts *opts.TunnelOptions) error { - if tunnelOpts == nil { - return validate.ErrEmptyTunnelOpts - } - - if tunnelOpts.NatsStreaming == nil { - return validate.ErrEmptyBackendGroup - } - - if tunnelOpts.NatsStreaming.Args == nil { - return validate.ErrEmptyBackendArgs - } - - if tunnelOpts.NatsStreaming.Args.Channel == "" { - return ErrEmptyChannel - } - - return nil -} diff --git a/backends/nats-streaming/tunnel_test.go b/backends/nats-streaming/tunnel_test.go deleted file mode 100644 index 9ee11439e..000000000 --- a/backends/nats-streaming/tunnel_test.go +++ /dev/null @@ -1,137 +0,0 @@ -package nats_streaming - -import ( - "context" - "errors" - "io/ioutil" - "time" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "github.com/sirupsen/logrus" - - "github.com/batchcorp/collector-schemas/build/go/protos/events" - - "github.com/batchcorp/plumber-schemas/build/go/protos/args" - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - - "github.com/streamdal/plumber/backends/nats-streaming/stanfakes" - - "github.com/streamdal/plumber/tunnel/tunnelfakes" - "github.com/streamdal/plumber/validate" -) - -var _ = Describe("Nats Streaming Backend", func() { - var tunnelOpts *opts.TunnelOptions - - BeforeEach(func() { - tunnelOpts = &opts.TunnelOptions{ - NatsStreaming: &opts.TunnelGroupNatsStreamingOptions{ - Args: &args.NatsStreamingWriteArgs{ - Channel: "testing", - }, - }, - } - }) - - Context("validateTunnelOptions", func() { - It("validates nil tunnel options", func() { - err := validateTunnelOptions(nil) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(validate.ErrEmptyTunnelOpts)) - }) - It("validates nil backend group", func() { - tunnelOpts.NatsStreaming = nil - err := validateTunnelOptions(tunnelOpts) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(validate.ErrEmptyBackendGroup)) - }) - It("validates empty backend args", func() { - tunnelOpts.NatsStreaming.Args = nil - err := validateTunnelOptions(tunnelOpts) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(validate.ErrEmptyBackendArgs)) - }) - It("validates empty topic", func() { - tunnelOpts.NatsStreaming.Args.Channel = "" - err := validateTunnelOptions(tunnelOpts) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(ErrEmptyChannel)) - }) - It("passes validation", func() { - err := validateTunnelOptions(tunnelOpts) - Expect(err).ToNot(HaveOccurred()) - }) - }) - - Context("Tunnel", func() { - var fakeTunnel *tunnelfakes.FakeITunnel - - BeforeEach(func() { - fakeTunnel = &tunnelfakes.FakeITunnel{} - fakeTunnel.ReadStub = func() chan *events.Outbound { - ch := make(chan *events.Outbound, 1) - ch <- 
&events.Outbound{Blob: []byte(`testing`)} - return ch - } - }) - - It("validates tunnel options", func() { - errorCh := make(chan *records.ErrorRecord) - err := (&NatsStreaming{}).Tunnel(context.Background(), nil, nil, errorCh) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring(validate.ErrEmptyTunnelOpts.Error())) - }) - - It("returns an error on publish failure", func() { - errTest := errors.New("test err") - - fakeStan := &stanfakes.FakeConn{} - fakeStan.PublishStub = func(string, []byte) error { - return errTest - } - - n := &NatsStreaming{ - stanClient: fakeStan, - log: logrus.NewEntry(&logrus.Logger{Out: ioutil.Discard}), - } - - ctx, cancel := context.WithCancel(context.Background()) - - go func() { - time.Sleep(time.Millisecond * 500) - cancel() - }() - - errorCh := make(chan *records.ErrorRecord) - err := n.Tunnel(ctx, tunnelOpts, fakeTunnel, errorCh) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring(errTest.Error())) - }) - - It("replays a message", func() { - fakeStan := &stanfakes.FakeConn{} - fakeStan.PublishStub = func(string, []byte) error { - return nil - } - - n := &NatsStreaming{ - stanClient: fakeStan, - log: logrus.NewEntry(&logrus.Logger{Out: ioutil.Discard}), - } - - ctx, cancel := context.WithCancel(context.Background()) - - go func() { - time.Sleep(time.Millisecond * 500) - cancel() - }() - - errorCh := make(chan *records.ErrorRecord) - err := n.Tunnel(ctx, tunnelOpts, fakeTunnel, errorCh) - Expect(err).ToNot(HaveOccurred()) - }) - }) - -}) diff --git a/backends/nats-streaming/write_test.go b/backends/nats-streaming/write_test.go index 4057c6852..8be79a66a 100644 --- a/backends/nats-streaming/write_test.go +++ b/backends/nats-streaming/write_test.go @@ -40,11 +40,6 @@ var _ = Describe("Nats Streaming Backend", func() { }) Context("validateWriteOptions", func() { - It("validates nil tunnel options", func() { - err := validateWriteOptions(nil) - Expect(err).To(HaveOccurred()) - 
Expect(err).To(Equal(validate.ErrEmptyWriteOpts)) - }) It("validates nil backend group", func() { writeOpts.NatsStreaming = nil err := validateWriteOptions(writeOpts) diff --git a/backends/nats/tunnel.go b/backends/nats/tunnel.go deleted file mode 100644 index 25de22699..000000000 --- a/backends/nats/tunnel.go +++ /dev/null @@ -1,67 +0,0 @@ -package nats - -import ( - "context" - - "github.com/pkg/errors" - - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - - "github.com/streamdal/plumber/tunnel" - "github.com/streamdal/plumber/validate" -) - -func (n *Nats) Tunnel(ctx context.Context, tunnelOpts *opts.TunnelOptions, tunnelSvc tunnel.ITunnel, errorCh chan<- *records.ErrorRecord) error { - if err := validateTunnelOptions(tunnelOpts); err != nil { - return errors.Wrap(err, "unable to validate tunnel options") - } - - llog := n.log.WithField("pkg", "nats/tunnel") - - if err := tunnelSvc.Start(ctx, "Nats", errorCh); err != nil { - return errors.Wrap(err, "unable to create tunnel") - } - - subject := tunnelOpts.Nats.Args.Subject - - outboundCh := tunnelSvc.Read() - - // Continually loop looking for messages on the channel. 
- for { - select { - case outbound := <-outboundCh: - if err := n.Client.Publish(subject, outbound.Blob); err != nil { - n.log.Errorf("Unable to replay message: %s", err) - break - } - - n.log.Debugf("Replayed message to Nats topic '%s' for replay '%s'", subject, outbound.ReplayId) - case <-ctx.Done(): - llog.Debug("context cancelled") - return nil - } - } - - return nil -} - -func validateTunnelOptions(tunnelOpts *opts.TunnelOptions) error { - if tunnelOpts == nil { - return validate.ErrEmptyTunnelOpts - } - - if tunnelOpts.Nats == nil { - return validate.ErrEmptyBackendGroup - } - - if tunnelOpts.Nats.Args == nil { - return validate.ErrEmptyBackendArgs - } - - if tunnelOpts.Nats.Args.Subject == "" { - return ErrMissingSubject - } - - return nil -} diff --git a/backends/nsq/tunnel.go b/backends/nsq/tunnel.go deleted file mode 100644 index 5fab6acae..000000000 --- a/backends/nsq/tunnel.go +++ /dev/null @@ -1,15 +0,0 @@ -package nsq - -import ( - "context" - - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - - "github.com/streamdal/plumber/tunnel" - "github.com/streamdal/plumber/types" -) - -func (n *NSQ) Tunnel(ctx context.Context, tunnelOpts *opts.TunnelOptions, tunnelSvc tunnel.ITunnel, errorCh chan<- *records.ErrorRecord) error { - return types.NotImplementedErr -} diff --git a/backends/pulsar/tunnel.go b/backends/pulsar/tunnel.go deleted file mode 100644 index ea3ef7592..000000000 --- a/backends/pulsar/tunnel.go +++ /dev/null @@ -1,70 +0,0 @@ -package pulsar - -import ( - "context" - - "github.com/apache/pulsar-client-go/pulsar" - "github.com/pkg/errors" - - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - - "github.com/streamdal/plumber/tunnel" - "github.com/streamdal/plumber/validate" -) - -func (p *Pulsar) Tunnel(ctx context.Context, tunnelOpts *opts.TunnelOptions, tunnelSvc tunnel.ITunnel, errorCh chan<- 
*records.ErrorRecord) error { - if err := validateTunnelOptions(tunnelOpts); err != nil { - return errors.Wrap(err, "invalid tunnel options") - } - - llog := p.log.WithField("pkg", "pulsar/tunnel") - - producer, err := p.client.CreateProducer(pulsar.ProducerOptions{Topic: tunnelOpts.Pulsar.Args.Topic}) - if err != nil { - return errors.Wrap(err, "unable to create Pulsar producer") - } - - if err := tunnelSvc.Start(ctx, "Apache Pulsar", errorCh); err != nil { - return errors.Wrap(err, "unable to create tunnel") - } - - outboundCh := tunnelSvc.Read() - - for { - select { - case outbound := <-outboundCh: - if _, err := producer.Send(ctx, &pulsar.ProducerMessage{Payload: outbound.Blob}); err != nil { - err = errors.Wrap(err, "Unable to replay message") - llog.Error(err) - return err - } - - llog.Debugf("Replayed message to Pulsar topic '%s' for replay '%s'", - tunnelOpts.Pulsar.Args.Topic, outbound.ReplayId) - case <-ctx.Done(): - llog.Debug("context cancelled") - return nil - } - } -} - -func validateTunnelOptions(tunnelOpts *opts.TunnelOptions) error { - if tunnelOpts == nil { - return validate.ErrEmptyTunnelOpts - } - - if tunnelOpts.Pulsar == nil { - return validate.ErrEmptyBackendGroup - } - - if tunnelOpts.Pulsar.Args == nil { - return validate.ErrEmptyBackendArgs - } - - if tunnelOpts.Pulsar.Args.Topic == "" { - return ErrEmptyTopic - } - - return nil -} diff --git a/backends/pulsar/tunnel_test.go b/backends/pulsar/tunnel_test.go deleted file mode 100644 index d3028e41b..000000000 --- a/backends/pulsar/tunnel_test.go +++ /dev/null @@ -1,170 +0,0 @@ -package pulsar - -import ( - "context" - "io/ioutil" - "time" - - "github.com/apache/pulsar-client-go/pulsar" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - - "github.com/batchcorp/collector-schemas/build/go/protos/events" - - "github.com/batchcorp/plumber-schemas/build/go/protos/args" - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - - "github.com/streamdal/plumber/backends/pulsar/pulsarfakes" - - "github.com/streamdal/plumber/tunnel/tunnelfakes" - "github.com/streamdal/plumber/validate" -) - -var _ = Describe("Pulsar Backend", func() { - var tunnelOpts *opts.TunnelOptions - - BeforeEach(func() { - tunnelOpts = &opts.TunnelOptions{ - Pulsar: &opts.TunnelGroupPulsarOptions{ - Args: &args.PulsarWriteArgs{ - Topic: "testing", - }, - }, - } - }) - - Context("validateTunnelOptions", func() { - It("validates nil tunnel options", func() { - err := validateTunnelOptions(nil) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(validate.ErrEmptyTunnelOpts)) - }) - It("validates nil backend group", func() { - tunnelOpts.Pulsar = nil - err := validateTunnelOptions(tunnelOpts) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(validate.ErrEmptyBackendGroup)) - }) - It("validates empty backend args", func() { - tunnelOpts.Pulsar.Args = nil - err := validateTunnelOptions(tunnelOpts) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(validate.ErrEmptyBackendArgs)) - }) - It("validates empty topic", func() { - tunnelOpts.Pulsar.Args.Topic = "" - err := validateTunnelOptions(tunnelOpts) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(ErrEmptyTopic)) - }) - It("passes validation", func() { - err := validateTunnelOptions(tunnelOpts) - Expect(err).ToNot(HaveOccurred()) - }) - }) - - Context("Tunnel", func() { - var fakeTunnel *tunnelfakes.FakeITunnel - - BeforeEach(func() { - fakeTunnel = &tunnelfakes.FakeITunnel{} - fakeTunnel.ReadStub = func() chan *events.Outbound { - ch := make(chan *events.Outbound, 1) - ch <- &events.Outbound{Blob: []byte(`testing`)} 
- return ch - } - }) - - It("validates tunnel options", func() { - errorCh := make(chan *records.ErrorRecord) - err := (&Pulsar{}).Tunnel(context.Background(), nil, nil, errorCh) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring(validate.ErrEmptyTunnelOpts.Error())) - }) - - It("returns error when producer fails to create", func() { - testErr := errors.New("test err") - - fakeClient := &pulsarfakes.FakeClient{} - fakeClient.CreateProducerStub = func(pulsar.ProducerOptions) (pulsar.Producer, error) { - return nil, testErr - } - - p := &Pulsar{ - client: fakeClient, - log: logrus.NewEntry(&logrus.Logger{Out: ioutil.Discard}), - } - - errorCh := make(chan *records.ErrorRecord) - err := p.Tunnel(context.Background(), tunnelOpts, fakeTunnel, errorCh) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring(testErr.Error())) - }) - - It("returns an error when publish fails", func() { - fakeProducer := &pulsarfakes.FakeProducer{} - fakePulsar := &pulsarfakes.FakeClient{} - fakePulsar.CreateProducerStub = func(pulsar.ProducerOptions) (pulsar.Producer, error) { - fakeProducer.SendStub = func(context.Context, *pulsar.ProducerMessage) (pulsar.MessageID, error) { - return nil, errors.New("test err") - } - return fakeProducer, nil - } - - ctx, cancel := context.WithCancel(context.Background()) - go func() { - time.Sleep(time.Millisecond * 500) - cancel() - }() - - p := &Pulsar{ - client: fakePulsar, - log: logrus.NewEntry(&logrus.Logger{Out: ioutil.Discard}), - } - - errorCh := make(chan *records.ErrorRecord) - err := p.Tunnel(ctx, tunnelOpts, fakeTunnel, errorCh) - - // Allow start goroutine to launch - time.Sleep(time.Millisecond * 100) - - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring("Unable to replay message")) - Expect(fakeTunnel.StartCallCount()).To(Equal(1)) - Expect(fakeTunnel.ReadCallCount()).To(Equal(1)) - Expect(fakeProducer.SendCallCount()).To(Equal(1)) - }) - - It("replays a message", func() { - 
fakeProducer := &pulsarfakes.FakeProducer{} - fakePulsar := &pulsarfakes.FakeClient{} - fakePulsar.CreateProducerStub = func(pulsar.ProducerOptions) (pulsar.Producer, error) { - fakeProducer.SendStub = func(context.Context, *pulsar.ProducerMessage) (pulsar.MessageID, error) { - return &pulsarfakes.FakeMessageID{}, nil - } - return fakeProducer, nil - } - - ctx, cancel := context.WithCancel(context.Background()) - go func() { - time.Sleep(time.Millisecond * 500) - cancel() - }() - - p := &Pulsar{ - client: fakePulsar, - log: logrus.NewEntry(&logrus.Logger{Out: ioutil.Discard}), - } - - errorCh := make(chan *records.ErrorRecord) - err := p.Tunnel(ctx, tunnelOpts, fakeTunnel, errorCh) - Expect(err).ToNot(HaveOccurred()) - Expect(fakeTunnel.StartCallCount()).To(Equal(1)) - Expect(fakeTunnel.ReadCallCount()).To(Equal(1)) - Expect(fakeProducer.SendCallCount()).To(Equal(1)) - }) - }) -}) diff --git a/backends/rabbit-streams/tunnel.go b/backends/rabbit-streams/tunnel.go deleted file mode 100644 index a25b8ae43..000000000 --- a/backends/rabbit-streams/tunnel.go +++ /dev/null @@ -1,83 +0,0 @@ -package rabbit_streams - -import ( - "context" - "fmt" - - "github.com/pkg/errors" - "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/amqp" - "github.com/rabbitmq/rabbitmq-stream-go-client/pkg/stream" - - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - - "github.com/streamdal/plumber/tunnel" - "github.com/streamdal/plumber/validate" -) - -func (r *RabbitStreams) Tunnel(ctx context.Context, tunnelOpts *opts.TunnelOptions, tunnelSvc tunnel.ITunnel, errorCh chan<- *records.ErrorRecord) error { - if err := validateTunnelOptions(tunnelOpts); err != nil { - return errors.Wrap(err, "invalid tunnel options") - } - - llog := r.log.WithField("pkg", "rabbit-streams/tunnel") - - // Make available to handleErr - r.streamName = tunnelOpts.RabbitStreams.Args.Stream - - producer, err := 
r.client.NewProducer(tunnelOpts.RabbitStreams.Args.Stream, - stream.NewProducerOptions(). - SetProducerName(tunnelOpts.RedisStreams.Args.WriteId). - SetBatchSize(1)) - - if err != nil { - return errors.Wrap(err, "unable to create rabbitmq streams producer") - } - - defer producer.Close() - - if err := tunnelSvc.Start(ctx, "RabbitMQ Streams", errorCh); err != nil { - return errors.Wrap(err, "unable to create tunnel") - } - - outboundCh := tunnelSvc.Read() - - // Continually loop looking for messages on the channel. - for { - select { - case outbound := <-outboundCh: - if err := producer.Send(amqp.NewMessage(outbound.Blob)); err != nil { - err = fmt.Errorf("unable to replay message: %s", err) - llog.Error(err) - return err - } - - llog.Debugf("Replayed message to Rabbit stream '%s' for replay '%s'", - tunnelOpts.RabbitStreams.Args.Stream, outbound.ReplayId) - - case <-ctx.Done(): - llog.Debug("context cancelled") - return nil - } - } -} - -func validateTunnelOptions(tunnelOpts *opts.TunnelOptions) error { - if tunnelOpts == nil { - return validate.ErrEmptyTunnelOpts - } - - if tunnelOpts.RabbitStreams == nil { - return validate.ErrEmptyBackendGroup - } - - if tunnelOpts.RabbitStreams.Args == nil { - return validate.ErrEmptyBackendArgs - } - - if tunnelOpts.RabbitStreams.Args.Stream == "" { - return ErrEmptyStream - } - - return nil -} diff --git a/backends/rabbitmq/tunnel.go b/backends/rabbitmq/tunnel.go deleted file mode 100644 index bb3eef911..000000000 --- a/backends/rabbitmq/tunnel.go +++ /dev/null @@ -1,80 +0,0 @@ -package rabbitmq - -import ( - "context" - - "github.com/pkg/errors" - - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - - "github.com/streamdal/plumber/tunnel" - "github.com/streamdal/plumber/validate" -) - -func (r *RabbitMQ) Tunnel(ctx context.Context, opts *opts.TunnelOptions, tunnelSvc tunnel.ITunnel, errorCh chan<- *records.ErrorRecord) error { - if err := 
validateTunnelOptions(opts); err != nil { - return errors.Wrap(err, "unable to validate tunnel options") - } - - llog := r.log.WithField("pkg", "rabbitmq/tunnel") - - // Nil check so that this can be injected into the struct for testing - if r.client == nil { - producer, err := r.newRabbitForWrite(opts.Rabbit.Args) - if err != nil { - return errors.Wrap(err, "unable to create rabbitmq producer") - } - - r.client = producer - } - - defer r.client.Close() - - if err := tunnelSvc.Start(ctx, "RabbitMQ", errorCh); err != nil { - return errors.Wrap(err, "unable to create tunnel") - } - - outboundCh := tunnelSvc.Read() - - // Continually loop looking for messages on the channel. - for { - select { - case outbound := <-outboundCh: - if err := r.client.Publish(ctx, opts.Rabbit.Args.RoutingKey, outbound.Blob); err != nil { - err = errors.Wrap(err, "Unable to reply message") - llog.Error(err) - return err - } - - case <-ctx.Done(): - llog.Debug("context cancelled") - return nil - } - } -} - -func validateTunnelOptions(tunnelOpts *opts.TunnelOptions) error { - if tunnelOpts == nil { - return validate.ErrEmptyTunnelOpts - } - - if tunnelOpts.Rabbit == nil { - return validate.ErrEmptyBackendGroup - } - - args := tunnelOpts.Rabbit.Args - if args == nil { - return validate.ErrEmptyBackendArgs - } - - if args.RoutingKey == "" { - return ErrEmptyRoutingKey - } - - if args.ExchangeName == "" { - return ErrEmptyExchangeName - } - - return nil -} diff --git a/backends/rabbitmq/tunnel_test.go b/backends/rabbitmq/tunnel_test.go deleted file mode 100644 index 8c312fc78..000000000 --- a/backends/rabbitmq/tunnel_test.go +++ /dev/null @@ -1,145 +0,0 @@ -package rabbitmq - -import ( - "context" - "io/ioutil" - "time" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - - "github.com/batchcorp/collector-schemas/build/go/protos/events" - - "github.com/batchcorp/plumber-schemas/build/go/protos/args" - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - - "github.com/streamdal/plumber/backends/rabbitmq/rabbitfakes" - "github.com/streamdal/plumber/tunnel/tunnelfakes" - "github.com/streamdal/plumber/validate" -) - -var _ = Describe("RabbitMQ Backend", func() { - var tunnelOpts *opts.TunnelOptions - var fakeTunnel *tunnelfakes.FakeITunnel - - BeforeEach(func() { - tunnelOpts = &opts.TunnelOptions{ - Rabbit: &opts.TunnelGroupRabbitOptions{ - Args: &args.RabbitWriteArgs{ - ExchangeName: "testing", - RoutingKey: "testing", - }, - }, - } - - fakeTunnel = &tunnelfakes.FakeITunnel{} - fakeTunnel.ReadStub = func() chan *events.Outbound { - ch := make(chan *events.Outbound, 1) - ch <- &events.Outbound{Blob: []byte(`testing`)} - return ch - } - }) - - Context("validateTunnelOptions", func() { - It("validates nil tunnel options", func() { - err := validateTunnelOptions(nil) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(validate.ErrEmptyTunnelOpts)) - }) - It("validates nil backend group", func() { - tunnelOpts.Rabbit = nil - err := validateTunnelOptions(tunnelOpts) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(validate.ErrEmptyBackendGroup)) - }) - It("validates empty backend args", func() { - tunnelOpts.Rabbit.Args = nil - err := validateTunnelOptions(tunnelOpts) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(validate.ErrEmptyBackendArgs)) - }) - It("validates empty routing key", func() { - tunnelOpts.Rabbit.Args.RoutingKey = "" - err := validateTunnelOptions(tunnelOpts) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(ErrEmptyRoutingKey)) - }) - It("validates empty exchange name", func() { - tunnelOpts.Rabbit.Args.ExchangeName = "" - err := 
validateTunnelOptions(tunnelOpts) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(ErrEmptyExchangeName)) - }) - It("passes validation", func() { - err := validateTunnelOptions(tunnelOpts) - Expect(err).ToNot(HaveOccurred()) - }) - }) - - Context("Tunnel", func() { - It("validates tunnel options", func() { - errorCh := make(chan *records.ErrorRecord) - err := (&RabbitMQ{}).Tunnel(context.Background(), nil, nil, errorCh) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring(validate.ErrEmptyTunnelOpts.Error())) - }) - }) - - It("returns an when publish fails", func() { - testErr := errors.New("test err") - - fakeRabbit := &rabbitfakes.FakeIRabbit{} - fakeRabbit.PublishStub = func(context.Context, string, []byte) error { - return testErr - } - - ctx, cancel := context.WithCancel(context.Background()) - go func() { - time.Sleep(time.Millisecond * 500) - cancel() - }() - - p := &RabbitMQ{ - client: fakeRabbit, - log: logrus.NewEntry(&logrus.Logger{Out: ioutil.Discard}), - } - - errorCh := make(chan *records.ErrorRecord) - err := p.Tunnel(ctx, tunnelOpts, fakeTunnel, errorCh) - - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring(testErr.Error())) - Expect(fakeTunnel.ReadCallCount()).To(Equal(1)) - Expect(fakeRabbit.PublishCallCount()).To(Equal(1)) - }) - - It("replays a message", func() { - fakeRabbit := &rabbitfakes.FakeIRabbit{} - fakeRabbit.PublishStub = func(context.Context, string, []byte) error { - return nil - } - - ctx, cancel := context.WithCancel(context.Background()) - go func() { - time.Sleep(time.Millisecond * 500) - cancel() - }() - - p := &RabbitMQ{ - client: fakeRabbit, - log: logrus.NewEntry(&logrus.Logger{Out: ioutil.Discard}), - } - - errorCh := make(chan *records.ErrorRecord) - err := p.Tunnel(ctx, tunnelOpts, fakeTunnel, errorCh) - - Expect(err).ToNot(HaveOccurred()) - Expect(fakeTunnel.StartCallCount()).To(Equal(1)) - Expect(fakeTunnel.ReadCallCount()).To(Equal(1)) - 
Expect(fakeRabbit.PublishCallCount()).To(Equal(1)) - }) - -}) diff --git a/backends/rpubsub/tunnel.go b/backends/rpubsub/tunnel.go deleted file mode 100644 index 02add8ba4..000000000 --- a/backends/rpubsub/tunnel.go +++ /dev/null @@ -1,68 +0,0 @@ -package rpubsub - -import ( - "context" - - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - - "github.com/streamdal/plumber/tunnel" - "github.com/streamdal/plumber/validate" -) - -func (r *RedisPubsub) Tunnel(ctx context.Context, tunnelOpts *opts.TunnelOptions, tunnelSvc tunnel.ITunnel, errorCh chan<- *records.ErrorRecord) error { - if err := validateTunnelOpts(tunnelOpts); err != nil { - return errors.Wrap(err, "invalid tunnel options") - } - - llog := logrus.WithField("pkg", "rpubsub/tunnel") - - if err := tunnelSvc.Start(ctx, "Redis PubSub", errorCh); err != nil { - return errors.Wrap(err, "unable to create tunnel") - } - - outboundCh := tunnelSvc.Read() - - // Continually loop looking for messages on the channel. 
- for { - select { - case outbound := <-outboundCh: - for _, ch := range tunnelOpts.RedisPubsub.Args.Channels { - err := r.client.Publish(context.Background(), ch, outbound.Blob).Err() - if err != nil { - llog.Errorf("Unable to replay message: %s", err) - break - } - - llog.Debugf("Replayed message to Redis PubSub channel '%s' for replay '%s'", ch, outbound.ReplayId) - } - case <-ctx.Done(): - llog.Debug("context cancelled") - return nil - } - - } -} - -func validateTunnelOpts(tunnelOpts *opts.TunnelOptions) error { - if tunnelOpts == nil { - return validate.ErrEmptyTunnelOpts - } - - if tunnelOpts.RedisPubsub == nil { - return validate.ErrEmptyBackendGroup - } - - if tunnelOpts.RedisPubsub.Args == nil { - return validate.ErrEmptyBackendArgs - } - - if len(tunnelOpts.RedisPubsub.Args.Channels) == 0 { - return ErrMissingChannel - } - - return nil -} diff --git a/backends/rpubsub/tunnel_test.go b/backends/rpubsub/tunnel_test.go deleted file mode 100644 index d7b1b0ee5..000000000 --- a/backends/rpubsub/tunnel_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package rpubsub - -import ( - "context" - "io/ioutil" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "github.com/sirupsen/logrus" - - "github.com/batchcorp/plumber-schemas/build/go/protos/args" - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - - "github.com/streamdal/plumber/validate" -) - -var _ = Describe("Redis PubSub Backend", func() { - var r *RedisPubsub - var tunnelOpts *opts.TunnelOptions - - BeforeEach(func() { - r = &RedisPubsub{ - connArgs: &args.RedisPubSubConn{}, - log: logrus.NewEntry(&logrus.Logger{Out: ioutil.Discard}), - } - - tunnelOpts = &opts.TunnelOptions{ - RedisPubsub: &opts.TunnelGroupRedisPubSubOptions{ - Args: &args.RedisPubSubWriteArgs{ - Channels: []string{"test"}, - }, - }, - } - }) - - Context("validateTunnelOpts", func() { - It("validates nil tunnel options", func() { - err := validateTunnelOpts(nil) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(validate.ErrEmptyTunnelOpts)) - }) - It("validates nil backend group", func() { - tunnelOpts.RedisPubsub = nil - err := validateTunnelOpts(tunnelOpts) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(validate.ErrEmptyBackendGroup)) - }) - It("validates empty backend args", func() { - tunnelOpts.RedisPubsub.Args = nil - err := validateTunnelOpts(tunnelOpts) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(validate.ErrEmptyBackendArgs)) - }) - }) - - Context("Tunnel", func() { - It("validates dynaqmic options", func() { - errorCh := make(chan *records.ErrorRecord) - err := r.Tunnel(context.Background(), nil, nil, errorCh) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring(validate.ErrEmptyTunnelOpts.Error())) - }) - - It("Publishes message", func() { - // Not tested due to lack non-exported struct returns - }) - }) -}) diff --git a/backends/rstreams/tunnel.go b/backends/rstreams/tunnel.go deleted file mode 100644 index 9ee65da2f..000000000 --- a/backends/rstreams/tunnel.go +++ /dev/null @@ -1,75 +0,0 @@ -package rstreams - -import ( - 
"context" - - "github.com/go-redis/redis/v8" - "github.com/pkg/errors" - - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - - "github.com/streamdal/plumber/tunnel" - "github.com/streamdal/plumber/validate" -) - -func (r *RedisStreams) Tunnel(ctx context.Context, tunnelOpts *opts.TunnelOptions, tunnelSvc tunnel.ITunnel, errorCh chan<- *records.ErrorRecord) error { - if err := validateTunnelOptions(tunnelOpts); err != nil { - return errors.Wrap(err, "unable to validate tunnel options") - } - - llog := r.log.WithField("pkg", "rstreams/tunnel") - - if err := tunnelSvc.Start(ctx, "Redis Streams", errorCh); err != nil { - return errors.Wrap(err, "unable to create tunnel") - } - - outboundCh := tunnelSvc.Read() - - for { - select { - case outbound := <-outboundCh: - for _, streamName := range tunnelOpts.RedisStreams.Args.Streams { - _, err := r.client.XAdd(ctx, &redis.XAddArgs{ - Stream: streamName, - ID: tunnelOpts.RedisStreams.Args.WriteId, - Values: map[string]interface{}{ - tunnelOpts.RedisStreams.Args.Key: outbound.Blob, - }, - }).Result() - if err != nil { - r.log.Errorf("unable to write message to '%s': %s", streamName, err) - continue - } - - r.log.Infof("Successfully wrote message to stream '%s' with key '%s' for replay '%s'", - streamName, tunnelOpts.RedisStreams.Args.Key, outbound.ReplayId) - } - case <-ctx.Done(): - llog.Debug("context cancelled") - return nil - } - } - - return nil -} - -func validateTunnelOptions(tunnelOpts *opts.TunnelOptions) error { - if tunnelOpts == nil { - return validate.ErrEmptyTunnelOpts - } - - if tunnelOpts.RedisStreams == nil { - return validate.ErrEmptyBackendGroup - } - - if tunnelOpts.RedisStreams.Args == nil { - return validate.ErrEmptyBackendArgs - } - - if len(tunnelOpts.RedisStreams.Args.Streams) == 0 { - return ErrMissingStream - } - - return nil -} diff --git a/backends/rstreams/tunnel_test.go b/backends/rstreams/tunnel_test.go deleted file 
mode 100644 index 72348555b..000000000 --- a/backends/rstreams/tunnel_test.go +++ /dev/null @@ -1,75 +0,0 @@ -package rstreams - -import ( - "context" - "io/ioutil" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - "github.com/sirupsen/logrus" - - "github.com/batchcorp/plumber-schemas/build/go/protos/args" - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - - "github.com/streamdal/plumber/validate" -) - -var _ = Describe("Redis Streams Backend", func() { - var r *RedisStreams - var tunnelOpts *opts.TunnelOptions - - BeforeEach(func() { - r = &RedisStreams{ - connArgs: &args.RedisStreamsConn{}, - log: logrus.NewEntry(&logrus.Logger{Out: ioutil.Discard}), - } - - tunnelOpts = &opts.TunnelOptions{ - RedisStreams: &opts.TunnelGroupRedisStreamsOptions{ - Args: &args.RedisStreamsWriteArgs{ - Streams: []string{"test"}, - }, - }, - } - }) - - Context("validateTunnelOptions", func() { - It("validates nil writes options", func() { - err := validateTunnelOptions(nil) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(validate.ErrEmptyTunnelOpts)) - }) - It("validates nil backend group", func() { - tunnelOpts.RedisStreams = nil - err := validateTunnelOptions(tunnelOpts) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(validate.ErrEmptyBackendGroup)) - }) - It("validates empty backend args", func() { - tunnelOpts.RedisStreams.Args = nil - err := validateTunnelOptions(tunnelOpts) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(validate.ErrEmptyBackendArgs)) - }) - It("validates missing stream", func() { - tunnelOpts.RedisStreams.Args.Streams = nil - err := validateTunnelOptions(tunnelOpts) - Expect(err).To(HaveOccurred()) - Expect(err).To(Equal(ErrMissingStream)) - }) - }) - - Context("Tunnel", func() { - It("validates tunnel options", func() { - errorCh := make(chan *records.ErrorRecord) - err := r.Tunnel(context.Background(), nil, nil, errorCh) - 
Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring(validate.ErrEmptyTunnelOpts.Error())) - }) - - It("Publishes message", func() { - // Not tested due to lack of interface - }) - }) -}) diff --git a/bus/broadcast_consumer.go b/bus/broadcast_consumer.go index c34a67229..ba31223b9 100644 --- a/bus/broadcast_consumer.go +++ b/bus/broadcast_consumer.go @@ -58,18 +58,6 @@ func (b *Bus) broadcastCallback(ctx context.Context, natsMsg *nats.Msg) error { case ResumeRelay: err = b.doResumeRelay(ctx, msg) - // Tunnel - case CreateTunnel: - err = b.doCreateTunnel(ctx, msg) - case UpdateTunnel: - err = b.doUpdateTunnel(ctx, msg) - case DeleteTunnel: - err = b.doDeleteTunnel(ctx, msg) - case StopTunnel: - err = b.doStopTunnel(ctx, msg) - case ResumeTunnel: - err = b.doResumeTunnel(ctx, msg) - default: llog.Debugf("unrecognized action '%s' in msg on subj '%s' - skipping", msg.Action, natsMsg.Subject) } diff --git a/bus/broadcast_consumer_connection.go b/bus/broadcast_consumer_connection.go index 6593ab47d..495296c29 100644 --- a/bus/broadcast_consumer_connection.go +++ b/bus/broadcast_consumer_connection.go @@ -49,19 +49,9 @@ func (b *Bus) doUpdateConnection(ctx context.Context, msg *Message) error { } func (b *Bus) doDeleteConnection(ctx context.Context, msg *Message) error { - b.config.PersistentConfig.TunnelsMutex.RLock() - defer b.config.PersistentConfig.TunnelsMutex.RUnlock() b.config.PersistentConfig.RelaysMutex.RLock() defer b.config.PersistentConfig.RelaysMutex.RUnlock() - // Ensure this connection isn't being used by any tunnels - for id, tunnel := range b.config.PersistentConfig.Tunnels { - if tunnel.Options.ConnectionId == id { - return fmt.Errorf("cannot delete connection '%s' because it is in use by tunnel '%s'", - id, tunnel.Options.XTunnelId) - } - } - // Ensure this connection isn't being used by any relays for id, relay := range b.config.PersistentConfig.Relays { if relay.Options.ConnectionId == id { diff --git 
a/bus/broadcast_consumer_tunnel.go b/bus/broadcast_consumer_tunnel.go deleted file mode 100644 index f7ee5a9ef..000000000 --- a/bus/broadcast_consumer_tunnel.go +++ /dev/null @@ -1,114 +0,0 @@ -package bus - -import ( - "context" - "fmt" - - "github.com/golang/protobuf/proto" - "github.com/pkg/errors" - - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - - "github.com/streamdal/plumber/validate" -) - -func (b *Bus) doCreateTunnel(ctx context.Context, msg *Message) error { - tunnelOptions := &opts.TunnelOptions{} - if err := proto.Unmarshal(msg.Data, tunnelOptions); err != nil { - return errors.Wrap(err, "unable to unmarshal message into opts.TunnelOptions") - } - - if err := validate.TunnelOptionsForServer(tunnelOptions); err != nil { - return errors.Wrap(err, "tunnel option validation failed") - } - - if _, err := b.config.Actions.CreateTunnel(ctx, tunnelOptions); err != nil { - return errors.Wrap(err, "unable to create tunnel") - } - - b.log.Infof("Created tunnel '%s' (from broadcast msg)", tunnelOptions.XTunnelId) - - return nil -} - -func (b *Bus) doUpdateTunnel(ctx context.Context, msg *Message) error { - tunnelOptions := &opts.TunnelOptions{} - if err := proto.Unmarshal(msg.Data, tunnelOptions); err != nil { - return errors.Wrap(err, "unable to unmarshal message into opts.TunnelOptions") - } - - if tunnelOptions.XTunnelId == "" { - return errors.New("tunnel id in options cannot be empty") - } - - if _, err := b.config.Actions.UpdateTunnel(ctx, tunnelOptions.XTunnelId, tunnelOptions); err != nil { - return fmt.Errorf("unable to update tunnel '%s': %s", tunnelOptions.XTunnelId, err) - } - - b.log.Infof("Updated tunnel '%s' (from broadcast msg)", tunnelOptions.XTunnelId) - - return nil -} - -func (b *Bus) doStopTunnel(ctx context.Context, msg *Message) error { - // Only unmarshalling to get XTunnelId - we'll be operating off of what's in - // our cache. 
- tunnelOptions := &opts.TunnelOptions{} - if err := proto.Unmarshal(msg.Data, tunnelOptions); err != nil { - return errors.Wrap(err, "unable to unmarshal message into opts.TunnelOptions") - } - - if tunnelOptions.XTunnelId == "" { - return errors.New("tunnel id in options cannot be empty") - } - - if _, err := b.config.Actions.StopTunnel(ctx, tunnelOptions.XTunnelId); err != nil { - return fmt.Errorf("unable to stop tunnel '%s': %s", tunnelOptions.XTunnelId, err) - } - - b.log.Infof("Stopped tunnel '%s' (from broadcast msg)", tunnelOptions.XTunnelId) - - return nil -} - -func (b *Bus) doResumeTunnel(ctx context.Context, msg *Message) error { - // Only unmarshalling to get XTunnelId - we'll be operating off of what's in - // our cache. - tunnelOptions := &opts.TunnelOptions{} - if err := proto.Unmarshal(msg.Data, tunnelOptions); err != nil { - return errors.Wrap(err, "unable to unmarshal message into opts.TunnelOptions") - } - - if tunnelOptions.XTunnelId == "" { - return errors.New("tunnel id in options cannot be empty") - } - - if _, err := b.config.Actions.ResumeTunnel(ctx, tunnelOptions.XTunnelId); err != nil { - return fmt.Errorf("unable to resume tunnel '%s': %s", tunnelOptions.XTunnelId, err) - } - - b.log.Infof("Resumed tunnel '%s' (from broadcast msg)", tunnelOptions.XTunnelId) - - return nil -} - -func (b *Bus) doDeleteTunnel(ctx context.Context, msg *Message) error { - // Only unmarshalling to get XTunnelId - we'll be operating off of what's in - // our cache. 
- tunnelOptions := &opts.TunnelOptions{} - if err := proto.Unmarshal(msg.Data, tunnelOptions); err != nil { - return errors.Wrap(err, "unable to unmarshal message into opts.TunnelOptions") - } - - if tunnelOptions.XTunnelId == "" { - return errors.New("tunnel id in options cannot be empty") - } - - if err := b.config.Actions.DeleteTunnel(ctx, tunnelOptions.XTunnelId); err != nil { - return fmt.Errorf("unable to delete tunnel '%s': %s", tunnelOptions.XTunnelId, err) - } - - b.log.Infof("Deleted tunnel '%s' (from broadcast msg)", tunnelOptions.XTunnelId) - - return nil -} diff --git a/bus/bus.go b/bus/bus.go index 728e75e3f..25ed4a342 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -44,12 +44,6 @@ type IBus interface { PublishDeleteRelay(ctx context.Context, relay *opts.RelayOptions) error PublishStopRelay(ctx context.Context, relay *opts.RelayOptions) error PublishResumeRelay(ctx context.Context, relay *opts.RelayOptions) error - - PublishCreateTunnel(ctx context.Context, tunnelOptions *opts.TunnelOptions) error - PublishUpdateTunnel(ctx context.Context, tunnelOptions *opts.TunnelOptions) error - PublishStopTunnel(ctx context.Context, tunnelOptions *opts.TunnelOptions) error - PublishResumeTunnel(ctx context.Context, tunnelOptions *opts.TunnelOptions) error - PublishDeleteTunnel(ctx context.Context, tunnelOptions *opts.TunnelOptions) error } type Bus struct { diff --git a/bus/bus_noop.go b/bus/bus_noop.go index f4c8418ac..1fb9ec929 100644 --- a/bus/bus_noop.go +++ b/bus/bus_noop.go @@ -48,23 +48,3 @@ func (n NoOpBus) PublishStopRelay(_ context.Context, _ *opts.RelayOptions) error func (n NoOpBus) PublishResumeRelay(_ context.Context, _ *opts.RelayOptions) error { return nil } - -func (n NoOpBus) PublishCreateTunnel(_ context.Context, _ *opts.TunnelOptions) error { - return nil -} - -func (n NoOpBus) PublishUpdateTunnel(_ context.Context, _ *opts.TunnelOptions) error { - return nil -} - -func (n NoOpBus) PublishStopTunnel(_ context.Context, _ *opts.TunnelOptions) error { 
- return nil -} - -func (n NoOpBus) PublishResumeTunnel(_ context.Context, _ *opts.TunnelOptions) error { - return nil -} - -func (n NoOpBus) PublishDeleteTunnel(_ context.Context, _ *opts.TunnelOptions) error { - return nil -} diff --git a/bus/busfakes/fake_ibus.go b/bus/busfakes/fake_ibus.go index f20c103bc..da97d46a5 100644 --- a/bus/busfakes/fake_ibus.go +++ b/bus/busfakes/fake_ibus.go @@ -6,7 +6,6 @@ import ( "sync" "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/streamdal/plumber/bus" ) @@ -35,18 +34,6 @@ type FakeIBus struct { publishCreateRelayReturnsOnCall map[int]struct { result1 error } - PublishCreateTunnelStub func(context.Context, *opts.TunnelOptions) error - publishCreateTunnelMutex sync.RWMutex - publishCreateTunnelArgsForCall []struct { - arg1 context.Context - arg2 *opts.TunnelOptions - } - publishCreateTunnelReturns struct { - result1 error - } - publishCreateTunnelReturnsOnCall map[int]struct { - result1 error - } PublishDeleteConnectionStub func(context.Context, *opts.ConnectionOptions) error publishDeleteConnectionMutex sync.RWMutex publishDeleteConnectionArgsForCall []struct { @@ -71,18 +58,6 @@ type FakeIBus struct { publishDeleteRelayReturnsOnCall map[int]struct { result1 error } - PublishDeleteTunnelStub func(context.Context, *opts.TunnelOptions) error - publishDeleteTunnelMutex sync.RWMutex - publishDeleteTunnelArgsForCall []struct { - arg1 context.Context - arg2 *opts.TunnelOptions - } - publishDeleteTunnelReturns struct { - result1 error - } - publishDeleteTunnelReturnsOnCall map[int]struct { - result1 error - } PublishResumeRelayStub func(context.Context, *opts.RelayOptions) error publishResumeRelayMutex sync.RWMutex publishResumeRelayArgsForCall []struct { @@ -95,18 +70,6 @@ type FakeIBus struct { publishResumeRelayReturnsOnCall map[int]struct { result1 error } - PublishResumeTunnelStub func(context.Context, *opts.TunnelOptions) error - publishResumeTunnelMutex sync.RWMutex - publishResumeTunnelArgsForCall 
[]struct { - arg1 context.Context - arg2 *opts.TunnelOptions - } - publishResumeTunnelReturns struct { - result1 error - } - publishResumeTunnelReturnsOnCall map[int]struct { - result1 error - } PublishStopRelayStub func(context.Context, *opts.RelayOptions) error publishStopRelayMutex sync.RWMutex publishStopRelayArgsForCall []struct { @@ -119,18 +82,6 @@ type FakeIBus struct { publishStopRelayReturnsOnCall map[int]struct { result1 error } - PublishStopTunnelStub func(context.Context, *opts.TunnelOptions) error - publishStopTunnelMutex sync.RWMutex - publishStopTunnelArgsForCall []struct { - arg1 context.Context - arg2 *opts.TunnelOptions - } - publishStopTunnelReturns struct { - result1 error - } - publishStopTunnelReturnsOnCall map[int]struct { - result1 error - } PublishUpdateConnectionStub func(context.Context, *opts.ConnectionOptions) error publishUpdateConnectionMutex sync.RWMutex publishUpdateConnectionArgsForCall []struct { @@ -155,18 +106,6 @@ type FakeIBus struct { publishUpdateRelayReturnsOnCall map[int]struct { result1 error } - PublishUpdateTunnelStub func(context.Context, *opts.TunnelOptions) error - publishUpdateTunnelMutex sync.RWMutex - publishUpdateTunnelArgsForCall []struct { - arg1 context.Context - arg2 *opts.TunnelOptions - } - publishUpdateTunnelReturns struct { - result1 error - } - publishUpdateTunnelReturnsOnCall map[int]struct { - result1 error - } StartStub func(context.Context) error startMutex sync.RWMutex startArgsForCall []struct { @@ -316,68 +255,6 @@ func (fake *FakeIBus) PublishCreateRelayReturnsOnCall(i int, result1 error) { }{result1} } -func (fake *FakeIBus) PublishCreateTunnel(arg1 context.Context, arg2 *opts.TunnelOptions) error { - fake.publishCreateTunnelMutex.Lock() - ret, specificReturn := fake.publishCreateTunnelReturnsOnCall[len(fake.publishCreateTunnelArgsForCall)] - fake.publishCreateTunnelArgsForCall = append(fake.publishCreateTunnelArgsForCall, struct { - arg1 context.Context - arg2 *opts.TunnelOptions - }{arg1, 
arg2}) - stub := fake.PublishCreateTunnelStub - fakeReturns := fake.publishCreateTunnelReturns - fake.recordInvocation("PublishCreateTunnel", []interface{}{arg1, arg2}) - fake.publishCreateTunnelMutex.Unlock() - if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeIBus) PublishCreateTunnelCallCount() int { - fake.publishCreateTunnelMutex.RLock() - defer fake.publishCreateTunnelMutex.RUnlock() - return len(fake.publishCreateTunnelArgsForCall) -} - -func (fake *FakeIBus) PublishCreateTunnelCalls(stub func(context.Context, *opts.TunnelOptions) error) { - fake.publishCreateTunnelMutex.Lock() - defer fake.publishCreateTunnelMutex.Unlock() - fake.PublishCreateTunnelStub = stub -} - -func (fake *FakeIBus) PublishCreateTunnelArgsForCall(i int) (context.Context, *opts.TunnelOptions) { - fake.publishCreateTunnelMutex.RLock() - defer fake.publishCreateTunnelMutex.RUnlock() - argsForCall := fake.publishCreateTunnelArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 -} - -func (fake *FakeIBus) PublishCreateTunnelReturns(result1 error) { - fake.publishCreateTunnelMutex.Lock() - defer fake.publishCreateTunnelMutex.Unlock() - fake.PublishCreateTunnelStub = nil - fake.publishCreateTunnelReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeIBus) PublishCreateTunnelReturnsOnCall(i int, result1 error) { - fake.publishCreateTunnelMutex.Lock() - defer fake.publishCreateTunnelMutex.Unlock() - fake.PublishCreateTunnelStub = nil - if fake.publishCreateTunnelReturnsOnCall == nil { - fake.publishCreateTunnelReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.publishCreateTunnelReturnsOnCall[i] = struct { - result1 error - }{result1} -} - func (fake *FakeIBus) PublishDeleteConnection(arg1 context.Context, arg2 *opts.ConnectionOptions) error { fake.publishDeleteConnectionMutex.Lock() ret, specificReturn := 
fake.publishDeleteConnectionReturnsOnCall[len(fake.publishDeleteConnectionArgsForCall)] @@ -502,68 +379,6 @@ func (fake *FakeIBus) PublishDeleteRelayReturnsOnCall(i int, result1 error) { }{result1} } -func (fake *FakeIBus) PublishDeleteTunnel(arg1 context.Context, arg2 *opts.TunnelOptions) error { - fake.publishDeleteTunnelMutex.Lock() - ret, specificReturn := fake.publishDeleteTunnelReturnsOnCall[len(fake.publishDeleteTunnelArgsForCall)] - fake.publishDeleteTunnelArgsForCall = append(fake.publishDeleteTunnelArgsForCall, struct { - arg1 context.Context - arg2 *opts.TunnelOptions - }{arg1, arg2}) - stub := fake.PublishDeleteTunnelStub - fakeReturns := fake.publishDeleteTunnelReturns - fake.recordInvocation("PublishDeleteTunnel", []interface{}{arg1, arg2}) - fake.publishDeleteTunnelMutex.Unlock() - if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeIBus) PublishDeleteTunnelCallCount() int { - fake.publishDeleteTunnelMutex.RLock() - defer fake.publishDeleteTunnelMutex.RUnlock() - return len(fake.publishDeleteTunnelArgsForCall) -} - -func (fake *FakeIBus) PublishDeleteTunnelCalls(stub func(context.Context, *opts.TunnelOptions) error) { - fake.publishDeleteTunnelMutex.Lock() - defer fake.publishDeleteTunnelMutex.Unlock() - fake.PublishDeleteTunnelStub = stub -} - -func (fake *FakeIBus) PublishDeleteTunnelArgsForCall(i int) (context.Context, *opts.TunnelOptions) { - fake.publishDeleteTunnelMutex.RLock() - defer fake.publishDeleteTunnelMutex.RUnlock() - argsForCall := fake.publishDeleteTunnelArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 -} - -func (fake *FakeIBus) PublishDeleteTunnelReturns(result1 error) { - fake.publishDeleteTunnelMutex.Lock() - defer fake.publishDeleteTunnelMutex.Unlock() - fake.PublishDeleteTunnelStub = nil - fake.publishDeleteTunnelReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeIBus) PublishDeleteTunnelReturnsOnCall(i int, 
result1 error) { - fake.publishDeleteTunnelMutex.Lock() - defer fake.publishDeleteTunnelMutex.Unlock() - fake.PublishDeleteTunnelStub = nil - if fake.publishDeleteTunnelReturnsOnCall == nil { - fake.publishDeleteTunnelReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.publishDeleteTunnelReturnsOnCall[i] = struct { - result1 error - }{result1} -} - func (fake *FakeIBus) PublishResumeRelay(arg1 context.Context, arg2 *opts.RelayOptions) error { fake.publishResumeRelayMutex.Lock() ret, specificReturn := fake.publishResumeRelayReturnsOnCall[len(fake.publishResumeRelayArgsForCall)] @@ -626,68 +441,6 @@ func (fake *FakeIBus) PublishResumeRelayReturnsOnCall(i int, result1 error) { }{result1} } -func (fake *FakeIBus) PublishResumeTunnel(arg1 context.Context, arg2 *opts.TunnelOptions) error { - fake.publishResumeTunnelMutex.Lock() - ret, specificReturn := fake.publishResumeTunnelReturnsOnCall[len(fake.publishResumeTunnelArgsForCall)] - fake.publishResumeTunnelArgsForCall = append(fake.publishResumeTunnelArgsForCall, struct { - arg1 context.Context - arg2 *opts.TunnelOptions - }{arg1, arg2}) - stub := fake.PublishResumeTunnelStub - fakeReturns := fake.publishResumeTunnelReturns - fake.recordInvocation("PublishResumeTunnel", []interface{}{arg1, arg2}) - fake.publishResumeTunnelMutex.Unlock() - if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeIBus) PublishResumeTunnelCallCount() int { - fake.publishResumeTunnelMutex.RLock() - defer fake.publishResumeTunnelMutex.RUnlock() - return len(fake.publishResumeTunnelArgsForCall) -} - -func (fake *FakeIBus) PublishResumeTunnelCalls(stub func(context.Context, *opts.TunnelOptions) error) { - fake.publishResumeTunnelMutex.Lock() - defer fake.publishResumeTunnelMutex.Unlock() - fake.PublishResumeTunnelStub = stub -} - -func (fake *FakeIBus) PublishResumeTunnelArgsForCall(i int) (context.Context, *opts.TunnelOptions) { - 
fake.publishResumeTunnelMutex.RLock() - defer fake.publishResumeTunnelMutex.RUnlock() - argsForCall := fake.publishResumeTunnelArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 -} - -func (fake *FakeIBus) PublishResumeTunnelReturns(result1 error) { - fake.publishResumeTunnelMutex.Lock() - defer fake.publishResumeTunnelMutex.Unlock() - fake.PublishResumeTunnelStub = nil - fake.publishResumeTunnelReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeIBus) PublishResumeTunnelReturnsOnCall(i int, result1 error) { - fake.publishResumeTunnelMutex.Lock() - defer fake.publishResumeTunnelMutex.Unlock() - fake.PublishResumeTunnelStub = nil - if fake.publishResumeTunnelReturnsOnCall == nil { - fake.publishResumeTunnelReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.publishResumeTunnelReturnsOnCall[i] = struct { - result1 error - }{result1} -} - func (fake *FakeIBus) PublishStopRelay(arg1 context.Context, arg2 *opts.RelayOptions) error { fake.publishStopRelayMutex.Lock() ret, specificReturn := fake.publishStopRelayReturnsOnCall[len(fake.publishStopRelayArgsForCall)] @@ -750,68 +503,6 @@ func (fake *FakeIBus) PublishStopRelayReturnsOnCall(i int, result1 error) { }{result1} } -func (fake *FakeIBus) PublishStopTunnel(arg1 context.Context, arg2 *opts.TunnelOptions) error { - fake.publishStopTunnelMutex.Lock() - ret, specificReturn := fake.publishStopTunnelReturnsOnCall[len(fake.publishStopTunnelArgsForCall)] - fake.publishStopTunnelArgsForCall = append(fake.publishStopTunnelArgsForCall, struct { - arg1 context.Context - arg2 *opts.TunnelOptions - }{arg1, arg2}) - stub := fake.PublishStopTunnelStub - fakeReturns := fake.publishStopTunnelReturns - fake.recordInvocation("PublishStopTunnel", []interface{}{arg1, arg2}) - fake.publishStopTunnelMutex.Unlock() - if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeIBus) PublishStopTunnelCallCount() int 
{ - fake.publishStopTunnelMutex.RLock() - defer fake.publishStopTunnelMutex.RUnlock() - return len(fake.publishStopTunnelArgsForCall) -} - -func (fake *FakeIBus) PublishStopTunnelCalls(stub func(context.Context, *opts.TunnelOptions) error) { - fake.publishStopTunnelMutex.Lock() - defer fake.publishStopTunnelMutex.Unlock() - fake.PublishStopTunnelStub = stub -} - -func (fake *FakeIBus) PublishStopTunnelArgsForCall(i int) (context.Context, *opts.TunnelOptions) { - fake.publishStopTunnelMutex.RLock() - defer fake.publishStopTunnelMutex.RUnlock() - argsForCall := fake.publishStopTunnelArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 -} - -func (fake *FakeIBus) PublishStopTunnelReturns(result1 error) { - fake.publishStopTunnelMutex.Lock() - defer fake.publishStopTunnelMutex.Unlock() - fake.PublishStopTunnelStub = nil - fake.publishStopTunnelReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeIBus) PublishStopTunnelReturnsOnCall(i int, result1 error) { - fake.publishStopTunnelMutex.Lock() - defer fake.publishStopTunnelMutex.Unlock() - fake.PublishStopTunnelStub = nil - if fake.publishStopTunnelReturnsOnCall == nil { - fake.publishStopTunnelReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.publishStopTunnelReturnsOnCall[i] = struct { - result1 error - }{result1} -} - func (fake *FakeIBus) PublishUpdateConnection(arg1 context.Context, arg2 *opts.ConnectionOptions) error { fake.publishUpdateConnectionMutex.Lock() ret, specificReturn := fake.publishUpdateConnectionReturnsOnCall[len(fake.publishUpdateConnectionArgsForCall)] @@ -936,68 +627,6 @@ func (fake *FakeIBus) PublishUpdateRelayReturnsOnCall(i int, result1 error) { }{result1} } -func (fake *FakeIBus) PublishUpdateTunnel(arg1 context.Context, arg2 *opts.TunnelOptions) error { - fake.publishUpdateTunnelMutex.Lock() - ret, specificReturn := fake.publishUpdateTunnelReturnsOnCall[len(fake.publishUpdateTunnelArgsForCall)] - fake.publishUpdateTunnelArgsForCall = 
append(fake.publishUpdateTunnelArgsForCall, struct { - arg1 context.Context - arg2 *opts.TunnelOptions - }{arg1, arg2}) - stub := fake.PublishUpdateTunnelStub - fakeReturns := fake.publishUpdateTunnelReturns - fake.recordInvocation("PublishUpdateTunnel", []interface{}{arg1, arg2}) - fake.publishUpdateTunnelMutex.Unlock() - if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeIBus) PublishUpdateTunnelCallCount() int { - fake.publishUpdateTunnelMutex.RLock() - defer fake.publishUpdateTunnelMutex.RUnlock() - return len(fake.publishUpdateTunnelArgsForCall) -} - -func (fake *FakeIBus) PublishUpdateTunnelCalls(stub func(context.Context, *opts.TunnelOptions) error) { - fake.publishUpdateTunnelMutex.Lock() - defer fake.publishUpdateTunnelMutex.Unlock() - fake.PublishUpdateTunnelStub = stub -} - -func (fake *FakeIBus) PublishUpdateTunnelArgsForCall(i int) (context.Context, *opts.TunnelOptions) { - fake.publishUpdateTunnelMutex.RLock() - defer fake.publishUpdateTunnelMutex.RUnlock() - argsForCall := fake.publishUpdateTunnelArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 -} - -func (fake *FakeIBus) PublishUpdateTunnelReturns(result1 error) { - fake.publishUpdateTunnelMutex.Lock() - defer fake.publishUpdateTunnelMutex.Unlock() - fake.PublishUpdateTunnelStub = nil - fake.publishUpdateTunnelReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeIBus) PublishUpdateTunnelReturnsOnCall(i int, result1 error) { - fake.publishUpdateTunnelMutex.Lock() - defer fake.publishUpdateTunnelMutex.Unlock() - fake.PublishUpdateTunnelStub = nil - if fake.publishUpdateTunnelReturnsOnCall == nil { - fake.publishUpdateTunnelReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.publishUpdateTunnelReturnsOnCall[i] = struct { - result1 error - }{result1} -} - func (fake *FakeIBus) Start(arg1 context.Context) error { fake.startMutex.Lock() ret, specificReturn := 
fake.startReturnsOnCall[len(fake.startArgsForCall)] @@ -1119,28 +748,18 @@ func (fake *FakeIBus) Invocations() map[string][][]interface{} { defer fake.publishCreateConnectionMutex.RUnlock() fake.publishCreateRelayMutex.RLock() defer fake.publishCreateRelayMutex.RUnlock() - fake.publishCreateTunnelMutex.RLock() - defer fake.publishCreateTunnelMutex.RUnlock() fake.publishDeleteConnectionMutex.RLock() defer fake.publishDeleteConnectionMutex.RUnlock() fake.publishDeleteRelayMutex.RLock() defer fake.publishDeleteRelayMutex.RUnlock() - fake.publishDeleteTunnelMutex.RLock() - defer fake.publishDeleteTunnelMutex.RUnlock() fake.publishResumeRelayMutex.RLock() defer fake.publishResumeRelayMutex.RUnlock() - fake.publishResumeTunnelMutex.RLock() - defer fake.publishResumeTunnelMutex.RUnlock() fake.publishStopRelayMutex.RLock() defer fake.publishStopRelayMutex.RUnlock() - fake.publishStopTunnelMutex.RLock() - defer fake.publishStopTunnelMutex.RUnlock() fake.publishUpdateConnectionMutex.RLock() defer fake.publishUpdateConnectionMutex.RUnlock() fake.publishUpdateRelayMutex.RLock() defer fake.publishUpdateRelayMutex.RUnlock() - fake.publishUpdateTunnelMutex.RLock() - defer fake.publishUpdateTunnelMutex.RUnlock() fake.startMutex.RLock() defer fake.startMutex.RUnlock() fake.stopMutex.RLock() diff --git a/bus/message.go b/bus/message.go index afa0e4df7..daa239cf6 100644 --- a/bus/message.go +++ b/bus/message.go @@ -18,12 +18,6 @@ const ( StopRelay = "StopRelay" ResumeRelay = "ResumeRelay" - CreateTunnel = "CreateTunnel" - UpdateTunnel = "UpdateTunnel" - DeleteTunnel = "DeleteTunnel" - StopTunnel = "StopTunnel" - ResumeTunnel = "ResumeTunnel" - UpdateConfig = "UpdateConfig" ) @@ -31,7 +25,6 @@ var ( ValidActions = []Action{ CreateConnection, UpdateConnection, DeleteConnection, CreateRelay, UpdateRelay, DeleteRelay, StopRelay, ResumeRelay, - CreateTunnel, UpdateTunnel, DeleteTunnel, StopTunnel, ResumeTunnel, UpdateConfig, } ) diff --git a/bus/publish_helpers.go b/bus/publish_helpers.go 
index 4bf6ebcf5..885f119b9 100644 --- a/bus/publish_helpers.go +++ b/bus/publish_helpers.go @@ -58,36 +58,6 @@ func (b *Bus) PublishResumeRelay(ctx context.Context, relay *opts.RelayOptions) return b.publishRelayMessage(ctx, ResumeRelay, relay) } -// PublishCreateTunnel publishes a CreateTunnel message, which other plumber instances will receive -// and add the service to their local in-memory maps -func (b *Bus) PublishCreateTunnel(ctx context.Context, tunnelOptions *opts.TunnelOptions) error { - return b.publishTunnelMessage(ctx, CreateTunnel, tunnelOptions) -} - -// PublishUpdateTunnel publishes an UpdateTunnel message, which other plumber instances will receive -// and update the connection in their local in-memory maps -func (b *Bus) PublishUpdateTunnel(ctx context.Context, tunnelOptions *opts.TunnelOptions) error { - return b.publishTunnelMessage(ctx, UpdateTunnel, tunnelOptions) -} - -// PublishDeleteTunnel publishes a DeleteTunnel message, which other plumber instances will receive -// and delete from their local in-memory maps -func (b *Bus) PublishDeleteTunnel(ctx context.Context, tunnelOptions *opts.TunnelOptions) error { - return b.publishTunnelMessage(ctx, DeleteTunnel, tunnelOptions) -} - -// PublishStopTunnel broadcasts a StopTunnel message which will cause all plumber -// instances to stop the relay and remove it from their in-memory cache. -func (b *Bus) PublishStopTunnel(ctx context.Context, tunnelOptions *opts.TunnelOptions) error { - return b.publishTunnelMessage(ctx, StopTunnel, tunnelOptions) -} - -// PublishResumeTunnel broadcasts a ResumeTunnel message which will cause all plumber -// instances to start a stopped relay and add it to their in-memory cache. 
-func (b *Bus) PublishResumeTunnel(ctx context.Context, tunnelOptions *opts.TunnelOptions) error { - return b.publishTunnelMessage(ctx, ResumeTunnel, tunnelOptions) -} - func (b *Bus) publishConnectionMessage(ctx context.Context, action Action, conn *opts.ConnectionOptions) error { data, err := proto.Marshal(conn) if err != nil { @@ -115,17 +85,3 @@ func (b *Bus) publishRelayMessage(ctx context.Context, action Action, relay *opt EmittedAt: time.Now().UTC(), }) } - -func (b *Bus) publishTunnelMessage(ctx context.Context, action Action, tunnelOptions *opts.TunnelOptions) error { - data, err := proto.Marshal(tunnelOptions) - if err != nil { - return errors.Wrapf(err, "unable to marshal tunnel message for '%s'", tunnelOptions.XTunnelId) - } - - return b.broadcast(ctx, &Message{ - Action: action, - Data: data, - EmittedBy: b.config.PersistentConfig.PlumberID, - EmittedAt: time.Now().UTC(), - }) -} diff --git a/config/config.go b/config/config.go index 5ffe5659f..5c3b9099f 100644 --- a/config/config.go +++ b/config/config.go @@ -46,10 +46,8 @@ type Config struct { Connections map[string]*stypes.Connection `json:"connections"` Relays map[string]*stypes.Relay `json:"relays"` - Tunnels map[string]*stypes.Tunnel `json:"tunnels"` ConnectionsMutex *sync.RWMutex `json:"-"` RelaysMutex *sync.RWMutex `json:"-"` - TunnelsMutex *sync.RWMutex `json:"-"` enableCluster bool kv kv.IKV @@ -174,10 +172,8 @@ func newConfig(enableCluster bool, k kv.IKV) *Config { PlumberID: getPlumberID(), Connections: make(map[string]*stypes.Connection), Relays: make(map[string]*stypes.Relay), - Tunnels: make(map[string]*stypes.Tunnel), ConnectionsMutex: &sync.RWMutex{}, RelaysMutex: &sync.RWMutex{}, - TunnelsMutex: &sync.RWMutex{}, kv: k, enableCluster: enableCluster, @@ -266,10 +262,8 @@ func readConfigBytes(data []byte) (*Config, error) { cfg := &Config{ ConnectionsMutex: &sync.RWMutex{}, RelaysMutex: &sync.RWMutex{}, - TunnelsMutex: &sync.RWMutex{}, Connections: make(map[string]*stypes.Connection), 
Relays: make(map[string]*stypes.Relay), - Tunnels: make(map[string]*stypes.Tunnel), } if err := json.Unmarshal(data, cfg); err != nil { @@ -456,31 +450,3 @@ func (c *Config) DeleteConnection(connID string) { defer c.ConnectionsMutex.Unlock() delete(c.Connections, connID) } - -// GetTunnel returns an in-progress read from the Tunnels map -func (c *Config) GetTunnel(tunnelID string) *stypes.Tunnel { - c.TunnelsMutex.RLock() - defer c.TunnelsMutex.RUnlock() - - r, _ := c.Tunnels[tunnelID] - - return r -} - -// SetTunnel adds an in-progress read to the Tunnels map -func (c *Config) SetTunnel(tunnelID string, tunnel *stypes.Tunnel) { - c.TunnelsMutex.Lock() - defer c.TunnelsMutex.Unlock() - - if c.Tunnels == nil { - c.Tunnels = make(map[string]*stypes.Tunnel) - } - c.Tunnels[tunnelID] = tunnel -} - -// DeleteTunnel removes a tunnel from in-memory map -func (c *Config) DeleteTunnel(tunnelID string) { - c.TunnelsMutex.Lock() - defer c.TunnelsMutex.Unlock() - delete(c.Tunnels, tunnelID) -} diff --git a/go.mod b/go.mod index a92d0a4f0..f4a9638b8 100644 --- a/go.mod +++ b/go.mod @@ -28,9 +28,7 @@ require ( github.com/jhump/protoreflect v1.10.1 github.com/json-iterator/go v1.1.12 // indirect github.com/julienschmidt/httprouter v1.3.0 - github.com/kataras/tablewriter v0.0.0-20180708051242-e063d29b7c23 github.com/kubemq-io/kubemq-go v1.7.2 - github.com/lensesio/tableprinter v0.0.0-20201125135848-89e81fc956e7 github.com/linkedin/goavro/v2 v2.9.8 github.com/logrusorgru/aurora v2.0.3+incompatible github.com/maxbrunsfeld/counterfeiter/v6 v6.3.0 @@ -97,7 +95,6 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/devigned/tab v0.1.1 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect - github.com/dustin/go-humanize v1.0.0 // indirect github.com/dvsekhvalnov/jose2go v1.5.0 // indirect github.com/eapache/go-resiliency v1.3.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6 // indirect diff --git 
a/go.sum b/go.sum index 22a4cf52c..7e1969513 100644 --- a/go.sum +++ b/go.sum @@ -178,8 +178,6 @@ github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TR github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dimfeld/httptreemux v5.0.1+incompatible h1:Qj3gVcDNoOthBAqftuD596rm4wg/adLLz5xh5CmpiCA= github.com/dimfeld/httptreemux v5.0.1+incompatible/go.mod h1:rbUlSV+CCpv/SuqUTP/8Bk2O3LyUV436/yaRGkhP6Z0= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dvsekhvalnov/jose2go v1.5.0 h1:3j8ya4Z4kMCwT5nXIKFSV84YS+HdqSSO0VsTQxaLAeM= github.com/dvsekhvalnov/jose2go v1.5.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= github.com/eapache/go-resiliency v1.3.0 h1:RRL0nge+cWGlxXbUzJ7yMcq6w2XBEr19dCN6HECGaT0= @@ -470,8 +468,6 @@ github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4d github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= -github.com/kataras/tablewriter v0.0.0-20180708051242-e063d29b7c23 h1:M8exrBzuhWcU6aoHJlHWPe4qFjVKzkMGRal78f5jRRU= -github.com/kataras/tablewriter v0.0.0-20180708051242-e063d29b7c23/go.mod h1:kBSna6b0/RzsOcOZf515vAXwSsXYusl2U7SA0XP09yI= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -499,8 +495,6 @@ github.com/kubemq-io/kubemq-go v1.7.2/go.mod h1:WP2VL6bibnCzTmlTKFtb5RWFvB/V9WPl github.com/kubemq-io/protobuf v1.3.1 h1:b4QcnpujV8U3go8pa2+FTESl6ygU6hY8APYibRtyemo= 
github.com/kubemq-io/protobuf v1.3.1/go.mod h1:mzbGBI05R+GhFLD520xweEIvDM+m4nI7ruJDhgEncas= github.com/kyleconroy/pgoutput v0.1.0/go.mod h1:xj1JLOlXvWLJ1CSJKlrKoWMxkDt9unagoStd2zrZ8IA= -github.com/lensesio/tableprinter v0.0.0-20201125135848-89e81fc956e7 h1:k/1ku0yehLCPqERCHkIHMDqDg1R02AcCScRuHbamU3s= -github.com/lensesio/tableprinter v0.0.0-20201125135848-89e81fc956e7/go.mod h1:YR/zYthNdWfO8+0IOyHDcIDBBBS2JMnYUIwSsnwmRqU= github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= diff --git a/options/options.go b/options/options.go index 39fbfd3de..93416686e 100644 --- a/options/options.go +++ b/options/options.go @@ -99,21 +99,9 @@ func unsetUnusedOptions(kongCtx *kong.Context, cliOptions *opts.CLIOptions) { unsetUnusedRelayOpts(cliOptions) cliOptions.Read = nil cliOptions.Write = nil - case "tunnel": - unsetUnusedTunnelOpts(cliOptions) - case "streamdal": - unsetUnusedStreamdalOpts(cliOptions) } } -func unsetUnusedTunnelOpts(cliOptions *opts.CLIOptions) { - // TODO: unset unused backends -} - -func unsetUnusedStreamdalOpts(cliOptions *opts.CLIOptions) { - // TODO: Unset unused backends -} - func unsetUnusedReadOpts(kongCtx *kong.Context, cliOptions *opts.CLIOptions) { if cliOptions.Read.DecodeOptions.DecodeType == encoding.DecodeType_DECODE_TYPE_UNSET { cliOptions.Read.DecodeOptions = nil @@ -165,8 +153,6 @@ func ActionUsesBackend(action string) bool { return true case "write": return true - case "tunnel": - return true case "manage": return true } @@ -195,7 +181,6 @@ func NewCLIOptions() *opts.CLIOptions { Read: newReadOptions(), Write: newWriteOptions(), Relay: newRelayOptions(), - Tunnel: newTunnelOptions(), Manage: newManageOptions(), } } @@ -469,79 +454,6 @@ func newRelayOptions() *opts.RelayOptions { } } -func newTunnelOptions() *opts.TunnelOptions { - return 
&opts.TunnelOptions{ - Kafka: &opts.TunnelGroupKafkaOptions{ - XConn: &args.KafkaConn{ - Address: make([]string, 0), - }, - Args: &args.KafkaWriteArgs{}, - }, - Activemq: &opts.TunnelGroupActiveMQOptions{ - XConn: &args.ActiveMQConn{}, - Args: &args.ActiveMQWriteArgs{}, - }, - AwsSqs: &opts.TunnelGroupAWSSQSOptions{ - XConn: &args.AWSSQSConn{}, - Args: &args.AWSSQSWriteArgs{}, - }, - Nats: &opts.TunnelGroupNatsOptions{ - XConn: &args.NatsConn{ - TlsOptions: &args.NatsTLSOptions{}, - }, - Args: &args.NatsWriteArgs{}, - }, - NatsStreaming: &opts.TunnelGroupNatsStreamingOptions{ - XConn: &args.NatsStreamingConn{ - TlsOptions: &args.NatsStreamingTLSOptions{}, - }, - Args: &args.NatsStreamingWriteArgs{}, - }, - Nsq: &opts.TunnelGroupNSQOptions{ - XConn: &args.NSQConn{}, - Args: &args.NSQWriteArgs{}, - }, - Rabbit: &opts.TunnelGroupRabbitOptions{ - XConn: &args.RabbitConn{}, - Args: &args.RabbitWriteArgs{}, - }, - Mqtt: &opts.TunnelGroupMQTTOptions{ - XConn: &args.MQTTConn{ - TlsOptions: &args.MQTTTLSOptions{}, - }, - Args: &args.MQTTWriteArgs{}, - }, - AzureServiceBus: &opts.TunnelGroupAzureServiceBusOptions{ - XConn: &args.AzureServiceBusConn{}, - Args: &args.AzureServiceBusWriteArgs{}, - }, - AzureEventHub: &opts.TunnelGroupAzureEventHubOptions{ - XConn: &args.AzureEventHubConn{}, - Args: &args.AzureEventHubWriteArgs{}, - }, - GcpPubsub: &opts.TunnelGroupGCPPubSubOptions{ - XConn: &args.GCPPubSubConn{}, - Args: &args.GCPPubSubWriteArgs{}, - }, - KubemqQueue: &opts.TunnelGroupKubeMQQueueOptions{ - XConn: &args.KubeMQQueueConn{}, - Args: &args.KubeMQQueueWriteArgs{}, - }, - RedisPubsub: &opts.TunnelGroupRedisPubSubOptions{ - XConn: &args.RedisPubSubConn{}, - Args: &args.RedisPubSubWriteArgs{}, - }, - RedisStreams: &opts.TunnelGroupRedisStreamsOptions{ - XConn: &args.RedisStreamsConn{}, - Args: &args.RedisStreamsWriteArgs{}, - }, - AwsKinesis: &opts.TunnelGroupAWSKinesisOptions{ - XConn: &args.AWSKinesisConn{}, - Args: &args.AWSKinesisWriteArgs{}, - }, - } -} - func 
newManageOptions() *opts.ManageOptions { return &opts.ManageOptions{ GlobalOptions: &opts.GlobalManageOptions{}, diff --git a/plumber/cli_manage.go b/plumber/cli_manage.go index 9a6942761..5180431e3 100644 --- a/plumber/cli_manage.go +++ b/plumber/cli_manage.go @@ -71,38 +71,24 @@ func (p *Plumber) HandleManageCmd() error { } else { err = p.HandleGetRelayCmd(ctx, client) } - case "get tunnel": - if p.CLIOptions.Manage.Get.Tunnel.Id == "" { - err = p.HandleGetAllTunnelsCmd(ctx, client) - } else { - err = p.HandleGetTunnelCmd(ctx, client) - } // Create case "create connection": err = p.HandleCreateConnectionCmd(ctx, client) - case "create tunnel": - err = p.HandleCreateTunnelCmd(ctx, client) // Delete case "delete connection": err = p.HandleDeleteConnectionCmd(ctx, client) case "delete relay": err = p.HandleDeleteRelayCmd(ctx, client) - case "delete tunnel": - err = p.HandleDeleteTunnelCmd(ctx, client) // Stop case "stop relay": err = p.HandleStopRelayCmd(ctx, client) - case "stop tunnel": - err = p.HandleStopTunnelCmd(ctx, client) // Resume case "resume relay": err = p.HandleResumeRelayCmd(ctx, client) - case "resume tunnel": - err = p.HandleResumeTunnelCmd(ctx, client) default: return fmt.Errorf("unrecognized command: '%s'", cmd) } diff --git a/plumber/cli_manage_tunnel.go b/plumber/cli_manage_tunnel.go deleted file mode 100644 index 632af8a5c..000000000 --- a/plumber/cli_manage_tunnel.go +++ /dev/null @@ -1,158 +0,0 @@ -package plumber - -import ( - "context" - "strings" - - "github.com/batchcorp/plumber-schemas/build/go/protos" - "github.com/batchcorp/plumber-schemas/build/go/protos/common" - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/pkg/errors" -) - -func (p *Plumber) HandleGetTunnelCmd(ctx context.Context, client protos.PlumberServerClient) error { - resp, err := client.GetTunnel(ctx, &protos.GetTunnelRequest{ - Auth: &common.Auth{ - Token: p.CLIOptions.Manage.GlobalOptions.ManageToken, - }, - TunnelId: 
p.CLIOptions.Manage.Get.Tunnel.Id, - }) - - if err != nil { - p.displayJSON(map[string]string{"error": "no such tunnel id"}) - return nil - } - - if err := p.displayProtobuf(resp); err != nil { - return errors.Wrap(err, "failed to display response") - } - - return nil -} - -func (p *Plumber) HandleGetAllTunnelsCmd(ctx context.Context, client protos.PlumberServerClient) error { - resp, err := client.GetAllTunnels(ctx, &protos.GetAllTunnelsRequest{ - Auth: &common.Auth{ - Token: p.CLIOptions.Manage.GlobalOptions.ManageToken, - }, - }) - - if err != nil { - return errors.Wrap(err, "failed to get all tunnels") - } - - if err := p.displayProtobuf(resp); err != nil { - return errors.Wrap(err, "failed to display response") - } - - return nil -} - -func (p *Plumber) HandleCreateTunnelCmd(ctx context.Context, client protos.PlumberServerClient) error { - // Create tunnel options from CLI opts - tunnelOpts, err := generateTunnelOptionsForManageCreate(p.CLIOptions) - if err != nil { - return errors.Wrap(err, "failed to generate tunnel options") - } - - resp, err := client.CreateTunnel(ctx, &protos.CreateTunnelRequest{ - Auth: &common.Auth{ - Token: p.CLIOptions.Manage.GlobalOptions.ManageToken, - }, - Opts: tunnelOpts, - }) - - if err != nil { - p.displayJSON(map[string]string{"error": err.Error()}) - return nil - } - - p.displayProtobuf(resp) - - return nil -} - -func (p *Plumber) HandleDeleteTunnelCmd(ctx context.Context, client protos.PlumberServerClient) error { - resp, err := client.DeleteTunnel(ctx, &protos.DeleteTunnelRequest{ - Auth: &common.Auth{ - Token: p.CLIOptions.Manage.GlobalOptions.ManageToken, - }, - TunnelId: p.CLIOptions.Manage.Delete.Tunnel.Id, - }) - - if err != nil { - p.displayJSON(map[string]string{"error": err.Error()}) - } - - if err := p.displayProtobuf(resp); err != nil { - return errors.Wrap(err, "failed to display response") - } - - return nil -} - -func (p *Plumber) HandleStopTunnelCmd(ctx context.Context, client protos.PlumberServerClient) error 
{ - resp, err := client.StopTunnel(ctx, &protos.StopTunnelRequest{ - Auth: &common.Auth{ - Token: p.CLIOptions.Manage.GlobalOptions.ManageToken, - }, - TunnelId: p.CLIOptions.Manage.Stop.Tunnel.Id, - }) - - if err != nil { - p.displayJSON(map[string]string{"error": err.Error()}) - } - - if err := p.displayProtobuf(resp); err != nil { - return errors.Wrap(err, "failed to display response") - } - - return nil -} - -func (p *Plumber) HandleResumeTunnelCmd(ctx context.Context, client protos.PlumberServerClient) error { - resp, err := client.ResumeTunnel(ctx, &protos.ResumeTunnelRequest{ - Auth: &common.Auth{ - Token: p.CLIOptions.Manage.GlobalOptions.ManageToken, - }, - TunnelId: p.CLIOptions.Manage.Resume.Tunnel.Id, - }) - - if err != nil { - p.displayJSON(map[string]string{"error": err.Error()}) - } - - if err := p.displayProtobuf(resp); err != nil { - return errors.Wrap(err, "failed to display response") - } - - return nil -} - -func generateTunnelOptionsForManageCreate(cliOpts *opts.CLIOptions) (*opts.TunnelOptions, error) { - tunnelOpts := &opts.TunnelOptions{ - ApiToken: cliOpts.Manage.Create.Tunnel.TunnelToken, - ConnectionId: cliOpts.Manage.Create.Tunnel.ConnectionId, - XGrpcAddress: cliOpts.Manage.Create.Tunnel.XTunnelAddress, - XGrpcTimeoutSeconds: cliOpts.Manage.Create.Tunnel.XTunnelTimeoutSeconds, - XGrpcInsecure: cliOpts.Manage.Create.Tunnel.XTunnelInsecure, - Name: cliOpts.Manage.Create.Tunnel.Name, - } - - // We need to assign the CLI opts to the correct backend field in the request. - // As in, cliOpts.Manage.Create.Tunnel.Kafka needs to be assigned to tunnelOpts.Kafka - // (if kafka was specified). To do this, we will rely on a helper func that - // is generated via code-gen in plumber-schemas. - - // Some backends have a dash, remove it; all further normalization will be - // taken care of by the Merge function. 
- backendName := strings.Replace(cliOpts.Global.XBackend, "-", "", -1) - - tunnelOpts.Kafka = &opts.TunnelGroupKafkaOptions{Args: cliOpts.Manage.Create.Tunnel.Kafka} - - if err := opts.MergeTunnelOptions(backendName, tunnelOpts, cliOpts.Manage.Create.Tunnel); err != nil { - return nil, errors.Wrap(err, "unable to merge relay options") - } - - return tunnelOpts, nil -} diff --git a/plumber/cli_server.go b/plumber/cli_server.go index b1360e4fc..a4094c4a9 100644 --- a/plumber/cli_server.go +++ b/plumber/cli_server.go @@ -70,10 +70,6 @@ func (p *Plumber) RunServer() error { if err := p.relaunchRelays(); err != nil { p.log.Error(errors.Wrap(err, "failed to relaunch relays")) } - - if err := p.relaunchTunnels(); err != nil { - p.log.Error(errors.Wrap(err, "failed to relaunch tunnels")) - } }() // Wait for shutdown @@ -114,29 +110,6 @@ func (p *Plumber) relaunchRelays() error { return nil } -func (p *Plumber) relaunchTunnels() error { - for tunnelID, tunnel := range p.PersistentConfig.Tunnels { - // We want to "create" both active and inactive relays through CreateTunnel - // as we need to create backends, create shutdown context, channels, etc. 
- d, err := p.Actions.CreateTunnel(p.ServiceShutdownCtx, tunnel.Options) - if err != nil { - return errors.Wrapf(err, "unable to create tunnel '%s'", tunnelID) - } - - if tunnel.Active { - p.log.Infof("Tunnel '%s' re-started", tunnelID) - } else { - p.log.Debugf("Tunnel '%s' is inactive - not relaunching", tunnelID) - } - - p.PersistentConfig.SetTunnel(tunnelID, d) - p.PersistentConfig.Save() - } - - return nil - -} - func (p *Plumber) startGRPCServer() error { lis, err := net.Listen("tcp", p.CLIOptions.Server.GrpcListenAddress) if err != nil { diff --git a/plumber/cli_tunnel.go b/plumber/cli_tunnel.go deleted file mode 100644 index 85f8a2cec..000000000 --- a/plumber/cli_tunnel.go +++ /dev/null @@ -1,47 +0,0 @@ -package plumber - -import ( - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - "github.com/pkg/errors" - - "github.com/streamdal/plumber/backends" - "github.com/streamdal/plumber/options" - "github.com/streamdal/plumber/tunnel" -) - -// HandleTunnelCmd handles tunnel destination mode commands -func (p *Plumber) HandleTunnelCmd() error { - backend, err := backends.New(p.cliConnOpts) - if err != nil { - return errors.Wrap(err, "unable to instantiate backend") - } - - // Run up tunnel - // Plumber cluster ID purposefully left blank here so the destination becomes ephemeral - tunnelSvc, err := tunnel.New(p.CLIOptions.Tunnel, &tunnel.Config{ - PlumberVersion: options.VERSION, - PlumberClusterID: p.PersistentConfig.ClusterID, - PlumberID: p.PersistentConfig.PlumberID, - }) - if err != nil { - return errors.Wrap(err, "could not establish connection to Batch") - } - - // Clean up gRPC connection - defer tunnelSvc.Close() - - errorCh := make(chan *records.ErrorRecord, 1000) - - go func() { - for err := range errorCh { - p.log.Errorf("Received error from tunnel component: %s", err.Error) - } - }() - - // Blocks until completion - if err := backend.Tunnel(p.ServiceShutdownCtx, p.CLIOptions.Tunnel, tunnelSvc, errorCh); err != nil { - return 
errors.Wrap(err, "error(s) during tunnel run") - } - - return nil -} diff --git a/plumber/plumber.go b/plumber/plumber.go index 557f80519..0ec49fd8e 100644 --- a/plumber/plumber.go +++ b/plumber/plumber.go @@ -149,8 +149,6 @@ func (p *Plumber) Run() { case "relay": printer.PrintRelayOptions(p.CLIOptions) err = p.HandleRelayCmd() - case "tunnel": - err = p.HandleTunnelCmd() case "manage": err = p.HandleManageCmd() default: diff --git a/prometheus/prometheus.go b/prometheus/prometheus.go index 24650f43d..1387b8d1f 100644 --- a/prometheus/prometheus.go +++ b/prometheus/prometheus.go @@ -19,7 +19,6 @@ const ( PlumberReadErrors = "plumber_read_errors" PlumberGRPCErrors = "plumber_grpc_errors" PlumberRelayWorkers = "plumber_relay_workers" - PlumberTunnels = "plumber_tunnels" ) var ( diff --git a/server/connections_handlers.go b/server/connections_handlers.go index 91b26c02b..4ec5135e2 100644 --- a/server/connections_handlers.go +++ b/server/connections_handlers.go @@ -187,17 +187,6 @@ func (s *Server) DeleteConnection(ctx context.Context, req *protos.DeleteConnect return nil, CustomError(common.Code_NOT_FOUND, "no such connection id") } - // Ensure this connection isn't being used by any tunnels - s.PersistentConfig.TunnelsMutex.RLock() - for id, tunnel := range s.PersistentConfig.Tunnels { - if tunnel.Options.ConnectionId == id { - s.PersistentConfig.TunnelsMutex.RUnlock() - return nil, fmt.Errorf("cannot delete connection '%s' because it is in use by tunnel '%s'", - id, tunnel.Options.XTunnelId) - } - } - s.PersistentConfig.TunnelsMutex.RUnlock() - // Ensure this connection isn't being used by any relays s.PersistentConfig.RelaysMutex.RLock() for id, relay := range s.PersistentConfig.Relays { diff --git a/server/connections_test.go b/server/connections_test.go index cbaef1010..56a440f41 100644 --- a/server/connections_test.go +++ b/server/connections_test.go @@ -37,7 +37,6 @@ var _ = Describe("Connection", func() { PersistentConfig: &config.Config{ ConnectionsMutex: 
&sync.RWMutex{}, Connections: map[string]*stypes.Connection{}, - TunnelsMutex: &sync.RWMutex{}, RelaysMutex: &sync.RWMutex{}, }, Log: logrus.NewEntry(logger), diff --git a/server/relay_test.go b/server/relay_test.go index f25017361..dc0cdc35f 100644 --- a/server/relay_test.go +++ b/server/relay_test.go @@ -30,7 +30,6 @@ var _ = Describe("Relay", func() { pConfig := &config.Config{ Connections: map[string]*stypes.Connection{}, Relays: map[string]*stypes.Relay{}, - TunnelsMutex: &sync.RWMutex{}, RelaysMutex: &sync.RWMutex{}, ConnectionsMutex: &sync.RWMutex{}, } diff --git a/server/tunnel_handlers.go b/server/tunnel_handlers.go index c6a18f097..8089abfa1 100644 --- a/server/tunnel_handlers.go +++ b/server/tunnel_handlers.go @@ -2,222 +2,37 @@ package server import ( "context" - "fmt" - - uuid "github.com/satori/go.uuid" + "errors" "github.com/batchcorp/plumber-schemas/build/go/protos" - "github.com/batchcorp/plumber-schemas/build/go/protos/common" - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" ) +// TODO: remove these handlers when protos are updated to remove tunnel opts, +// TODO: they need to be stubbed currently to satisfy the existing interface func (s *Server) GetAllTunnels(_ context.Context, req *protos.GetAllTunnelsRequest) (*protos.GetAllTunnelsResponse, error) { - if err := s.validateAuth(req.Auth); err != nil { - return nil, CustomError(common.Code_UNAUTHENTICATED, fmt.Sprintf("invalid auth: %s", err)) - } - - var numActive int - var numInactive int - - tunnelOptions := make([]*opts.TunnelOptions, 0) - for _, v := range s.PersistentConfig.Tunnels { - if v.Active { - numActive += 1 - } else { - numInactive += 1 - } - - tunnelOptions = append(tunnelOptions, v.Options) - } - - msg := fmt.Sprintf("found '%d' active and '%d' inactive tunnels", numActive, numInactive) - - return &protos.GetAllTunnelsResponse{ - Status: &common.Status{ - Code: common.Code_OK, - Message: msg, - RequestId: uuid.NewV4().String(), - }, - Opts: tunnelOptions, - }, nil + 
return nil, errors.New("tunnels are no longer supported") } func (s *Server) GetTunnel(_ context.Context, request *protos.GetTunnelRequest) (*protos.GetTunnelResponse, error) { - if err := s.validateAuth(request.Auth); err != nil { - return nil, CustomError(common.Code_UNAUTHENTICATED, fmt.Sprintf("invalid auth: %s", err)) - } - - if request.TunnelId == "" { - return nil, CustomError(common.Code_INVALID_ARGUMENT, "id cannot be empty") - } - - tunnelCfg := s.PersistentConfig.GetTunnel(request.TunnelId) - if tunnelCfg == nil { - return nil, CustomError(common.Code_NOT_FOUND, fmt.Sprintf("tunnel not found: %s", request.TunnelId)) - } - - return &protos.GetTunnelResponse{ - Status: &common.Status{ - Code: common.Code_OK, - RequestId: uuid.NewV4().String(), - }, - Opts: tunnelCfg.Options, - }, nil + return nil, errors.New("tunnels are no longer supported") } func (s *Server) CreateTunnel(ctx context.Context, req *protos.CreateTunnelRequest) (*protos.CreateTunnelResponse, error) { - if err := s.validateAuth(req.Auth); err != nil { - return nil, CustomError(common.Code_UNAUTHENTICATED, fmt.Sprintf("invalid auth: %s", err)) - } - - req.Opts.XTunnelId = uuid.NewV4().String() - req.Opts.XActive = true - - d, err := s.Actions.CreateTunnel(ctx, req.Opts) - if err != nil { - s.rollbackCreateTunnel(ctx, req.Opts) - return nil, CustomError(common.Code_ABORTED, err.Error()) - } - - // Publish CreateTunnel event - if err := s.Bus.PublishCreateTunnel(ctx, d.Options); err != nil { - s.rollbackCreateTunnel(ctx, req.Opts) - s.Log.Error(err) - } - - s.Log.Infof("Tunnel '%s' created", d.Id) - - return &protos.CreateTunnelResponse{ - Status: &common.Status{ - Code: common.Code_OK, - Message: "Tunnel created", - RequestId: uuid.NewV4().String(), - }, - TunnelId: d.Id, - }, nil -} - -func (s *Server) rollbackCreateTunnel(ctx context.Context, req *opts.TunnelOptions) { - s.PersistentConfig.DeleteTunnel(req.XTunnelId) - s.PersistentConfig.Save() + return nil, errors.New("tunnels are no longer 
supported") } func (s *Server) UpdateTunnel(ctx context.Context, req *protos.UpdateTunnelRequest) (*protos.UpdateTunnelResponse, error) { - if err := s.validateAuth(req.Auth); err != nil { - return nil, CustomError(common.Code_UNAUTHENTICATED, fmt.Sprintf("invalid auth: %s", err)) - } - - currentTunnel := s.PersistentConfig.GetTunnel(req.TunnelId) - if currentTunnel.Active { - // Publish StopTunnel event - if err := s.Bus.PublishStopTunnel(ctx, currentTunnel.Options); err != nil { - return nil, fmt.Errorf("unable to publish stop tunnel event: %s", err) - } - } - - if _, err := s.Actions.UpdateTunnel(ctx, req.TunnelId, req.Opts); err != nil { - // No need to roll back here since we haven't updated anything yet - return nil, CustomError(common.Code_ABORTED, err.Error()) - } - - if err := s.Bus.PublishUpdateTunnel(ctx, req.Opts); err != nil { - // TODO: Should have rollback - return nil, fmt.Errorf("unable to publish update tunnel event: %s", err) - } - - s.Log.Infof("Tunnel '%s' updated", req.TunnelId) - - return &protos.UpdateTunnelResponse{ - Status: &common.Status{ - Code: common.Code_OK, - Message: "Tunnel updated", - RequestId: uuid.NewV4().String(), - }, - }, nil - + return nil, errors.New("tunnels are no longer supported") } func (s *Server) StopTunnel(ctx context.Context, req *protos.StopTunnelRequest) (*protos.StopTunnelResponse, error) { - if err := s.validateAuth(req.Auth); err != nil { - return nil, CustomError(common.Code_UNAUTHENTICATED, fmt.Sprintf("invalid auth: %s", err)) - } - - tunnelOptions, err := s.Actions.StopTunnel(ctx, req.TunnelId) - if err != nil { - return nil, CustomError(common.Code_ABORTED, err.Error()) - } - - // Publish StopTunnel event - if err := s.Bus.PublishStopTunnel(ctx, tunnelOptions.Options); err != nil { - // TODO: Should have rollback - s.Log.Errorf("unable to publish stop tunnel event: %s", err) - } - - s.Log.Infof("Tunnel '%s' stopped", req.TunnelId) - - return &protos.StopTunnelResponse{ - Status: &common.Status{ - Code: 
common.Code_OK, - Message: "Tunnel replay stopped", - RequestId: uuid.NewV4().String(), - }, - }, nil + return nil, errors.New("tunnels are no longer supported") } func (s *Server) ResumeTunnel(ctx context.Context, req *protos.ResumeTunnelRequest) (*protos.ResumeTunnelResponse, error) { - if err := s.validateAuth(req.Auth); err != nil { - return nil, CustomError(common.Code_UNAUTHENTICATED, fmt.Sprintf("invalid auth: %s", err)) - } - - d, err := s.Actions.ResumeTunnel(ctx, req.TunnelId) - if err != nil { - return nil, CustomError(common.Code_ABORTED, err.Error()) - } - - // Publish CreateTunnel event - if err := s.Bus.PublishResumeTunnel(ctx, d.Options); err != nil { - // TODO: Should have rollback - s.Log.Errorf("unable to publish resume tunnel event: %s", err) - } - - s.Log.Infof("Tunnel '%s' started", d.Id) - - return &protos.ResumeTunnelResponse{ - Status: &common.Status{ - Code: common.Code_OK, - Message: "Tunnel replay resumed", - RequestId: uuid.NewV4().String(), - }, - }, nil + return nil, errors.New("tunnels are no longer supported") } func (s *Server) DeleteTunnel(ctx context.Context, req *protos.DeleteTunnelRequest) (*protos.DeleteTunnelResponse, error) { - if err := s.validateAuth(req.Auth); err != nil { - return nil, CustomError(common.Code_UNAUTHENTICATED, fmt.Sprintf("invalid auth: %s", err)) - } - - // Needed for PublishDeleteTunnel() below - tunnelCfg := s.PersistentConfig.GetTunnel(req.TunnelId) - if tunnelCfg == nil { - return nil, CustomError(common.Code_NOT_FOUND, "tunnel does not exist") - } - - if err := s.Actions.DeleteTunnel(ctx, req.TunnelId); err != nil { - return nil, CustomError(common.Code_ABORTED, err.Error()) - } - - // Publish delete event - if err := s.Bus.PublishDeleteTunnel(ctx, tunnelCfg.Options); err != nil { - s.Log.Error(err) - } - - s.Log.Infof("Tunnel '%s' deleted", req.TunnelId) - - return &protos.DeleteTunnelResponse{ - Status: &common.Status{ - Code: common.Code_OK, - Message: "Tunnel replay deleted", - RequestId: 
uuid.NewV4().String(), - }, - }, nil + return nil, errors.New("tunnels are no longer supported") } diff --git a/server/tunnel_test.go b/server/tunnel_test.go deleted file mode 100644 index f81fe477f..000000000 --- a/server/tunnel_test.go +++ /dev/null @@ -1,158 +0,0 @@ -package server - -import ( - "context" - "sync" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - uuid "github.com/satori/go.uuid" - "github.com/sirupsen/logrus" - - "github.com/batchcorp/plumber-schemas/build/go/protos" - "github.com/batchcorp/plumber-schemas/build/go/protos/args" - "github.com/batchcorp/plumber-schemas/build/go/protos/common" - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - - "github.com/streamdal/plumber/actions/actionsfakes" - "github.com/streamdal/plumber/bus/busfakes" - "github.com/streamdal/plumber/config" - stypes "github.com/streamdal/plumber/server/types" - "github.com/streamdal/plumber/validate" -) - -var _ = Describe("Tunnel", func() { - - var p *Server - - BeforeEach(func() { - fakeBus := &busfakes.FakeIBus{} - pConfig := &config.Config{ - Connections: map[string]*stypes.Connection{}, - Tunnels: map[string]*stypes.Tunnel{}, - TunnelsMutex: &sync.RWMutex{}, - ConnectionsMutex: &sync.RWMutex{}, - } - - fakeActions := &actionsfakes.FakeIActions{} - - p = &Server{ - Bus: fakeBus, - Actions: fakeActions, - - AuthToken: "streamdal", - PersistentConfig: pConfig, - Log: logrus.NewEntry(logger), - } - }) - - Context("GetAllTunnels", func() { - It("check auth token", func() { - _, err := p.CreateTunnel(context.Background(), &protos.CreateTunnelRequest{ - Auth: &common.Auth{Token: "incorrect token"}, - }) - - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring(validate.ErrInvalidToken.Error())) - }) - - It("returns all tunnels", func() { - var inTunnels []*stypes.Tunnel - - for i := 0; i < 10; i++ { - // set connection - connID := uuid.NewV4().String() - conn := &opts.ConnectionOptions{ - Name: "testing", - Notes: "test connection", 
- XId: connID, - Conn: &opts.ConnectionOptions_Kafka{Kafka: &args.KafkaConn{ - Address: []string{"127.0.0.1:9200"}, - }}, - } - p.PersistentConfig.SetConnection(connID, &stypes.Connection{Connection: conn}) - - tunnelOpts := &opts.TunnelOptions{ - XActive: false, - XTunnelId: uuid.NewV4().String(), - ConnectionId: connID, - } - tunnelId := uuid.NewV4().String() - tunnel := &stypes.Tunnel{ - Active: false, - Id: tunnelId, - Options: tunnelOpts, - } - p.PersistentConfig.SetTunnel(tunnelId, tunnel) - inTunnels = append(inTunnels, tunnel) - } - - resp, err := p.GetAllTunnels(context.Background(), &protos.GetAllTunnelsRequest{ - Auth: &common.Auth{Token: "streamdal"}, - }) - - Expect(err).ToNot(HaveOccurred()) - Expect(len(resp.Opts)).To(Equal(10)) - - for i := 0; i < 10; i++ { - Expect(resp.Opts[i].XTunnelId, inTunnels[i].Id) - } - }) - }) - - Context("DeleteTunnel", func() { - It("check auth token", func() { - _, err := p.DeleteTunnel(context.Background(), &protos.DeleteTunnelRequest{ - Auth: &common.Auth{Token: "incorrect token"}, - }) - Expect(err).To(HaveOccurred()) - Expect(err.Error()).To(ContainSubstring(validate.ErrInvalidToken.Error())) - }) - - It("delete a tunnel", func() { - // set tunnel - connID := uuid.NewV4().String() - conn := &opts.ConnectionOptions{ - Name: "testing", - Notes: "test connection", - XId: connID, - Conn: &opts.ConnectionOptions_Kafka{Kafka: &args.KafkaConn{ - Address: []string{"127.0.0.1:9200"}, - }}, - } - p.PersistentConfig.SetConnection(connID, &stypes.Connection{Connection: conn}) - - tunnelId := uuid.NewV4().String() - tunnelOpts := &opts.TunnelOptions{ - XActive: false, - XTunnelId: tunnelId, - ConnectionId: connID, - } - - tunnel := &stypes.Tunnel{ - Active: false, - Id: tunnelId, - Options: tunnelOpts, - } - p.PersistentConfig.SetTunnel(tunnelId, tunnel) - - delRequest := &protos.DeleteTunnelRequest{ - Auth: &common.Auth{Token: "streamdal"}, - TunnelId: tunnelId, - } - - bFake := &busfakes.FakeIBus{} - p.Bus = bFake - aFake := 
&actionsfakes.FakeIActions{} - aFake.DeleteTunnelStub = func(ctx context.Context, s string) error { - return nil - } - p.Actions = aFake - - _, err := p.DeleteTunnel(context.Background(), delRequest) - Expect(err).ToNot(HaveOccurred()) - Expect(aFake.DeleteTunnelCallCount()).To(Equal(1)) - }) - - }) -}) diff --git a/server/types/dynamic.go b/server/types/dynamic.go deleted file mode 100644 index 1b380d517..000000000 --- a/server/types/dynamic.go +++ /dev/null @@ -1,136 +0,0 @@ -package types - -import ( - "bytes" - "context" - "fmt" - "time" - - "github.com/golang/protobuf/jsonpb" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - - "github.com/streamdal/plumber/backends" - "github.com/streamdal/plumber/options" - "github.com/streamdal/plumber/tunnel" - "github.com/streamdal/plumber/util" -) - -type Tunnel struct { - Active bool `json:"-"` - Id string `json:"-"` - CancelCtx context.Context `json:"-"` - CancelFunc context.CancelFunc `json:"-"` - Backend backends.Backend `json:"-"` - Options *opts.TunnelOptions `json:"config"` - TunnelService tunnel.ITunnel - PlumberClusterID string `json:"-"` - PlumberID string `json:"-"` - PlumberVersion string `json:"-"` - - log *logrus.Entry -} - -// StartTunnel will attempt to start the replay tunnel. Upon the start, it will -// wait for the given "delay" listening for errors. It will return an error -// if it encounters any errors on the ErrorChan or if the Tunnel call fails. -// -// Subsequent failures inside of Tunnel() are not handled yet. 
-func (d *Tunnel) StartTunnel(delay time.Duration) error { - d.log = logrus.WithField("pkg", "types/tunnel") - - d.log.Debugf("Plumber cluster ID: %s", d.PlumberClusterID) - - // Create a new tunnel - tunnelSvc, err := tunnel.New(d.Options, &tunnel.Config{ - PlumberVersion: options.VERSION, - PlumberClusterID: d.PlumberClusterID, - PlumberID: d.PlumberID, - }) - if err != nil { - return errors.Wrap(err, "could not establish connection to Batch") - } - - d.TunnelService = tunnelSvc - - localErrCh := make(chan *records.ErrorRecord, 1) - - d.Active = true - d.Options.XActive = true - - go func() { - // Blocks until tunnel is closed - if err := d.Backend.Tunnel(d.CancelCtx, d.Options, tunnelSvc, localErrCh); err != nil { - util.WriteError(d.log, localErrCh, fmt.Errorf("error during tunnel (id: %s): %s", d.Id, err)) - - // Cancel any goroutines spawned by Tunnel() - d.CancelFunc() - - // Give it a sec - time.Sleep(time.Second) - - // Clean up connection to user's message bus - d.Close() - } - - d.log.Debugf("goroutine exiting for tunnel_id '%s'", d.Id) - }() - - timeAfterCh := time.After(delay) - - // Will block for =< delay - select { - case <-timeAfterCh: - d.log.Debugf("tunnel id '%s' success after %s wait", d.Id, delay.String()) - break - case err := <-localErrCh: - return fmt.Errorf("tunnel startup failed for id '%s': %s", d.Id, err.Error) - } - - return nil - -} - -func (d *Tunnel) Close() { - // Clean up connection to user's message bus - if err := d.Backend.Close(context.Background()); err != nil { - d.log.Errorf("error closing tunnel backend: %s", err) - } - - // Clean up gRPC connection - if err := d.TunnelService.Close(); err != nil { - d.log.Errorf("error closing tunnel gRPC connection: %s", err) - } - - // This gets re-set on ResumeTunnel - d.Backend = nil -} - -// MarshalJSON marshals a tunnel to JSON -func (d *Tunnel) MarshalJSON() ([]byte, error) { - buf := bytes.NewBuffer([]byte(``)) - - m := jsonpb.Marshaler{} - if err := m.Marshal(buf, d.Options); err 
!= nil { - return nil, errors.Wrap(err, "could not marshal opts.TunnelOptions") - } - - return buf.Bytes(), nil -} - -// UnmarshalJSON unmarshals JSON into a tunnel struct -func (d *Tunnel) UnmarshalJSON(v []byte) error { - tunnelOpts := &opts.TunnelOptions{} - if err := jsonpb.Unmarshal(bytes.NewBuffer(v), tunnelOpts); err != nil { - return errors.Wrap(err, "unable to unmarshal stored tunnel") - } - - d.Options = tunnelOpts - d.Id = tunnelOpts.XTunnelId - d.Active = tunnelOpts.XActive - - return nil -} diff --git a/server/types/relay.go b/server/types/relay.go index 7feb04fdb..fb1fd7972 100644 --- a/server/types/relay.go +++ b/server/types/relay.go @@ -88,7 +88,7 @@ func (r *Relay) StartRelay(delay time.Duration) error { return nil } -// MarshalJSON marshals a tunnel to JSON +// MarshalJSON marshals a RelayOptions to JSON func (r *Relay) MarshalJSON() ([]byte, error) { buf := bytes.NewBuffer([]byte(``)) @@ -100,7 +100,7 @@ func (r *Relay) MarshalJSON() ([]byte, error) { return buf.Bytes(), nil } -// UnmarshalJSON unmarshals JSON into a tunnel struct +// UnmarshalJSON unmarshals JSON into a RelayOptions struct func (r *Relay) UnmarshalJSON(v []byte) error { relay := &opts.RelayOptions{} if err := jsonpb.Unmarshal(bytes.NewBuffer(v), relay); err != nil { diff --git a/tunnel/tunnel.go b/tunnel/tunnel.go deleted file mode 100644 index 19087e2af..000000000 --- a/tunnel/tunnel.go +++ /dev/null @@ -1,388 +0,0 @@ -package tunnel - -import ( - "context" - "crypto/tls" - "fmt" - "io" - "os" - "time" - - "github.com/pkg/errors" - uuid "github.com/satori/go.uuid" - "github.com/sirupsen/logrus" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/metadata" - - "github.com/batchcorp/collector-schemas/build/go/protos/events" - "github.com/batchcorp/collector-schemas/build/go/protos/services" - - "github.com/batchcorp/plumber-schemas/build/go/protos/opts" - "github.com/batchcorp/plumber-schemas/build/go/protos/records" -) - -const ( - // 
DefaultTunnelAddress is the default address that the tunnel pkg will - // use if an alternate address is not specified - DefaultTunnelAddress = "dproxy.streamdal.com:443" - - // ReconnectSleep determines the length of time to wait between reconnect attempts to dProxy - ReconnectSleep = time.Second * 5 - - DefaultGRPCTimeoutSeconds = 5 -) - -var ( - ErrNotAuthorized = errors.New("not authorized") -) - -//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 . ITunnel -type ITunnel interface { - // Start is called by a backend's Tunnel() method. It authorizes the connection - // and begins reading a GRPC stream of responses consisting of Tunnel protobuf - // messages. - Start(ctx context.Context, bus string, errorCh chan<- *records.ErrorRecord) error - - // Read returns a channel that will receive replay events from the dProxy service - Read() chan *events.Outbound - - // Close closes the connection to dProxy service - Close() error - - // Delete deletes a tunnel from dProxy and from ui-bff destinations. - // This should be called after the tunnel is deleted from the plumber cluster to ensure proper cleanup. 
- Delete(ctx context.Context, tunnelID string) error -} - -type Client struct { - Client services.DProxyClient - Conn *grpc.ClientConn - Token string - log *logrus.Entry - PlumberClusterID string - PlumberID string - PlumberVersion string - OutboundMessageCh chan *events.Outbound - - Options *opts.TunnelOptions -} - -type Config struct { - PlumberVersion string - PlumberClusterID string - PlumberID string -} - -// New validates CLI options and returns a new Client struct -func New(opts *opts.TunnelOptions, cfg *Config) (ITunnel, error) { - if err := validateTunnelOptions(opts); err != nil { - return nil, errors.Wrap(err, "unable to validate tunnel options") - } - - grpcConnTimeout := time.Duration(opts.XGrpcTimeoutSeconds) * time.Second - - ctx, _ := context.WithTimeout(context.Background(), grpcConnTimeout) - - conn, err := grpc.DialContext(ctx, opts.XGrpcAddress, getDialOptions(opts)...) - if err != nil { - return nil, errors.Wrapf(err, "unable to open connection to %s", opts.XGrpcAddress) - } - - dClient := &Client{ - Client: services.NewDProxyClient(conn), - Conn: conn, - Token: opts.ApiToken, - OutboundMessageCh: make(chan *events.Outbound, 1), - Options: opts, - PlumberClusterID: cfg.PlumberClusterID, - PlumberID: cfg.PlumberID, - PlumberVersion: cfg.PlumberVersion, - log: logrus.WithField("pkg", "tunnel"), - } - - return dClient, nil -} - -func validateTunnelOptions(opts *opts.TunnelOptions) error { - if opts == nil { - return errors.New("opts cannot be nil") - } - - if opts.ApiToken == "" { - return errors.New("api token cannot be empty") - } - - if opts.XGrpcAddress == "" { - opts.XGrpcAddress = DefaultTunnelAddress - } - - if opts.XGrpcTimeoutSeconds == 0 { - opts.XGrpcTimeoutSeconds = DefaultGRPCTimeoutSeconds - } - - return nil -} - -func (d *Client) Close() error { - return d.Conn.Close() -} - -func (d *Client) reconnect() error { - conn, err := grpc.Dial(d.Options.XGrpcAddress, getDialOptions(d.Options)...) 
- if err != nil { - return errors.Wrapf(err, "unable to reconnect to %s", d.Options.XGrpcAddress) - } - - d.Conn = conn - - d.Client = services.NewDProxyClient(conn) - return nil -} - -// getDialOptions returns all necessary grpc dial options to connect to dProxy -func getDialOptions(opts *opts.TunnelOptions) []grpc.DialOption { - dialOpts := []grpc.DialOption{grpc.WithBlock()} - - if opts.XGrpcInsecure { - dialOpts = append(dialOpts, grpc.WithInsecure()) - } else { - dialOpts = append(dialOpts, grpc.WithTransportCredentials(credentials.NewTLS( - &tls.Config{ - InsecureSkipVerify: true, - }, - ))) - } - - return dialOpts -} - -func (d *Client) Read() chan *events.Outbound { - return d.OutboundMessageCh -} - -func (d *Client) Start(ctx context.Context, bus string, errorCh chan<- *records.ErrorRecord) error { - go func() { - var err error - var stream services.DProxy_ConnectClient - - // TODO: Should be a looper - for { - if stream == nil { - // Try to connect forever - stream, err = d.connect(ctx, bus) - if err != nil { - if err == context.Canceled { - d.notify(err, "context cancelled during connect", errorCh, logrus.DebugLevel) - break - } - - // Error we can probably recover from -> force reconnect after sleep - stream = nil - d.log.Error(err) - time.Sleep(ReconnectSleep) - - continue - } - } - - response, err := stream.Recv() - if err != nil { - if err.Error() == "rpc error: code = Canceled desc = context canceled" { - d.notify(err, "context cancelled during recv", errorCh, logrus.DebugLevel) - break - } - - if errors.Is(err, io.EOF) { - // Nicer reconnect messages - d.log.Warningf("dProxy server is unavailable, retrying in %s...", ReconnectSleep.String()) - } else { - d.log.Warningf("Error receiving message, retrying in %s: %s", ReconnectSleep.String(), err) - } - - // Stream is no longer useful. Need to get a new one on reconnect - stream = nil - - // Attempt to reconnect. 
On the next loop iteration, stream == nil check will be hit, and assuming we've - // reconnected at that point, a new stream will be opened and authorized - d.reconnect() - - time.Sleep(ReconnectSleep) - continue - } - - if err := d.handleResponse(ctx, response); err != nil { - if err == ErrNotAuthorized { - d.notify(err, "API token was not accepted for tunnel - bailing out", errorCh, logrus.DebugLevel) - break - } - - d.log.Warningf("error handling response for msg type '%s' (recoverable): %s", response.Type, err) - } - } - - d.log.Debug("Start() goroutine exiting") - }() - - return nil -} - -func (d *Client) Delete(ctx context.Context, tunnelID string) error { - resp, err := d.Client.DeleteTunnel(ctx, &events.DeleteTunnelRequest{ - Token: d.Token, - PlumberClusterId: d.PlumberClusterID, - TunnelId: tunnelID, - }) - if err != nil { - return errors.Wrap(err, "unable to delete tunnel in dProxy service") - } - if !resp.Success { - return errors.New(resp.Message) - } - - return nil -} - -func (d *Client) notify(err error, desc string, errorCh chan<- *records.ErrorRecord, logLevel logrus.Level) { - if errorCh != nil { - errRecord := &records.ErrorRecord{ - OccurredAtUnixTsUtc: time.Now().UTC().Unix(), - Error: err.Error(), - } - - if desc != "" { - errRecord.Metadata = map[string][]byte{ - "desc": []byte(desc), - } - } - - errorCh <- errRecord - } - - var fullErr string - - if desc != "" { - fullErr = fmt.Sprintf("%s: %s", desc, err.Error()) - } else { - fullErr = err.Error() - } - - d.log.Logger.Log(logLevel, fullErr) -} - -// connect opens a GRPC connection to dProxy. 
It is called by Start -func (d *Client) connect(ctx context.Context, bus string) (services.DProxy_ConnectClient, error) { - authRequest := &events.Tunnel{ - Type: events.Tunnel_AUTH_REQUEST, - Payload: &events.Tunnel_AuthRequest{ - AuthRequest: &events.AuthRequest{ - Token: d.Token, - MessageBus: bus, - PlumberClusterId: d.PlumberClusterID, - Name: d.Options.Name, - TunnelId: d.Options.XTunnelId, - PlumberVersion: "", - PlumberId: d.PlumberID, - }, - }, - } - - // dProxy needs a unique ID to track the connection - // This value can be anything, so long as it is unique - md := metadata.Pairs("plumber-stream-id", uuid.NewV4().String()) - ctx = metadata.NewOutgoingContext(ctx, md) - - return d.Client.Connect(ctx, authRequest) -} - -// handleResponse receives a tunnel message and determines which method should handle it based on the payload -func (d *Client) handleResponse(_ context.Context, resp *events.Tunnel) error { - switch resp.Type { - case events.Tunnel_AUTH_RESPONSE: - return d.handleAuthResponse(resp) - case events.Tunnel_OUTBOUND_MESSAGE: - return d.handleOutboundMessage(resp) - case events.Tunnel_REPLAY_EVENT: - return d.handleReplayEvent(resp) - case events.Tunnel_TUNNEL_EVENT: - return d.handleTunnelEvent(resp) - case events.Tunnel_UNSET: - // Noop used by dproxy to keep connection open - return nil - default: - return errors.Errorf("unknown response type: %s", resp.Type) - } -} - -// handleAuthResponse handles a AUTH_RESPONSE payload from a Tunnel protobuf message -func (d *Client) handleAuthResponse(resp *events.Tunnel) error { - authResponse := resp.GetAuthResponse() - if authResponse == nil { - if err := d.Conn.Close(); err != nil { - d.log.Error("could not cleanly disconnect from server") - } - - return fmt.Errorf("received invalid auth response from server: %+v", authResponse) - } - - if authResponse.Authorized == false { - if err := d.Conn.Close(); err != nil { - d.log.Error("could not cleanly disconnect from server") - } - - return 
ErrNotAuthorized - } - - d.log.Debug("Connection authorized for replay tunnel") - - return nil -} - -// handleOutboundMessage handles a REPLAY_MESSAGE payload from a Tunnel protobuf message -func (d *Client) handleOutboundMessage(resp *events.Tunnel) error { - d.log.Debugf("received message for replay '%s'", resp.ReplayId) - - outbound := resp.GetOutboundMessage() - - // Ignore - if outbound.Last { - return nil - } - - d.OutboundMessageCh <- outbound - - return nil -} - -// handleReplayEvent handles a REPLAY_MESSAGE payload from a Tunnel protobuf message -func (d *Client) handleReplayEvent(resp *events.Tunnel) error { - llog := d.log.WithField("replay_id", resp.ReplayId) - event := resp.GetReplayMessage() - - switch event.Type { - case events.ReplayEvent_CREATE_REPLAY: - llog.Infof("Replay '%s' starting", resp.ReplayId) - case events.ReplayEvent_PAUSE_REPLAY: - llog.Infof("Replay '%s' paused", resp.ReplayId) - case events.ReplayEvent_RESUME_REPLAY: - llog.Infof("Replay '%s' resuming", resp.ReplayId) - case events.ReplayEvent_ABORT_REPLAY: - llog.Errorf("Replay '%s' aborted", resp.ReplayId) - case events.ReplayEvent_FINISH_REPLAY: - llog.Infof("Replay '%s' finished!", resp.ReplayId) - } - - return nil -} - -func (d *Client) handleTunnelEvent(resp *events.Tunnel) error { - event := resp.GetTunnelEvent() - - switch event.Type { - case events.TunnelEvent_FORCE_DISCONNECT: - d.log.Error("Tunnel deleted, plumber exiting") - os.Exit(0) - } - - return nil -} diff --git a/tunnel/tunnelfakes/fake_itunnel.go b/tunnel/tunnelfakes/fake_itunnel.go deleted file mode 100644 index 85c645f9d..000000000 --- a/tunnel/tunnelfakes/fake_itunnel.go +++ /dev/null @@ -1,326 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. 
-package tunnelfakes - -import ( - "context" - "sync" - - "github.com/batchcorp/collector-schemas/build/go/protos/events" - - "github.com/batchcorp/plumber-schemas/build/go/protos/records" - - "github.com/streamdal/plumber/tunnel" -) - -type FakeITunnel struct { - CloseStub func() error - closeMutex sync.RWMutex - closeArgsForCall []struct { - } - closeReturns struct { - result1 error - } - closeReturnsOnCall map[int]struct { - result1 error - } - DeleteStub func(context.Context, string) error - deleteMutex sync.RWMutex - deleteArgsForCall []struct { - arg1 context.Context - arg2 string - } - deleteReturns struct { - result1 error - } - deleteReturnsOnCall map[int]struct { - result1 error - } - ReadStub func() chan *events.Outbound - readMutex sync.RWMutex - readArgsForCall []struct { - } - readReturns struct { - result1 chan *events.Outbound - } - readReturnsOnCall map[int]struct { - result1 chan *events.Outbound - } - StartStub func(context.Context, string, chan<- *records.ErrorRecord) error - startMutex sync.RWMutex - startArgsForCall []struct { - arg1 context.Context - arg2 string - arg3 chan<- *records.ErrorRecord - } - startReturns struct { - result1 error - } - startReturnsOnCall map[int]struct { - result1 error - } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - -func (fake *FakeITunnel) Close() error { - fake.closeMutex.Lock() - ret, specificReturn := fake.closeReturnsOnCall[len(fake.closeArgsForCall)] - fake.closeArgsForCall = append(fake.closeArgsForCall, struct { - }{}) - stub := fake.CloseStub - fakeReturns := fake.closeReturns - fake.recordInvocation("Close", []interface{}{}) - fake.closeMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeITunnel) CloseCallCount() int { - fake.closeMutex.RLock() - defer fake.closeMutex.RUnlock() - return len(fake.closeArgsForCall) -} - -func (fake *FakeITunnel) CloseCalls(stub func() error) 
{ - fake.closeMutex.Lock() - defer fake.closeMutex.Unlock() - fake.CloseStub = stub -} - -func (fake *FakeITunnel) CloseReturns(result1 error) { - fake.closeMutex.Lock() - defer fake.closeMutex.Unlock() - fake.CloseStub = nil - fake.closeReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeITunnel) CloseReturnsOnCall(i int, result1 error) { - fake.closeMutex.Lock() - defer fake.closeMutex.Unlock() - fake.CloseStub = nil - if fake.closeReturnsOnCall == nil { - fake.closeReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.closeReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeITunnel) Delete(arg1 context.Context, arg2 string) error { - fake.deleteMutex.Lock() - ret, specificReturn := fake.deleteReturnsOnCall[len(fake.deleteArgsForCall)] - fake.deleteArgsForCall = append(fake.deleteArgsForCall, struct { - arg1 context.Context - arg2 string - }{arg1, arg2}) - stub := fake.DeleteStub - fakeReturns := fake.deleteReturns - fake.recordInvocation("Delete", []interface{}{arg1, arg2}) - fake.deleteMutex.Unlock() - if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeITunnel) DeleteCallCount() int { - fake.deleteMutex.RLock() - defer fake.deleteMutex.RUnlock() - return len(fake.deleteArgsForCall) -} - -func (fake *FakeITunnel) DeleteCalls(stub func(context.Context, string) error) { - fake.deleteMutex.Lock() - defer fake.deleteMutex.Unlock() - fake.DeleteStub = stub -} - -func (fake *FakeITunnel) DeleteArgsForCall(i int) (context.Context, string) { - fake.deleteMutex.RLock() - defer fake.deleteMutex.RUnlock() - argsForCall := fake.deleteArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 -} - -func (fake *FakeITunnel) DeleteReturns(result1 error) { - fake.deleteMutex.Lock() - defer fake.deleteMutex.Unlock() - fake.DeleteStub = nil - fake.deleteReturns = struct { - result1 error - }{result1} -} - -func (fake 
*FakeITunnel) DeleteReturnsOnCall(i int, result1 error) { - fake.deleteMutex.Lock() - defer fake.deleteMutex.Unlock() - fake.DeleteStub = nil - if fake.deleteReturnsOnCall == nil { - fake.deleteReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.deleteReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeITunnel) Read() chan *events.Outbound { - fake.readMutex.Lock() - ret, specificReturn := fake.readReturnsOnCall[len(fake.readArgsForCall)] - fake.readArgsForCall = append(fake.readArgsForCall, struct { - }{}) - stub := fake.ReadStub - fakeReturns := fake.readReturns - fake.recordInvocation("Read", []interface{}{}) - fake.readMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeITunnel) ReadCallCount() int { - fake.readMutex.RLock() - defer fake.readMutex.RUnlock() - return len(fake.readArgsForCall) -} - -func (fake *FakeITunnel) ReadCalls(stub func() chan *events.Outbound) { - fake.readMutex.Lock() - defer fake.readMutex.Unlock() - fake.ReadStub = stub -} - -func (fake *FakeITunnel) ReadReturns(result1 chan *events.Outbound) { - fake.readMutex.Lock() - defer fake.readMutex.Unlock() - fake.ReadStub = nil - fake.readReturns = struct { - result1 chan *events.Outbound - }{result1} -} - -func (fake *FakeITunnel) ReadReturnsOnCall(i int, result1 chan *events.Outbound) { - fake.readMutex.Lock() - defer fake.readMutex.Unlock() - fake.ReadStub = nil - if fake.readReturnsOnCall == nil { - fake.readReturnsOnCall = make(map[int]struct { - result1 chan *events.Outbound - }) - } - fake.readReturnsOnCall[i] = struct { - result1 chan *events.Outbound - }{result1} -} - -func (fake *FakeITunnel) Start(arg1 context.Context, arg2 string, arg3 chan<- *records.ErrorRecord) error { - fake.startMutex.Lock() - ret, specificReturn := fake.startReturnsOnCall[len(fake.startArgsForCall)] - fake.startArgsForCall = append(fake.startArgsForCall, struct { - 
arg1 context.Context - arg2 string - arg3 chan<- *records.ErrorRecord - }{arg1, arg2, arg3}) - stub := fake.StartStub - fakeReturns := fake.startReturns - fake.recordInvocation("Start", []interface{}{arg1, arg2, arg3}) - fake.startMutex.Unlock() - if stub != nil { - return stub(arg1, arg2, arg3) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *FakeITunnel) StartCallCount() int { - fake.startMutex.RLock() - defer fake.startMutex.RUnlock() - return len(fake.startArgsForCall) -} - -func (fake *FakeITunnel) StartCalls(stub func(context.Context, string, chan<- *records.ErrorRecord) error) { - fake.startMutex.Lock() - defer fake.startMutex.Unlock() - fake.StartStub = stub -} - -func (fake *FakeITunnel) StartArgsForCall(i int) (context.Context, string, chan<- *records.ErrorRecord) { - fake.startMutex.RLock() - defer fake.startMutex.RUnlock() - argsForCall := fake.startArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 -} - -func (fake *FakeITunnel) StartReturns(result1 error) { - fake.startMutex.Lock() - defer fake.startMutex.Unlock() - fake.StartStub = nil - fake.startReturns = struct { - result1 error - }{result1} -} - -func (fake *FakeITunnel) StartReturnsOnCall(i int, result1 error) { - fake.startMutex.Lock() - defer fake.startMutex.Unlock() - fake.StartStub = nil - if fake.startReturnsOnCall == nil { - fake.startReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.startReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *FakeITunnel) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - fake.closeMutex.RLock() - defer fake.closeMutex.RUnlock() - fake.deleteMutex.RLock() - defer fake.deleteMutex.RUnlock() - fake.readMutex.RLock() - defer fake.readMutex.RUnlock() - fake.startMutex.RLock() - defer fake.startMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := 
range fake.invocations { - copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *FakeITunnel) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() - defer fake.invocationsMutex.Unlock() - if fake.invocations == nil { - fake.invocations = map[string][][]interface{}{} - } - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - fake.invocations[key] = append(fake.invocations[key], args) -} - -var _ tunnel.ITunnel = new(FakeITunnel) diff --git a/validate/backends.go b/validate/backends.go index 430ffb1a5..8682d3565 100644 --- a/validate/backends.go +++ b/validate/backends.go @@ -29,10 +29,6 @@ var ( ErrEmptyWriteOpts = errors.New("write options cannot be nil") - // Tunnel - - ErrEmptyTunnelOpts = errors.New("tunnel options cannot be nil") - // Relay ErrEmptyRelayOpts = errors.New("relay options cannot be nil") diff --git a/validate/server.go b/validate/server.go index 5f30022c5..887f8b7b4 100644 --- a/validate/server.go +++ b/validate/server.go @@ -79,9 +79,3 @@ func ConnectionOptionsForServer(connOptions *opts.ConnectionOptions) error { return nil } - -func TunnelOptionsForServer(tunnelOptions *opts.TunnelOptions) error { - // TODO: Implement specific tunnel validations - - return nil -} diff --git a/vendor/github.com/dustin/go-humanize/.travis.yml b/vendor/github.com/dustin/go-humanize/.travis.yml deleted file mode 100644 index ba95cdd15..000000000 --- a/vendor/github.com/dustin/go-humanize/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -sudo: false -language: go -go: - - 1.3.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - master -matrix: - allow_failures: - - go: master - fast_finish: true -install: - - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d -s .) - - go tool vet . - - go test -v -race ./... 
diff --git a/vendor/github.com/dustin/go-humanize/LICENSE b/vendor/github.com/dustin/go-humanize/LICENSE deleted file mode 100644 index 8d9a94a90..000000000 --- a/vendor/github.com/dustin/go-humanize/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -Copyright (c) 2005-2008 Dustin Sallings - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - diff --git a/vendor/github.com/dustin/go-humanize/README.markdown b/vendor/github.com/dustin/go-humanize/README.markdown deleted file mode 100644 index 91b4ae564..000000000 --- a/vendor/github.com/dustin/go-humanize/README.markdown +++ /dev/null @@ -1,124 +0,0 @@ -# Humane Units [![Build Status](https://travis-ci.org/dustin/go-humanize.svg?branch=master)](https://travis-ci.org/dustin/go-humanize) [![GoDoc](https://godoc.org/github.com/dustin/go-humanize?status.svg)](https://godoc.org/github.com/dustin/go-humanize) - -Just a few functions for helping humanize times and sizes. 
- -`go get` it as `github.com/dustin/go-humanize`, import it as -`"github.com/dustin/go-humanize"`, use it as `humanize`. - -See [godoc](https://godoc.org/github.com/dustin/go-humanize) for -complete documentation. - -## Sizes - -This lets you take numbers like `82854982` and convert them to useful -strings like, `83 MB` or `79 MiB` (whichever you prefer). - -Example: - -```go -fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB. -``` - -## Times - -This lets you take a `time.Time` and spit it out in relative terms. -For example, `12 seconds ago` or `3 days from now`. - -Example: - -```go -fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago. -``` - -Thanks to Kyle Lemons for the time implementation from an IRC -conversation one day. It's pretty neat. - -## Ordinals - -From a [mailing list discussion][odisc] where a user wanted to be able -to label ordinals. - - 0 -> 0th - 1 -> 1st - 2 -> 2nd - 3 -> 3rd - 4 -> 4th - [...] - -Example: - -```go -fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend. -``` - -## Commas - -Want to shove commas into numbers? Be my guest. - - 0 -> 0 - 100 -> 100 - 1000 -> 1,000 - 1000000000 -> 1,000,000,000 - -100000 -> -100,000 - -Example: - -```go -fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491. -``` - -## Ftoa - -Nicer float64 formatter that removes trailing zeros. - -```go -fmt.Printf("%f", 2.24) // 2.240000 -fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24 -fmt.Printf("%f", 2.0) // 2.000000 -fmt.Printf("%s", humanize.Ftoa(2.0)) // 2 -``` - -## SI notation - -Format numbers with [SI notation][sinotation]. - -Example: - -```go -humanize.SI(0.00000000223, "M") // 2.23 nM -``` - -## English-specific functions - -The following functions are in the `humanize/english` subpackage. 
- -### Plurals - -Simple English pluralization - -```go -english.PluralWord(1, "object", "") // object -english.PluralWord(42, "object", "") // objects -english.PluralWord(2, "bus", "") // buses -english.PluralWord(99, "locus", "loci") // loci - -english.Plural(1, "object", "") // 1 object -english.Plural(42, "object", "") // 42 objects -english.Plural(2, "bus", "") // 2 buses -english.Plural(99, "locus", "loci") // 99 loci -``` - -### Word series - -Format comma-separated words lists with conjuctions: - -```go -english.WordSeries([]string{"foo"}, "and") // foo -english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar -english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz - -english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz -``` - -[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion -[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix diff --git a/vendor/github.com/dustin/go-humanize/big.go b/vendor/github.com/dustin/go-humanize/big.go deleted file mode 100644 index f49dc337d..000000000 --- a/vendor/github.com/dustin/go-humanize/big.go +++ /dev/null @@ -1,31 +0,0 @@ -package humanize - -import ( - "math/big" -) - -// order of magnitude (to a max order) -func oomm(n, b *big.Int, maxmag int) (float64, int) { - mag := 0 - m := &big.Int{} - for n.Cmp(b) >= 0 { - n.DivMod(n, b, m) - mag++ - if mag == maxmag && maxmag >= 0 { - break - } - } - return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag -} - -// total order of magnitude -// (same as above, but with no upper limit) -func oom(n, b *big.Int) (float64, int) { - mag := 0 - m := &big.Int{} - for n.Cmp(b) >= 0 { - n.DivMod(n, b, m) - mag++ - } - return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag -} diff --git a/vendor/github.com/dustin/go-humanize/bigbytes.go b/vendor/github.com/dustin/go-humanize/bigbytes.go deleted file mode 100644 index 1a2bf6172..000000000 --- 
a/vendor/github.com/dustin/go-humanize/bigbytes.go +++ /dev/null @@ -1,173 +0,0 @@ -package humanize - -import ( - "fmt" - "math/big" - "strings" - "unicode" -) - -var ( - bigIECExp = big.NewInt(1024) - - // BigByte is one byte in bit.Ints - BigByte = big.NewInt(1) - // BigKiByte is 1,024 bytes in bit.Ints - BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp) - // BigMiByte is 1,024 k bytes in bit.Ints - BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp) - // BigGiByte is 1,024 m bytes in bit.Ints - BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp) - // BigTiByte is 1,024 g bytes in bit.Ints - BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp) - // BigPiByte is 1,024 t bytes in bit.Ints - BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp) - // BigEiByte is 1,024 p bytes in bit.Ints - BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp) - // BigZiByte is 1,024 e bytes in bit.Ints - BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp) - // BigYiByte is 1,024 z bytes in bit.Ints - BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp) -) - -var ( - bigSIExp = big.NewInt(1000) - - // BigSIByte is one SI byte in big.Ints - BigSIByte = big.NewInt(1) - // BigKByte is 1,000 SI bytes in big.Ints - BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp) - // BigMByte is 1,000 SI k bytes in big.Ints - BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp) - // BigGByte is 1,000 SI m bytes in big.Ints - BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp) - // BigTByte is 1,000 SI g bytes in big.Ints - BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp) - // BigPByte is 1,000 SI t bytes in big.Ints - BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp) - // BigEByte is 1,000 SI p bytes in big.Ints - BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp) - // BigZByte is 1,000 SI e bytes in big.Ints - BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp) - // BigYByte is 1,000 SI z bytes in big.Ints - BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp) -) - -var bigBytesSizeTable = map[string]*big.Int{ - "b": BigByte, - "kib": 
BigKiByte, - "kb": BigKByte, - "mib": BigMiByte, - "mb": BigMByte, - "gib": BigGiByte, - "gb": BigGByte, - "tib": BigTiByte, - "tb": BigTByte, - "pib": BigPiByte, - "pb": BigPByte, - "eib": BigEiByte, - "eb": BigEByte, - "zib": BigZiByte, - "zb": BigZByte, - "yib": BigYiByte, - "yb": BigYByte, - // Without suffix - "": BigByte, - "ki": BigKiByte, - "k": BigKByte, - "mi": BigMiByte, - "m": BigMByte, - "gi": BigGiByte, - "g": BigGByte, - "ti": BigTiByte, - "t": BigTByte, - "pi": BigPiByte, - "p": BigPByte, - "ei": BigEiByte, - "e": BigEByte, - "z": BigZByte, - "zi": BigZiByte, - "y": BigYByte, - "yi": BigYiByte, -} - -var ten = big.NewInt(10) - -func humanateBigBytes(s, base *big.Int, sizes []string) string { - if s.Cmp(ten) < 0 { - return fmt.Sprintf("%d B", s) - } - c := (&big.Int{}).Set(s) - val, mag := oomm(c, base, len(sizes)-1) - suffix := sizes[mag] - f := "%.0f %s" - if val < 10 { - f = "%.1f %s" - } - - return fmt.Sprintf(f, val, suffix) - -} - -// BigBytes produces a human readable representation of an SI size. -// -// See also: ParseBigBytes. -// -// BigBytes(82854982) -> 83 MB -func BigBytes(s *big.Int) string { - sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} - return humanateBigBytes(s, bigSIExp, sizes) -} - -// BigIBytes produces a human readable representation of an IEC size. -// -// See also: ParseBigBytes. -// -// BigIBytes(82854982) -> 79 MiB -func BigIBytes(s *big.Int) string { - sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} - return humanateBigBytes(s, bigIECExp, sizes) -} - -// ParseBigBytes parses a string representation of bytes into the number -// of bytes it represents. -// -// See also: BigBytes, BigIBytes. -// -// ParseBigBytes("42 MB") -> 42000000, nil -// ParseBigBytes("42 mib") -> 44040192, nil -func ParseBigBytes(s string) (*big.Int, error) { - lastDigit := 0 - hasComma := false - for _, r := range s { - if !(unicode.IsDigit(r) || r == '.' 
|| r == ',') { - break - } - if r == ',' { - hasComma = true - } - lastDigit++ - } - - num := s[:lastDigit] - if hasComma { - num = strings.Replace(num, ",", "", -1) - } - - val := &big.Rat{} - _, err := fmt.Sscanf(num, "%f", val) - if err != nil { - return nil, err - } - - extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) - if m, ok := bigBytesSizeTable[extra]; ok { - mv := (&big.Rat{}).SetInt(m) - val.Mul(val, mv) - rv := &big.Int{} - rv.Div(val.Num(), val.Denom()) - return rv, nil - } - - return nil, fmt.Errorf("unhandled size name: %v", extra) -} diff --git a/vendor/github.com/dustin/go-humanize/bytes.go b/vendor/github.com/dustin/go-humanize/bytes.go deleted file mode 100644 index 0b498f488..000000000 --- a/vendor/github.com/dustin/go-humanize/bytes.go +++ /dev/null @@ -1,143 +0,0 @@ -package humanize - -import ( - "fmt" - "math" - "strconv" - "strings" - "unicode" -) - -// IEC Sizes. -// kibis of bits -const ( - Byte = 1 << (iota * 10) - KiByte - MiByte - GiByte - TiByte - PiByte - EiByte -) - -// SI Sizes. 
-const ( - IByte = 1 - KByte = IByte * 1000 - MByte = KByte * 1000 - GByte = MByte * 1000 - TByte = GByte * 1000 - PByte = TByte * 1000 - EByte = PByte * 1000 -) - -var bytesSizeTable = map[string]uint64{ - "b": Byte, - "kib": KiByte, - "kb": KByte, - "mib": MiByte, - "mb": MByte, - "gib": GiByte, - "gb": GByte, - "tib": TiByte, - "tb": TByte, - "pib": PiByte, - "pb": PByte, - "eib": EiByte, - "eb": EByte, - // Without suffix - "": Byte, - "ki": KiByte, - "k": KByte, - "mi": MiByte, - "m": MByte, - "gi": GiByte, - "g": GByte, - "ti": TiByte, - "t": TByte, - "pi": PiByte, - "p": PByte, - "ei": EiByte, - "e": EByte, -} - -func logn(n, b float64) float64 { - return math.Log(n) / math.Log(b) -} - -func humanateBytes(s uint64, base float64, sizes []string) string { - if s < 10 { - return fmt.Sprintf("%d B", s) - } - e := math.Floor(logn(float64(s), base)) - suffix := sizes[int(e)] - val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10 - f := "%.0f %s" - if val < 10 { - f = "%.1f %s" - } - - return fmt.Sprintf(f, val, suffix) -} - -// Bytes produces a human readable representation of an SI size. -// -// See also: ParseBytes. -// -// Bytes(82854982) -> 83 MB -func Bytes(s uint64) string { - sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"} - return humanateBytes(s, 1000, sizes) -} - -// IBytes produces a human readable representation of an IEC size. -// -// See also: ParseBytes. -// -// IBytes(82854982) -> 79 MiB -func IBytes(s uint64) string { - sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"} - return humanateBytes(s, 1024, sizes) -} - -// ParseBytes parses a string representation of bytes into the number -// of bytes it represents. -// -// See Also: Bytes, IBytes. -// -// ParseBytes("42 MB") -> 42000000, nil -// ParseBytes("42 mib") -> 44040192, nil -func ParseBytes(s string) (uint64, error) { - lastDigit := 0 - hasComma := false - for _, r := range s { - if !(unicode.IsDigit(r) || r == '.' 
|| r == ',') { - break - } - if r == ',' { - hasComma = true - } - lastDigit++ - } - - num := s[:lastDigit] - if hasComma { - num = strings.Replace(num, ",", "", -1) - } - - f, err := strconv.ParseFloat(num, 64) - if err != nil { - return 0, err - } - - extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) - if m, ok := bytesSizeTable[extra]; ok { - f *= float64(m) - if f >= math.MaxUint64 { - return 0, fmt.Errorf("too large: %v", s) - } - return uint64(f), nil - } - - return 0, fmt.Errorf("unhandled size name: %v", extra) -} diff --git a/vendor/github.com/dustin/go-humanize/comma.go b/vendor/github.com/dustin/go-humanize/comma.go deleted file mode 100644 index 520ae3e57..000000000 --- a/vendor/github.com/dustin/go-humanize/comma.go +++ /dev/null @@ -1,116 +0,0 @@ -package humanize - -import ( - "bytes" - "math" - "math/big" - "strconv" - "strings" -) - -// Comma produces a string form of the given number in base 10 with -// commas after every three orders of magnitude. -// -// e.g. Comma(834142) -> 834,142 -func Comma(v int64) string { - sign := "" - - // Min int64 can't be negated to a usable value, so it has to be special cased. - if v == math.MinInt64 { - return "-9,223,372,036,854,775,808" - } - - if v < 0 { - sign = "-" - v = 0 - v - } - - parts := []string{"", "", "", "", "", "", ""} - j := len(parts) - 1 - - for v > 999 { - parts[j] = strconv.FormatInt(v%1000, 10) - switch len(parts[j]) { - case 2: - parts[j] = "0" + parts[j] - case 1: - parts[j] = "00" + parts[j] - } - v = v / 1000 - j-- - } - parts[j] = strconv.Itoa(int(v)) - return sign + strings.Join(parts[j:], ",") -} - -// Commaf produces a string form of the given number in base 10 with -// commas after every three orders of magnitude. -// -// e.g. 
Commaf(834142.32) -> 834,142.32 -func Commaf(v float64) string { - buf := &bytes.Buffer{} - if v < 0 { - buf.Write([]byte{'-'}) - v = 0 - v - } - - comma := []byte{','} - - parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".") - pos := 0 - if len(parts[0])%3 != 0 { - pos += len(parts[0]) % 3 - buf.WriteString(parts[0][:pos]) - buf.Write(comma) - } - for ; pos < len(parts[0]); pos += 3 { - buf.WriteString(parts[0][pos : pos+3]) - buf.Write(comma) - } - buf.Truncate(buf.Len() - 1) - - if len(parts) > 1 { - buf.Write([]byte{'.'}) - buf.WriteString(parts[1]) - } - return buf.String() -} - -// CommafWithDigits works like the Commaf but limits the resulting -// string to the given number of decimal places. -// -// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3 -func CommafWithDigits(f float64, decimals int) string { - return stripTrailingDigits(Commaf(f), decimals) -} - -// BigComma produces a string form of the given big.Int in base 10 -// with commas after every three orders of magnitude. 
-func BigComma(b *big.Int) string { - sign := "" - if b.Sign() < 0 { - sign = "-" - b.Abs(b) - } - - athousand := big.NewInt(1000) - c := (&big.Int{}).Set(b) - _, m := oom(c, athousand) - parts := make([]string, m+1) - j := len(parts) - 1 - - mod := &big.Int{} - for b.Cmp(athousand) >= 0 { - b.DivMod(b, athousand, mod) - parts[j] = strconv.FormatInt(mod.Int64(), 10) - switch len(parts[j]) { - case 2: - parts[j] = "0" + parts[j] - case 1: - parts[j] = "00" + parts[j] - } - j-- - } - parts[j] = strconv.Itoa(int(b.Int64())) - return sign + strings.Join(parts[j:], ",") -} diff --git a/vendor/github.com/dustin/go-humanize/commaf.go b/vendor/github.com/dustin/go-humanize/commaf.go deleted file mode 100644 index 620690dec..000000000 --- a/vendor/github.com/dustin/go-humanize/commaf.go +++ /dev/null @@ -1,40 +0,0 @@ -// +build go1.6 - -package humanize - -import ( - "bytes" - "math/big" - "strings" -) - -// BigCommaf produces a string form of the given big.Float in base 10 -// with commas after every three orders of magnitude. 
-func BigCommaf(v *big.Float) string { - buf := &bytes.Buffer{} - if v.Sign() < 0 { - buf.Write([]byte{'-'}) - v.Abs(v) - } - - comma := []byte{','} - - parts := strings.Split(v.Text('f', -1), ".") - pos := 0 - if len(parts[0])%3 != 0 { - pos += len(parts[0]) % 3 - buf.WriteString(parts[0][:pos]) - buf.Write(comma) - } - for ; pos < len(parts[0]); pos += 3 { - buf.WriteString(parts[0][pos : pos+3]) - buf.Write(comma) - } - buf.Truncate(buf.Len() - 1) - - if len(parts) > 1 { - buf.Write([]byte{'.'}) - buf.WriteString(parts[1]) - } - return buf.String() -} diff --git a/vendor/github.com/dustin/go-humanize/ftoa.go b/vendor/github.com/dustin/go-humanize/ftoa.go deleted file mode 100644 index 1c62b640d..000000000 --- a/vendor/github.com/dustin/go-humanize/ftoa.go +++ /dev/null @@ -1,46 +0,0 @@ -package humanize - -import ( - "strconv" - "strings" -) - -func stripTrailingZeros(s string) string { - offset := len(s) - 1 - for offset > 0 { - if s[offset] == '.' { - offset-- - break - } - if s[offset] != '0' { - break - } - offset-- - } - return s[:offset+1] -} - -func stripTrailingDigits(s string, digits int) string { - if i := strings.Index(s, "."); i >= 0 { - if digits <= 0 { - return s[:i] - } - i++ - if i+digits >= len(s) { - return s - } - return s[:i+digits] - } - return s -} - -// Ftoa converts a float to a string with no trailing zeros. -func Ftoa(num float64) string { - return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64)) -} - -// FtoaWithDigits converts a float to a string but limits the resulting string -// to the given number of decimal places, and no trailing zeros. 
-func FtoaWithDigits(num float64, digits int) string { - return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits)) -} diff --git a/vendor/github.com/dustin/go-humanize/humanize.go b/vendor/github.com/dustin/go-humanize/humanize.go deleted file mode 100644 index a2c2da31e..000000000 --- a/vendor/github.com/dustin/go-humanize/humanize.go +++ /dev/null @@ -1,8 +0,0 @@ -/* -Package humanize converts boring ugly numbers to human-friendly strings and back. - -Durations can be turned into strings such as "3 days ago", numbers -representing sizes like 82854982 into useful strings like, "83 MB" or -"79 MiB" (whichever you prefer). -*/ -package humanize diff --git a/vendor/github.com/dustin/go-humanize/number.go b/vendor/github.com/dustin/go-humanize/number.go deleted file mode 100644 index dec618659..000000000 --- a/vendor/github.com/dustin/go-humanize/number.go +++ /dev/null @@ -1,192 +0,0 @@ -package humanize - -/* -Slightly adapted from the source to fit go-humanize. - -Author: https://github.com/gorhill -Source: https://gist.github.com/gorhill/5285193 - -*/ - -import ( - "math" - "strconv" -) - -var ( - renderFloatPrecisionMultipliers = [...]float64{ - 1, - 10, - 100, - 1000, - 10000, - 100000, - 1000000, - 10000000, - 100000000, - 1000000000, - } - - renderFloatPrecisionRounders = [...]float64{ - 0.5, - 0.05, - 0.005, - 0.0005, - 0.00005, - 0.000005, - 0.0000005, - 0.00000005, - 0.000000005, - 0.0000000005, - } -) - -// FormatFloat produces a formatted number as string based on the following user-specified criteria: -// * thousands separator -// * decimal separator -// * decimal precision -// -// Usage: s := RenderFloat(format, n) -// The format parameter tells how to render the number n. -// -// See examples: http://play.golang.org/p/LXc1Ddm1lJ -// -// Examples of format strings, given n = 12345.6789: -// "#,###.##" => "12,345.67" -// "#,###." 
=> "12,345" -// "#,###" => "12345,678" -// "#\u202F###,##" => "12 345,68" -// "#.###,###### => 12.345,678900 -// "" (aka default format) => 12,345.67 -// -// The highest precision allowed is 9 digits after the decimal symbol. -// There is also a version for integer number, FormatInteger(), -// which is convenient for calls within template. -func FormatFloat(format string, n float64) string { - // Special cases: - // NaN = "NaN" - // +Inf = "+Infinity" - // -Inf = "-Infinity" - if math.IsNaN(n) { - return "NaN" - } - if n > math.MaxFloat64 { - return "Infinity" - } - if n < -math.MaxFloat64 { - return "-Infinity" - } - - // default format - precision := 2 - decimalStr := "." - thousandStr := "," - positiveStr := "" - negativeStr := "-" - - if len(format) > 0 { - format := []rune(format) - - // If there is an explicit format directive, - // then default values are these: - precision = 9 - thousandStr = "" - - // collect indices of meaningful formatting directives - formatIndx := []int{} - for i, char := range format { - if char != '#' && char != '0' { - formatIndx = append(formatIndx, i) - } - } - - if len(formatIndx) > 0 { - // Directive at index 0: - // Must be a '+' - // Raise an error if not the case - // index: 0123456789 - // +0.000,000 - // +000,000.0 - // +0000.00 - // +0000 - if formatIndx[0] == 0 { - if format[formatIndx[0]] != '+' { - panic("RenderFloat(): invalid positive sign directive") - } - positiveStr = "+" - formatIndx = formatIndx[1:] - } - - // Two directives: - // First is thousands separator - // Raise an error if not followed by 3-digit - // 0123456789 - // 0.000,000 - // 000,000.00 - if len(formatIndx) == 2 { - if (formatIndx[1] - formatIndx[0]) != 4 { - panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers") - } - thousandStr = string(format[formatIndx[0]]) - formatIndx = formatIndx[1:] - } - - // One directive: - // Directive is decimal separator - // The number of digit-specifier following the separator 
indicates wanted precision - // 0123456789 - // 0.00 - // 000,0000 - if len(formatIndx) == 1 { - decimalStr = string(format[formatIndx[0]]) - precision = len(format) - formatIndx[0] - 1 - } - } - } - - // generate sign part - var signStr string - if n >= 0.000000001 { - signStr = positiveStr - } else if n <= -0.000000001 { - signStr = negativeStr - n = -n - } else { - signStr = "" - n = 0.0 - } - - // split number into integer and fractional parts - intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision]) - - // generate integer part string - intStr := strconv.FormatInt(int64(intf), 10) - - // add thousand separator if required - if len(thousandStr) > 0 { - for i := len(intStr); i > 3; { - i -= 3 - intStr = intStr[:i] + thousandStr + intStr[i:] - } - } - - // no fractional part, we can leave now - if precision == 0 { - return signStr + intStr - } - - // generate fractional part - fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision])) - // may need padding - if len(fracStr) < precision { - fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr - } - - return signStr + intStr + decimalStr + fracStr -} - -// FormatInteger produces a formatted number as string. -// See FormatFloat. -func FormatInteger(format string, n int) string { - return FormatFloat(format, float64(n)) -} diff --git a/vendor/github.com/dustin/go-humanize/ordinals.go b/vendor/github.com/dustin/go-humanize/ordinals.go deleted file mode 100644 index 43d88a861..000000000 --- a/vendor/github.com/dustin/go-humanize/ordinals.go +++ /dev/null @@ -1,25 +0,0 @@ -package humanize - -import "strconv" - -// Ordinal gives you the input number in a rank/ordinal format. 
-// -// Ordinal(3) -> 3rd -func Ordinal(x int) string { - suffix := "th" - switch x % 10 { - case 1: - if x%100 != 11 { - suffix = "st" - } - case 2: - if x%100 != 12 { - suffix = "nd" - } - case 3: - if x%100 != 13 { - suffix = "rd" - } - } - return strconv.Itoa(x) + suffix -} diff --git a/vendor/github.com/dustin/go-humanize/si.go b/vendor/github.com/dustin/go-humanize/si.go deleted file mode 100644 index ae659e0e4..000000000 --- a/vendor/github.com/dustin/go-humanize/si.go +++ /dev/null @@ -1,123 +0,0 @@ -package humanize - -import ( - "errors" - "math" - "regexp" - "strconv" -) - -var siPrefixTable = map[float64]string{ - -24: "y", // yocto - -21: "z", // zepto - -18: "a", // atto - -15: "f", // femto - -12: "p", // pico - -9: "n", // nano - -6: "µ", // micro - -3: "m", // milli - 0: "", - 3: "k", // kilo - 6: "M", // mega - 9: "G", // giga - 12: "T", // tera - 15: "P", // peta - 18: "E", // exa - 21: "Z", // zetta - 24: "Y", // yotta -} - -var revSIPrefixTable = revfmap(siPrefixTable) - -// revfmap reverses the map and precomputes the power multiplier -func revfmap(in map[float64]string) map[string]float64 { - rv := map[string]float64{} - for k, v := range in { - rv[v] = math.Pow(10, k) - } - return rv -} - -var riParseRegex *regexp.Regexp - -func init() { - ri := `^([\-0-9.]+)\s?([` - for _, v := range siPrefixTable { - ri += v - } - ri += `]?)(.*)` - - riParseRegex = regexp.MustCompile(ri) -} - -// ComputeSI finds the most appropriate SI prefix for the given number -// and returns the prefix along with the value adjusted to be within -// that prefix. -// -// See also: SI, ParseSI. -// -// e.g. 
ComputeSI(2.2345e-12) -> (2.2345, "p") -func ComputeSI(input float64) (float64, string) { - if input == 0 { - return 0, "" - } - mag := math.Abs(input) - exponent := math.Floor(logn(mag, 10)) - exponent = math.Floor(exponent/3) * 3 - - value := mag / math.Pow(10, exponent) - - // Handle special case where value is exactly 1000.0 - // Should return 1 M instead of 1000 k - if value == 1000.0 { - exponent += 3 - value = mag / math.Pow(10, exponent) - } - - value = math.Copysign(value, input) - - prefix := siPrefixTable[exponent] - return value, prefix -} - -// SI returns a string with default formatting. -// -// SI uses Ftoa to format float value, removing trailing zeros. -// -// See also: ComputeSI, ParseSI. -// -// e.g. SI(1000000, "B") -> 1 MB -// e.g. SI(2.2345e-12, "F") -> 2.2345 pF -func SI(input float64, unit string) string { - value, prefix := ComputeSI(input) - return Ftoa(value) + " " + prefix + unit -} - -// SIWithDigits works like SI but limits the resulting string to the -// given number of decimal places. -// -// e.g. SIWithDigits(1000000, 0, "B") -> 1 MB -// e.g. SIWithDigits(2.2345e-12, 2, "F") -> 2.23 pF -func SIWithDigits(input float64, decimals int, unit string) string { - value, prefix := ComputeSI(input) - return FtoaWithDigits(value, decimals) + " " + prefix + unit -} - -var errInvalid = errors.New("invalid input") - -// ParseSI parses an SI string back into the number and unit. -// -// See also: SI, ComputeSI. -// -// e.g. 
ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil) -func ParseSI(input string) (float64, string, error) { - found := riParseRegex.FindStringSubmatch(input) - if len(found) != 4 { - return 0, "", errInvalid - } - mag := revSIPrefixTable[found[2]] - unit := found[3] - - base, err := strconv.ParseFloat(found[1], 64) - return base * mag, unit, err -} diff --git a/vendor/github.com/dustin/go-humanize/times.go b/vendor/github.com/dustin/go-humanize/times.go deleted file mode 100644 index dd3fbf5ef..000000000 --- a/vendor/github.com/dustin/go-humanize/times.go +++ /dev/null @@ -1,117 +0,0 @@ -package humanize - -import ( - "fmt" - "math" - "sort" - "time" -) - -// Seconds-based time units -const ( - Day = 24 * time.Hour - Week = 7 * Day - Month = 30 * Day - Year = 12 * Month - LongTime = 37 * Year -) - -// Time formats a time into a relative string. -// -// Time(someT) -> "3 weeks ago" -func Time(then time.Time) string { - return RelTime(then, time.Now(), "ago", "from now") -} - -// A RelTimeMagnitude struct contains a relative time point at which -// the relative format of time will switch to a new format string. A -// slice of these in ascending order by their "D" field is passed to -// CustomRelTime to format durations. -// -// The Format field is a string that may contain a "%s" which will be -// replaced with the appropriate signed label (e.g. "ago" or "from -// now") and a "%d" that will be replaced by the quantity. -// -// The DivBy field is the amount of time the time difference must be -// divided by in order to display correctly. -// -// e.g. if D is 2*time.Minute and you want to display "%d minutes %s" -// DivBy should be time.Minute so whatever the duration is will be -// expressed in minutes. 
-type RelTimeMagnitude struct { - D time.Duration - Format string - DivBy time.Duration -} - -var defaultMagnitudes = []RelTimeMagnitude{ - {time.Second, "now", time.Second}, - {2 * time.Second, "1 second %s", 1}, - {time.Minute, "%d seconds %s", time.Second}, - {2 * time.Minute, "1 minute %s", 1}, - {time.Hour, "%d minutes %s", time.Minute}, - {2 * time.Hour, "1 hour %s", 1}, - {Day, "%d hours %s", time.Hour}, - {2 * Day, "1 day %s", 1}, - {Week, "%d days %s", Day}, - {2 * Week, "1 week %s", 1}, - {Month, "%d weeks %s", Week}, - {2 * Month, "1 month %s", 1}, - {Year, "%d months %s", Month}, - {18 * Month, "1 year %s", 1}, - {2 * Year, "2 years %s", 1}, - {LongTime, "%d years %s", Year}, - {math.MaxInt64, "a long while %s", 1}, -} - -// RelTime formats a time into a relative string. -// -// It takes two times and two labels. In addition to the generic time -// delta string (e.g. 5 minutes), the labels are used applied so that -// the label corresponding to the smaller time is applied. -// -// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier" -func RelTime(a, b time.Time, albl, blbl string) string { - return CustomRelTime(a, b, albl, blbl, defaultMagnitudes) -} - -// CustomRelTime formats a time into a relative string. -// -// It takes two times two labels and a table of relative time formats. -// In addition to the generic time delta string (e.g. 5 minutes), the -// labels are used applied so that the label corresponding to the -// smaller time is applied. 
-func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string { - lbl := albl - diff := b.Sub(a) - - if a.After(b) { - lbl = blbl - diff = a.Sub(b) - } - - n := sort.Search(len(magnitudes), func(i int) bool { - return magnitudes[i].D > diff - }) - - if n >= len(magnitudes) { - n = len(magnitudes) - 1 - } - mag := magnitudes[n] - args := []interface{}{} - escaped := false - for _, ch := range mag.Format { - if escaped { - switch ch { - case 's': - args = append(args, lbl) - case 'd': - args = append(args, diff/mag.DivBy) - } - escaped = false - } else { - escaped = ch == '%' - } - } - return fmt.Sprintf(mag.Format, args...) -} diff --git a/vendor/github.com/kataras/tablewriter/.gitignore b/vendor/github.com/kataras/tablewriter/.gitignore deleted file mode 100644 index b66cec635..000000000 --- a/vendor/github.com/kataras/tablewriter/.gitignore +++ /dev/null @@ -1,15 +0,0 @@ -# Created by .ignore support plugin (hsz.mobi) -### Go template -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - diff --git a/vendor/github.com/kataras/tablewriter/.travis.yml b/vendor/github.com/kataras/tablewriter/.travis.yml deleted file mode 100644 index f156b3b87..000000000 --- a/vendor/github.com/kataras/tablewriter/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go - -go: - - 1.1 - - 1.2 - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - 1.7 - - 1.8 - - tip diff --git a/vendor/github.com/kataras/tablewriter/LICENCE.md b/vendor/github.com/kataras/tablewriter/LICENCE.md deleted file mode 100644 index 1fd848425..000000000 --- a/vendor/github.com/kataras/tablewriter/LICENCE.md +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (C) 2014 by Oleku Konko - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software 
without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/kataras/tablewriter/README.md b/vendor/github.com/kataras/tablewriter/README.md deleted file mode 100644 index 59cb86c01..000000000 --- a/vendor/github.com/kataras/tablewriter/README.md +++ /dev/null @@ -1,277 +0,0 @@ -ASCII Table Writer -========= - -[![Build Status](https://travis-ci.org/olekukonko/tablewriter.png?branch=master)](https://travis-ci.org/olekukonko/tablewriter) -[![Total views](https://img.shields.io/sourcegraph/rrc/github.com/olekukonko/tablewriter.svg)](https://sourcegraph.com/github.com/olekukonko/tablewriter) -[![Godoc](https://godoc.org/github.com/olekukonko/tablewriter?status.svg)](https://godoc.org/github.com/olekukonko/tablewriter) - -Generate ASCII table on the fly ... 
Installation is simple as - - go get github.com/olekukonko/tablewriter - - -#### Features -- Automatic Padding -- Support Multiple Lines -- Supports Alignment -- Support Custom Separators -- Automatic Alignment of numbers & percentage -- Write directly to http , file etc via `io.Writer` -- Read directly from CSV file -- Optional row line via `SetRowLine` -- Normalise table header -- Make CSV Headers optional -- Enable or disable table border -- Set custom footer support -- Optional identical cells merging -- Set custom caption -- Optional reflowing of paragrpahs in multi-line cells. - -#### Example 1 - Basic -```go -data := [][]string{ - []string{"A", "The Good", "500"}, - []string{"B", "The Very very Bad Man", "288"}, - []string{"C", "The Ugly", "120"}, - []string{"D", "The Gopher", "800"}, -} - -table := tablewriter.NewWriter(os.Stdout) -table.SetHeader([]string{"Name", "Sign", "Rating"}) - -for _, v := range data { - table.Append(v) -} -table.Render() // Send output -``` - -##### Output 1 -``` -+------+-----------------------+--------+ -| NAME | SIGN | RATING | -+------+-----------------------+--------+ -| A | The Good | 500 | -| B | The Very very Bad Man | 288 | -| C | The Ugly | 120 | -| D | The Gopher | 800 | -+------+-----------------------+--------+ -``` - -#### Example 2 - Without Border / Footer / Bulk Append -```go -data := [][]string{ - []string{"1/1/2014", "Domain name", "2233", "$10.98"}, - []string{"1/1/2014", "January Hosting", "2233", "$54.95"}, - []string{"1/4/2014", "February Hosting", "2233", "$51.00"}, - []string{"1/4/2014", "February Extra Bandwidth", "2233", "$30.00"}, -} - -table := tablewriter.NewWriter(os.Stdout) -table.SetHeader([]string{"Date", "Description", "CV2", "Amount"}) -table.SetFooter([]string{"", "", "Total", "$146.93"}) // Add Footer -table.SetBorder(false) // Set Border to false -table.AppendBulk(data) // Add Bulk Data -table.Render() -``` - -##### Output 2 -``` - - DATE | DESCRIPTION | CV2 | AMOUNT 
-+----------+--------------------------+-------+---------+ - 1/1/2014 | Domain name | 2233 | $10.98 - 1/1/2014 | January Hosting | 2233 | $54.95 - 1/4/2014 | February Hosting | 2233 | $51.00 - 1/4/2014 | February Extra Bandwidth | 2233 | $30.00 -+----------+--------------------------+-------+---------+ - TOTAL | $146 93 - +-------+---------+ - -``` - - -#### Example 3 - CSV -```go -table, _ := tablewriter.NewCSV(os.Stdout, "testdata/test_info.csv", true) -table.SetAlignment(tablewriter.ALIGN_LEFT) // Set Alignment -table.Render() -``` - -##### Output 3 -``` -+----------+--------------+------+-----+---------+----------------+ -| FIELD | TYPE | NULL | KEY | DEFAULT | EXTRA | -+----------+--------------+------+-----+---------+----------------+ -| user_id | smallint(5) | NO | PRI | NULL | auto_increment | -| username | varchar(10) | NO | | NULL | | -| password | varchar(100) | NO | | NULL | | -+----------+--------------+------+-----+---------+----------------+ -``` - -#### Example 4 - Custom Separator -```go -table, _ := tablewriter.NewCSV(os.Stdout, "testdata/test.csv", true) -table.SetRowLine(true) // Enable row line - -// Change table lines -table.SetCenterSeparator("*") -table.SetColumnSeparator("‡") -table.SetRowSeparator("-") - -table.SetAlignment(tablewriter.ALIGN_LEFT) -table.Render() -``` - -##### Output 4 -``` -*------------*-----------*---------* -╪ FIRST NAME ╪ LAST NAME ╪ SSN ╪ -*------------*-----------*---------* -╪ John ╪ Barry ╪ 123456 ╪ -*------------*-----------*---------* -╪ Kathy ╪ Smith ╪ 687987 ╪ -*------------*-----------*---------* -╪ Bob ╪ McCornick ╪ 3979870 ╪ -*------------*-----------*---------* -``` - -#### Example 5 - Markdown Format -```go -data := [][]string{ - []string{"1/1/2014", "Domain name", "2233", "$10.98"}, - []string{"1/1/2014", "January Hosting", "2233", "$54.95"}, - []string{"1/4/2014", "February Hosting", "2233", "$51.00"}, - []string{"1/4/2014", "February Extra Bandwidth", "2233", "$30.00"}, -} - -table := 
tablewriter.NewWriter(os.Stdout) -table.SetHeader([]string{"Date", "Description", "CV2", "Amount"}) -table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false}) -table.SetCenterSeparator("|") -table.AppendBulk(data) // Add Bulk Data -table.Render() -``` - -##### Output 5 -``` -| DATE | DESCRIPTION | CV2 | AMOUNT | -|----------|--------------------------|------|--------| -| 1/1/2014 | Domain name | 2233 | $10.98 | -| 1/1/2014 | January Hosting | 2233 | $54.95 | -| 1/4/2014 | February Hosting | 2233 | $51.00 | -| 1/4/2014 | February Extra Bandwidth | 2233 | $30.00 | -``` - -#### Example 6 - Identical cells merging -```go -data := [][]string{ - []string{"1/1/2014", "Domain name", "1234", "$10.98"}, - []string{"1/1/2014", "January Hosting", "2345", "$54.95"}, - []string{"1/4/2014", "February Hosting", "3456", "$51.00"}, - []string{"1/4/2014", "February Extra Bandwidth", "4567", "$30.00"}, -} - -table := tablewriter.NewWriter(os.Stdout) -table.SetHeader([]string{"Date", "Description", "CV2", "Amount"}) -table.SetFooter([]string{"", "", "Total", "$146.93"}) -table.SetAutoMergeCells(true) -table.SetRowLine(true) -table.AppendBulk(data) -table.Render() -``` - -##### Output 6 -``` -+----------+--------------------------+-------+---------+ -| DATE | DESCRIPTION | CV2 | AMOUNT | -+----------+--------------------------+-------+---------+ -| 1/1/2014 | Domain name | 1234 | $10.98 | -+ +--------------------------+-------+---------+ -| | January Hosting | 2345 | $54.95 | -+----------+--------------------------+-------+---------+ -| 1/4/2014 | February Hosting | 3456 | $51.00 | -+ +--------------------------+-------+---------+ -| | February Extra Bandwidth | 4567 | $30.00 | -+----------+--------------------------+-------+---------+ -| TOTAL | $146 93 | -+----------+--------------------------+-------+---------+ -``` - - -#### Table with color -```go -data := [][]string{ - []string{"1/1/2014", "Domain name", "2233", "$10.98"}, - []string{"1/1/2014", 
"January Hosting", "2233", "$54.95"}, - []string{"1/4/2014", "February Hosting", "2233", "$51.00"}, - []string{"1/4/2014", "February Extra Bandwidth", "2233", "$30.00"}, -} - -table := tablewriter.NewWriter(os.Stdout) -table.SetHeader([]string{"Date", "Description", "CV2", "Amount"}) -table.SetFooter([]string{"", "", "Total", "$146.93"}) // Add Footer -table.SetBorder(false) // Set Border to false - -table.SetHeaderColor(tablewriter.Colors{tablewriter.Bold, tablewriter.BgGreenColor}, - tablewriter.Colors{tablewriter.FgHiRedColor, tablewriter.Bold, tablewriter.BgBlackColor}, - tablewriter.Colors{tablewriter.BgRedColor, tablewriter.FgWhiteColor}, - tablewriter.Colors{tablewriter.BgCyanColor, tablewriter.FgWhiteColor}) - -table.SetColumnColor(tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiBlackColor}, - tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiRedColor}, - tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiBlackColor}, - tablewriter.Colors{tablewriter.Bold, tablewriter.FgBlackColor}) - -table.SetFooterColor(tablewriter.Colors{}, tablewriter.Colors{}, - tablewriter.Colors{tablewriter.Bold}, - tablewriter.Colors{tablewriter.FgHiRedColor}) - -table.AppendBulk(data) -table.Render() -``` - -#### Table with color Output -![Table with Color](https://cloud.githubusercontent.com/assets/6460392/21101956/bbc7b356-c0a1-11e6-9f36-dba694746efc.png) - -#### Example 6 - Set table caption -```go -data := [][]string{ - []string{"A", "The Good", "500"}, - []string{"B", "The Very very Bad Man", "288"}, - []string{"C", "The Ugly", "120"}, - []string{"D", "The Gopher", "800"}, -} - -table := tablewriter.NewWriter(os.Stdout) -table.SetHeader([]string{"Name", "Sign", "Rating"}) -table.SetCaption(true, "Movie ratings.") - -for _, v := range data { - table.Append(v) -} -table.Render() // Send output -``` - -Note: Caption text will wrap with total width of rendered table. 
- -##### Output 6 -``` -+------+-----------------------+--------+ -| NAME | SIGN | RATING | -+------+-----------------------+--------+ -| A | The Good | 500 | -| B | The Very very Bad Man | 288 | -| C | The Ugly | 120 | -| D | The Gopher | 800 | -+------+-----------------------+--------+ -Movie ratings. -``` - -#### TODO -- ~~Import Directly from CSV~~ - `done` -- ~~Support for `SetFooter`~~ - `done` -- ~~Support for `SetBorder`~~ - `done` -- ~~Support table with uneven rows~~ - `done` -- ~~Support custom alignment~~ -- General Improvement & Optimisation -- `NewHTML` Parse table from HTML diff --git a/vendor/github.com/kataras/tablewriter/csv.go b/vendor/github.com/kataras/tablewriter/csv.go deleted file mode 100644 index 98878303b..000000000 --- a/vendor/github.com/kataras/tablewriter/csv.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2014 Oleku Konko All rights reserved. -// Use of this source code is governed by a MIT -// license that can be found in the LICENSE file. - -// This module is a Table Writer API for the Go Programming Language. 
-// The protocols were written in pure Go and works on windows and unix systems - -package tablewriter - -import ( - "encoding/csv" - "io" - "os" -) - -// Start A new table by importing from a CSV file -// Takes io.Writer and csv File name -func NewCSV(writer io.Writer, fileName string, hasHeader bool) (*Table, error) { - file, err := os.Open(fileName) - if err != nil { - return &Table{}, err - } - defer file.Close() - csvReader := csv.NewReader(file) - t, err := NewCSVReader(writer, csvReader, hasHeader) - return t, err -} - -// Start a New Table Writer with csv.Reader -// This enables customisation such as reader.Comma = ';' -// See http://golang.org/src/pkg/encoding/csv/reader.go?s=3213:3671#L94 -func NewCSVReader(writer io.Writer, csvReader *csv.Reader, hasHeader bool) (*Table, error) { - t := NewWriter(writer) - if hasHeader { - // Read the first row - headers, err := csvReader.Read() - if err != nil { - return &Table{}, err - } - t.SetHeader(headers) - } - for { - record, err := csvReader.Read() - if err == io.EOF { - break - } else if err != nil { - return &Table{}, err - } - t.Append(record) - } - return t, nil -} diff --git a/vendor/github.com/kataras/tablewriter/table.go b/vendor/github.com/kataras/tablewriter/table.go deleted file mode 100644 index 22fbb8d32..000000000 --- a/vendor/github.com/kataras/tablewriter/table.go +++ /dev/null @@ -1,884 +0,0 @@ -// Copyright 2014 Oleku Konko All rights reserved. -// Use of this source code is governed by a MIT -// license that can be found in the LICENSE file. - -// This module is a Table Writer API for the Go Programming Language. 
-// The protocols were written in pure Go and works on windows and unix systems - -// Create & Generate text based table -package tablewriter - -import ( - "bytes" - "fmt" - "io" - "regexp" - "strings" -) - -const ( - MAX_ROW_WIDTH = 30 -) - -const ( - CENTER = "+" - ROW = "-" - COLUMN = "|" - SPACE = " " - NEWLINE = "\n" -) - -const ( - ALIGN_DEFAULT = iota - ALIGN_CENTER - ALIGN_RIGHT - ALIGN_LEFT -) - -var ( - decimal = regexp.MustCompile(`^-*\d*\.?\d*$`) - percent = regexp.MustCompile(`^-*\d*\.?\d*$%$`) -) - -type Border struct { - Left bool - Right bool - Top bool - Bottom bool -} - -type Table struct { - out io.Writer - rows [][]string - lines [][][]string - cs map[int]int - rs map[int]int - headers [][]string - footers [][]string - caption bool - captionText string - autoFmt bool - autoWrap bool - reflowText bool - mW int - pCenter string - pRow string - pColumn string - tColumn int - tRow int - hAlign int - fAlign int - align int - newLine string - rowLine bool - autoMergeCells bool - hdrLine bool - borders Border - colSize int - headerParams []string - columnsParams []string - footerParams []string - columnsAlign []int -} - -// Start New Table -// Take io.Writer Directly -func NewWriter(writer io.Writer) *Table { - t := &Table{ - out: writer, - rows: [][]string{}, - lines: [][][]string{}, - cs: make(map[int]int), - rs: make(map[int]int), - headers: [][]string{}, - footers: [][]string{}, - caption: false, - captionText: "Table caption.", - autoFmt: true, - autoWrap: true, - reflowText: true, - mW: MAX_ROW_WIDTH, - pCenter: CENTER, - pRow: ROW, - pColumn: COLUMN, - tColumn: -1, - tRow: -1, - hAlign: ALIGN_DEFAULT, - fAlign: ALIGN_DEFAULT, - align: ALIGN_DEFAULT, - newLine: NEWLINE, - rowLine: false, - hdrLine: true, - borders: Border{Left: true, Right: true, Bottom: true, Top: true}, - colSize: -1, - headerParams: []string{}, - columnsParams: []string{}, - footerParams: []string{}, - columnsAlign: []int{}} - return t -} - -// SetOutput sets the table output 
target. -func (t *Table) SetOutput(w io.Writer) { - t.out = w -} - -// RenderRowOnce renders a single row without actually appending to the existing, so it can be re-used more than one times. -// It will also render without caption, headers and footer, it can be used after the table has been printed at least once. -func (t *Table) RenderRowOnce(row []string) int { - // don't print headers - // but keep the `t.cs` which will help align the rows based on the previous renders. - bckp := t.lines - t.lines = [][][]string{} - - t.Append(row) - - if t.autoMergeCells { - t.printRowsMergeCells() - } else { - t.printRows() - } - - t.lines = bckp - n := t.NumLines() - - return n -} - -// Render table output -func (t *Table) Render() { - if t.borders.Top { - t.printLine(true) - } - t.printHeading() - if t.autoMergeCells { - t.printRowsMergeCells() - } else { - t.printRows() - } - if !t.rowLine && t.borders.Bottom { - t.printLine(true) - } - t.printFooter() - - if t.caption { - t.printCaption() - } -} - -const ( - headerRowIdx = -1 - footerRowIdx = -2 -) - -// Set table header -func (t *Table) SetHeader(keys []string) { - t.colSize = len(keys) - for i, v := range keys { - lines := t.parseDimension(v, i, headerRowIdx) - t.headers = append(t.headers, lines) - } -} - -// Set table Footer -func (t *Table) SetFooter(keys []string) { - //t.colSize = len(keys) - for i, v := range keys { - lines := t.parseDimension(v, i, footerRowIdx) - t.footers = append(t.footers, lines) - } -} - -// Set table Caption -func (t *Table) SetCaption(caption bool, captionText ...string) { - t.caption = caption - if len(captionText) == 1 { - t.captionText = captionText[0] - } -} - -// Turn header autoformatting on/off. Default is on (true). -func (t *Table) SetAutoFormatHeaders(auto bool) { - t.autoFmt = auto -} - -// Turn automatic multiline text adjustment on/off. Default is on (true). 
-func (t *Table) SetAutoWrapText(auto bool) { - t.autoWrap = auto -} - -// Turn automatic reflowing of multiline text when rewrapping. Default is on (true). -func (t *Table) SetReflowDuringAutoWrap(auto bool) { - t.reflowText = auto -} - -// Set the Default column width -func (t *Table) SetColWidth(width int) { - t.mW = width -} - -// Set the minimal width for a column -func (t *Table) SetColMinWidth(column int, width int) { - t.cs[column] = width -} - -// Set the Column Separator -func (t *Table) SetColumnSeparator(sep string) { - t.pColumn = sep -} - -// Set the Row Separator -func (t *Table) SetRowSeparator(sep string) { - t.pRow = sep -} - -// Set the center Separator -func (t *Table) SetCenterSeparator(sep string) { - t.pCenter = sep -} - -// Set Header Alignment -func (t *Table) SetHeaderAlignment(hAlign int) { - t.hAlign = hAlign -} - -// Set Footer Alignment -func (t *Table) SetFooterAlignment(fAlign int) { - t.fAlign = fAlign -} - -// Set Table Alignment -func (t *Table) SetAlignment(align int) { - t.align = align -} - -func (t *Table) SetColumnAlignment(keys []int) { - for _, v := range keys { - switch v { - case ALIGN_CENTER: - break - case ALIGN_LEFT: - break - case ALIGN_RIGHT: - break - default: - v = ALIGN_DEFAULT - } - t.columnsAlign = append(t.columnsAlign, v) - } -} - -// Set New Line -func (t *Table) SetNewLine(nl string) { - t.newLine = nl -} - -// Set Header Line -// This would enable / disable a line after the header -func (t *Table) SetHeaderLine(line bool) { - t.hdrLine = line -} - -// Set Row Line -// This would enable / disable a line on each row of the table -func (t *Table) SetRowLine(line bool) { - t.rowLine = line -} - -// Set Auto Merge Cells -// This would enable / disable the merge of cells with identical values -func (t *Table) SetAutoMergeCells(auto bool) { - t.autoMergeCells = auto -} - -// Set Table Border -// This would enable / disable line around the table -func (t *Table) SetBorder(border bool) { - 
t.SetBorders(Border{border, border, border, border}) -} - -func (t *Table) SetBorders(border Border) { - t.borders = border -} - -func (t *Table) extractLine(row []string) [][]string { - rowSize := len(t.headers) - if rowSize > t.colSize { - t.colSize = rowSize - } - - n := len(t.lines) - line := [][]string{} - for i, v := range row { - - // Detect string width - // Detect String height - // Break strings into words - out := t.parseDimension(v, i, n) - - // Append broken words - line = append(line, out) - } - - return line -} - -// Append row to table -func (t *Table) Append(row []string) { - line := t.extractLine(row) - t.lines = append(t.lines, line) -} - -// Allow Support for Bulk Append -// Eliminates repeated for loops -func (t *Table) AppendBulk(rows [][]string) { - for _, row := range rows { - t.Append(row) - } -} - -// NumLines to get the number of lines -func (t *Table) NumLines() int { - return len(t.lines) -} - -// Clear rows -func (t *Table) ClearRows() { - t.lines = [][][]string{} -} - -// ClearHeaders removes all the table's headers. -func (t *Table) ClearHeaders() { - t.headers = t.headers[0:0] - - // reset the column separators, otherwise we will have empty headers if the length of the new headers is smaller than the current one. 
- if len(t.cs) > 0 { - for k := range t.cs { - delete(t.cs, k) - } - } -} - -// Clear footer -func (t *Table) ClearFooter() { - t.footers = [][]string{} -} - -// Print line based on row width -func (t *Table) printLine(nl bool) { - fmt.Fprint(t.out, t.pCenter) - for i := 0; i < len(t.cs); i++ { - v := t.cs[i] - fmt.Fprintf(t.out, "%s%s%s%s", - t.pRow, - strings.Repeat(string(t.pRow), v), - t.pRow, - t.pCenter) - } - if nl { - fmt.Fprint(t.out, t.newLine) - } -} - -// Print line based on row width with our without cell separator -func (t *Table) printLineOptionalCellSeparators(nl bool, displayCellSeparator []bool) { - fmt.Fprint(t.out, t.pCenter) - for i := 0; i < len(t.cs); i++ { - v := t.cs[i] - if i > len(displayCellSeparator) || displayCellSeparator[i] { - // Display the cell separator - fmt.Fprintf(t.out, "%s%s%s%s", - t.pRow, - strings.Repeat(string(t.pRow), v), - t.pRow, - t.pCenter) - } else { - // Don't display the cell separator for this cell - fmt.Fprintf(t.out, "%s%s", - strings.Repeat(" ", v+2), - t.pCenter) - } - } - if nl { - fmt.Fprint(t.out, t.newLine) - } -} - -// Return the PadRight function if align is left, PadLeft if align is right, -// and Pad by default -func pad(align int) func(string, string, int) string { - padFunc := Pad - switch align { - case ALIGN_LEFT: - padFunc = PadRight - case ALIGN_RIGHT: - padFunc = PadLeft - } - return padFunc -} - -// Print heading information -func (t *Table) printHeading() { - // Check if headers is available - if len(t.headers) < 1 { - return - } - - // Identify last column - end := len(t.cs) - 1 - - // Get pad function - padFunc := pad(t.hAlign) - - // Checking for ANSI escape sequences for header - is_esc_seq := false - if len(t.headerParams) > 0 { - is_esc_seq = true - } - - // Maximum height. 
- max := t.rs[headerRowIdx] - - // Print Heading - for x := 0; x < max; x++ { - // Check if border is set - // Replace with space if not set - fmt.Fprint(t.out, ConditionString(t.borders.Left, t.pColumn, SPACE)) - - for y := 0; y <= end; y++ { - v := t.cs[y] - h := "" - if y < len(t.headers) && x < len(t.headers[y]) { - h = t.headers[y][x] - } - if t.autoFmt { - h = Title(h) - } - pad := ConditionString((y == end && !t.borders.Left), SPACE, t.pColumn) - - if is_esc_seq { - fmt.Fprintf(t.out, " %s %s", - format(padFunc(h, SPACE, v), - t.headerParams[y]), pad) - } else { - fmt.Fprintf(t.out, " %s %s", - padFunc(h, SPACE, v), - pad) - } - } - // Next line - fmt.Fprint(t.out, t.newLine) - } - if t.hdrLine { - t.printLine(true) - } -} - -// Print heading information -func (t *Table) printFooter() { - // Check if headers is available - if len(t.footers) < 1 { - return - } - - // Only print line if border is not set - if !t.borders.Bottom { - t.printLine(true) - } - - // Identify last column - end := len(t.cs) - 1 - - // Get pad function - padFunc := pad(t.fAlign) - - // Checking for ANSI escape sequences for header - is_esc_seq := false - if len(t.footerParams) > 0 { - is_esc_seq = true - } - - // Maximum height. 
- max := t.rs[footerRowIdx] - - // Print Footer - erasePad := make([]bool, len(t.footers)) - for x := 0; x < max; x++ { - // Check if border is set - // Replace with space if not set - fmt.Fprint(t.out, ConditionString(t.borders.Bottom, t.pColumn, SPACE)) - - for y := 0; y <= end; y++ { - v := t.cs[y] - f := "" - if y < len(t.footers) && x < len(t.footers[y]) { - f = t.footers[y][x] - } - if t.autoFmt { - f = Title(f) - } - pad := ConditionString((y == end && !t.borders.Top), SPACE, t.pColumn) - - if erasePad[y] || (x == 0 && len(f) == 0) { - pad = SPACE - erasePad[y] = true - } - - if is_esc_seq { - fmt.Fprintf(t.out, " %s %s", - format(padFunc(f, SPACE, v), - t.footerParams[y]), pad) - } else { - fmt.Fprintf(t.out, " %s %s", - padFunc(f, SPACE, v), - pad) - } - - //fmt.Fprintf(t.out, " %s %s", - // padFunc(f, SPACE, v), - // pad) - } - // Next line - fmt.Fprint(t.out, t.newLine) - //t.printLine(true) - } - - hasPrinted := false - - for i := 0; i <= end; i++ { - v := t.cs[i] - pad := t.pRow - center := t.pCenter - length := len(t.footers[i][0]) - - if length > 0 { - hasPrinted = true - } - - // Set center to be space if length is 0 - if length == 0 && !t.borders.Right { - center = SPACE - } - - // Print first junction - if i == 0 { - fmt.Fprint(t.out, center) - } - - // Pad With space of length is 0 - if length == 0 { - pad = SPACE - } - // Ignore left space of it has printed before - if hasPrinted || t.borders.Left { - pad = t.pRow - center = t.pCenter - } - - // Change Center start position - if center == SPACE { - if i < end && len(t.footers[i+1][0]) != 0 { - center = t.pCenter - } - } - - // Print the footer - fmt.Fprintf(t.out, "%s%s%s%s", - pad, - strings.Repeat(string(pad), v), - pad, - center) - - } - - fmt.Fprint(t.out, t.newLine) -} - -// Print caption text -func (t Table) printCaption() { - width := t.getTableWidth() - paragraph, _ := WrapString(t.captionText, width) - for linecount := 0; linecount < len(paragraph); linecount++ { - fmt.Fprintln(t.out, 
paragraph[linecount]) - } -} - -// Calculate the total number of characters in a row -func (t Table) getTableWidth() int { - var chars int - for _, v := range t.cs { - chars += v - } - - // Add chars, spaces, seperators to calculate the total width of the table. - // ncols := t.colSize - // spaces := ncols * 2 - // seps := ncols + 1 - - return (chars + (3 * t.colSize) + 2) -} - -func (t Table) printRows() { - for i, lines := range t.lines { - t.printRow(lines, i) - } -} - -func (t *Table) fillAlignment(num int) { - if len(t.columnsAlign) < num { - t.columnsAlign = make([]int, num) - for i := range t.columnsAlign { - t.columnsAlign[i] = t.align - } - } -} - -// Print Row Information -// Adjust column alignment based on type - -func (t *Table) printRow(columns [][]string, rowIdx int) { - // Get Maximum Height - max := t.rs[rowIdx] - total := len(columns) - - // TODO Fix uneven col size - // if total < t.colSize { - // for n := t.colSize - total; n < t.colSize ; n++ { - // columns = append(columns, []string{SPACE}) - // t.cs[n] = t.mW - // } - //} - - // Pad Each Height - pads := []int{} - - // Checking for ANSI escape sequences for columns - is_esc_seq := false - if len(t.columnsParams) > 0 { - is_esc_seq = true - } - t.fillAlignment(total) - - for i, line := range columns { - length := len(line) - pad := max - length - pads = append(pads, pad) - for n := 0; n < pad; n++ { - columns[i] = append(columns[i], " ") - } - } - //fmt.Println(max, "\n") - for x := 0; x < max; x++ { - for y := 0; y < total; y++ { - - // Check if border is set - fmt.Fprint(t.out, ConditionString((!t.borders.Left && y == 0), SPACE, t.pColumn)) - - fmt.Fprintf(t.out, SPACE) - str := columns[y][x] - - // Embedding escape sequence with column value - if is_esc_seq { - str = format(str, t.columnsParams[y]) - } - - // This would print alignment - // Default alignment would use multiple configuration - switch t.columnsAlign[y] { - case ALIGN_CENTER: // - fmt.Fprintf(t.out, "%s", Pad(str, SPACE, 
t.cs[y])) - case ALIGN_RIGHT: - fmt.Fprintf(t.out, "%s", PadLeft(str, SPACE, t.cs[y])) - case ALIGN_LEFT: - fmt.Fprintf(t.out, "%s", PadRight(str, SPACE, t.cs[y])) - default: - if decimal.MatchString(strings.TrimSpace(str)) || percent.MatchString(strings.TrimSpace(str)) { - fmt.Fprintf(t.out, "%s", PadLeft(str, SPACE, t.cs[y])) - } else { - fmt.Fprintf(t.out, "%s", PadRight(str, SPACE, t.cs[y])) - - // TODO Custom alignment per column - //if max == 1 || pads[y] > 0 { - // fmt.Fprintf(t.out, "%s", Pad(str, SPACE, t.cs[y])) - //} else { - // fmt.Fprintf(t.out, "%s", PadRight(str, SPACE, t.cs[y])) - //} - - } - } - fmt.Fprintf(t.out, SPACE) - } - // Check if border is set - // Replace with space if not set - fmt.Fprint(t.out, ConditionString(t.borders.Left, t.pColumn, SPACE)) - fmt.Fprint(t.out, t.newLine) - } - - if t.rowLine { - t.printLine(true) - } -} - -// Print the rows of the table and merge the cells that are identical -func (t *Table) printRowsMergeCells() { - var previousLine []string - var displayCellBorder []bool - var tmpWriter bytes.Buffer - for i, lines := range t.lines { - // We store the display of the current line in a tmp writer, as we need to know which border needs to be print above - previousLine, displayCellBorder = t.printRowMergeCells(&tmpWriter, lines, i, previousLine) - if i > 0 { //We don't need to print borders above first line - if t.rowLine { - t.printLineOptionalCellSeparators(true, displayCellBorder) - } - } - tmpWriter.WriteTo(t.out) - } - //Print the end of the table - if t.rowLine { - t.printLine(true) - } -} - -// Print Row Information to a writer and merge identical cells. 
-// Adjust column alignment based on type - -func (t *Table) printRowMergeCells(writer io.Writer, columns [][]string, rowIdx int, previousLine []string) ([]string, []bool) { - // Get Maximum Height - max := t.rs[rowIdx] - total := len(columns) - - // Pad Each Height - pads := []int{} - - for i, line := range columns { - length := len(line) - pad := max - length - pads = append(pads, pad) - for n := 0; n < pad; n++ { - columns[i] = append(columns[i], " ") - } - } - - var displayCellBorder []bool - t.fillAlignment(total) - for x := 0; x < max; x++ { - for y := 0; y < total; y++ { - - // Check if border is set - fmt.Fprint(writer, ConditionString((!t.borders.Left && y == 0), SPACE, t.pColumn)) - - fmt.Fprintf(writer, SPACE) - - str := columns[y][x] - - if t.autoMergeCells { - //Store the full line to merge mutli-lines cells - fullLine := strings.Join(columns[y], " ") - if len(previousLine) > y && fullLine == previousLine[y] && fullLine != "" { - // If this cell is identical to the one above but not empty, we don't display the border and keep the cell empty. 
- displayCellBorder = append(displayCellBorder, false) - str = "" - } else { - // First line or different content, keep the content and print the cell border - displayCellBorder = append(displayCellBorder, true) - } - } - - // This would print alignment - // Default alignment would use multiple configuration - switch t.columnsAlign[y] { - case ALIGN_CENTER: // - fmt.Fprintf(writer, "%s", Pad(str, SPACE, t.cs[y])) - case ALIGN_RIGHT: - fmt.Fprintf(writer, "%s", PadLeft(str, SPACE, t.cs[y])) - case ALIGN_LEFT: - fmt.Fprintf(writer, "%s", PadRight(str, SPACE, t.cs[y])) - default: - if decimal.MatchString(strings.TrimSpace(str)) || percent.MatchString(strings.TrimSpace(str)) { - fmt.Fprintf(writer, "%s", PadLeft(str, SPACE, t.cs[y])) - } else { - fmt.Fprintf(writer, "%s", PadRight(str, SPACE, t.cs[y])) - } - } - fmt.Fprintf(writer, SPACE) - } - // Check if border is set - // Replace with space if not set - fmt.Fprint(writer, ConditionString(t.borders.Left, t.pColumn, SPACE)) - fmt.Fprint(writer, t.newLine) - } - - //The new previous line is the current one - previousLine = make([]string, total) - for y := 0; y < total; y++ { - previousLine[y] = strings.Join(columns[y], " ") //Store the full line for multi-lines cells - } - //Returns the newly added line and wether or not a border should be displayed above. - return previousLine, displayCellBorder -} - -func (t *Table) parseDimension(str string, colKey, rowKey int) []string { - var ( - raw []string - maxWidth int - ) - - raw = getLines(str) - maxWidth = 0 - for _, line := range raw { - if w := DisplayWidth(line); w > maxWidth { - maxWidth = w - } - } - - // If wrapping, ensure that all paragraphs in the cell fit in the - // specified width. - if t.autoWrap { - // If there's a maximum allowed width for wrapping, use that. - if maxWidth > t.mW { - maxWidth = t.mW - } - - // In the process of doing so, we need to recompute maxWidth. 
This - // is because perhaps a word in the cell is longer than the - // allowed maximum width in t.mW. - newMaxWidth := maxWidth - newRaw := make([]string, 0, len(raw)) - - if t.reflowText { - // Make a single paragraph of everything. - raw = []string{strings.Join(raw, " ")} - } - for i, para := range raw { - paraLines, _ := WrapString(para, maxWidth) - for _, line := range paraLines { - if w := DisplayWidth(line); w > newMaxWidth { - newMaxWidth = w - } - } - if i > 0 { - newRaw = append(newRaw, " ") - } - newRaw = append(newRaw, paraLines...) - } - raw = newRaw - maxWidth = newMaxWidth - } - - // Store the new known maximum width. - v, ok := t.cs[colKey] - if !ok || v < maxWidth || v == 0 { - t.cs[colKey] = maxWidth - } - - // Remember the number of lines for the row printer. - h := len(raw) - v, ok = t.rs[rowKey] - - if !ok || v < h || v == 0 { - t.rs[rowKey] = h - } - //fmt.Printf("Raw %+v %d\n", raw, len(raw)) - return raw -} diff --git a/vendor/github.com/kataras/tablewriter/table_with_color.go b/vendor/github.com/kataras/tablewriter/table_with_color.go deleted file mode 100644 index 5a4a53ec2..000000000 --- a/vendor/github.com/kataras/tablewriter/table_with_color.go +++ /dev/null @@ -1,134 +0,0 @@ -package tablewriter - -import ( - "fmt" - "strconv" - "strings" -) - -const ESC = "\033" -const SEP = ";" - -const ( - BgBlackColor int = iota + 40 - BgRedColor - BgGreenColor - BgYellowColor - BgBlueColor - BgMagentaColor - BgCyanColor - BgWhiteColor -) - -const ( - FgBlackColor int = iota + 30 - FgRedColor - FgGreenColor - FgYellowColor - FgBlueColor - FgMagentaColor - FgCyanColor - FgWhiteColor -) - -const ( - BgHiBlackColor int = iota + 100 - BgHiRedColor - BgHiGreenColor - BgHiYellowColor - BgHiBlueColor - BgHiMagentaColor - BgHiCyanColor - BgHiWhiteColor -) - -const ( - FgHiBlackColor int = iota + 90 - FgHiRedColor - FgHiGreenColor - FgHiYellowColor - FgHiBlueColor - FgHiMagentaColor - FgHiCyanColor - FgHiWhiteColor -) - -const ( - Normal = 0 - Bold = 1 - 
UnderlineSingle = 4 - Italic -) - -type Colors []int - -func startFormat(seq string) string { - return fmt.Sprintf("%s[%sm", ESC, seq) -} - -func stopFormat() string { - return fmt.Sprintf("%s[%dm", ESC, Normal) -} - -// Making the SGR (Select Graphic Rendition) sequence. -func makeSequence(codes []int) string { - codesInString := []string{} - for _, code := range codes { - codesInString = append(codesInString, strconv.Itoa(code)) - } - return strings.Join(codesInString, SEP) -} - -// Adding ANSI escape sequences before and after string -func format(s string, codes interface{}) string { - var seq string - - switch v := codes.(type) { - - case string: - seq = v - case []int: - seq = makeSequence(v) - default: - return s - } - - if len(seq) == 0 { - return s - } - return startFormat(seq) + s + stopFormat() -} - -// Adding header colors (ANSI codes) -func (t *Table) SetHeaderColor(colors ...Colors) { - if t.colSize != len(colors) { - panic("Number of header colors must be equal to number of headers.") - } - for i := 0; i < len(colors); i++ { - t.headerParams = append(t.headerParams, makeSequence(colors[i])) - } -} - -// Adding column colors (ANSI codes) -func (t *Table) SetColumnColor(colors ...Colors) { - if t.colSize != len(colors) { - panic("Number of column colors must be equal to number of headers.") - } - for i := 0; i < len(colors); i++ { - t.columnsParams = append(t.columnsParams, makeSequence(colors[i])) - } -} - -// Adding column colors (ANSI codes) -func (t *Table) SetFooterColor(colors ...Colors) { - if len(t.footers) != len(colors) { - panic("Number of footer colors must be equal to number of footer.") - } - for i := 0; i < len(colors); i++ { - t.footerParams = append(t.footerParams, makeSequence(colors[i])) - } -} - -func Color(colors ...int) []int { - return colors -} diff --git a/vendor/github.com/kataras/tablewriter/util.go b/vendor/github.com/kataras/tablewriter/util.go deleted file mode 100644 index 9e8f0cbb6..000000000 --- 
a/vendor/github.com/kataras/tablewriter/util.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2014 Oleku Konko All rights reserved. -// Use of this source code is governed by a MIT -// license that can be found in the LICENSE file. - -// This module is a Table Writer API for the Go Programming Language. -// The protocols were written in pure Go and works on windows and unix systems - -package tablewriter - -import ( - "math" - "regexp" - "strings" - - "github.com/mattn/go-runewidth" -) - -var ansi = regexp.MustCompile("\033\\[(?:[0-9]{1,3}(?:;[0-9]{1,3})*)?[m|K]") - -func DisplayWidth(str string) int { - return runewidth.StringWidth(ansi.ReplaceAllLiteralString(str, "")) -} - -// Simple Condition for string -// Returns value based on condition -func ConditionString(cond bool, valid, inValid string) string { - if cond { - return valid - } - return inValid -} - -func isNumOrSpace(r rune) bool { - return ('0' <= r && r <= '9') || r == ' ' -} - -// Format Table Header -// Replace _ , . and spaces -func Title(name string) string { - origLen := len(name) - rs := []rune(name) - for i, r := range rs { - switch r { - case '_': - rs[i] = ' ' - case '.': - // ignore floating number 0.0 - if (i != 0 && !isNumOrSpace(rs[i-1])) || (i != len(rs)-1 && !isNumOrSpace(rs[i+1])) { - rs[i] = ' ' - } - } - } - name = string(rs) - name = strings.TrimSpace(name) - if len(name) == 0 && origLen > 0 { - // Keep at least one character. This is important to preserve - // empty lines in multi-line headers/footers. 
- name = " " - } - return strings.ToUpper(name) -} - -// Pad String -// Attempts to play string in the center -func Pad(s, pad string, width int) string { - gap := width - DisplayWidth(s) - if gap > 0 { - gapLeft := int(math.Ceil(float64(gap / 2))) - gapRight := gap - gapLeft - return strings.Repeat(string(pad), gapLeft) + s + strings.Repeat(string(pad), gapRight) - } - return s -} - -// Pad String Right position -// This would pace string at the left side fo the screen -func PadRight(s, pad string, width int) string { - gap := width - DisplayWidth(s) - if gap > 0 { - return s + strings.Repeat(string(pad), gap) - } - return s -} - -// Pad String Left position -// This would pace string at the right side fo the screen -func PadLeft(s, pad string, width int) string { - gap := width - DisplayWidth(s) - if gap > 0 { - return strings.Repeat(string(pad), gap) + s - } - return s -} diff --git a/vendor/github.com/kataras/tablewriter/wrap.go b/vendor/github.com/kataras/tablewriter/wrap.go deleted file mode 100644 index a092ee1f7..000000000 --- a/vendor/github.com/kataras/tablewriter/wrap.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2014 Oleku Konko All rights reserved. -// Use of this source code is governed by a MIT -// license that can be found in the LICENSE file. - -// This module is a Table Writer API for the Go Programming Language. -// The protocols were written in pure Go and works on windows and unix systems - -package tablewriter - -import ( - "math" - "strings" - - "github.com/mattn/go-runewidth" -) - -var ( - nl = "\n" - sp = " " -) - -const defaultPenalty = 1e5 - -// Wrap wraps s into a paragraph of lines of length lim, with minimal -// raggedness. 
-func WrapString(s string, lim int) ([]string, int) { - words := strings.Split(strings.Replace(s, nl, sp, -1), sp) - var lines []string - max := 0 - for _, v := range words { - max = runewidth.StringWidth(v) - if max > lim { - lim = max - } - } - for _, line := range WrapWords(words, 1, lim, defaultPenalty) { - lines = append(lines, strings.Join(line, sp)) - } - return lines, lim -} - -// WrapWords is the low-level line-breaking algorithm, useful if you need more -// control over the details of the text wrapping process. For most uses, -// WrapString will be sufficient and more convenient. -// -// WrapWords splits a list of words into lines with minimal "raggedness", -// treating each rune as one unit, accounting for spc units between adjacent -// words on each line, and attempting to limit lines to lim units. Raggedness -// is the total error over all lines, where error is the square of the -// difference of the length of the line and lim. Too-long lines (which only -// happen when a single word is longer than lim units) have pen penalty units -// added to the error. 
-func WrapWords(words []string, spc, lim, pen int) [][]string { - n := len(words) - - length := make([][]int, n) - for i := 0; i < n; i++ { - length[i] = make([]int, n) - length[i][i] = runewidth.StringWidth(words[i]) - for j := i + 1; j < n; j++ { - length[i][j] = length[i][j-1] + spc + runewidth.StringWidth(words[j]) - } - } - nbrk := make([]int, n) - cost := make([]int, n) - for i := range cost { - cost[i] = math.MaxInt32 - } - for i := n - 1; i >= 0; i-- { - if length[i][n-1] <= lim { - cost[i] = 0 - nbrk[i] = n - } else { - for j := i + 1; j < n; j++ { - d := lim - length[i][j-1] - c := d*d + cost[j] - if length[i][j-1] > lim { - c += pen // too-long lines get a worse penalty - } - if c < cost[i] { - cost[i] = c - nbrk[i] = j - } - } - } - } - var lines [][]string - i := 0 - for i < n { - lines = append(lines, words[i:nbrk[i]]) - i = nbrk[i] - } - return lines -} - -// getLines decomposes a multiline string into a slice of strings. -func getLines(s string) []string { - return strings.Split(s, nl) -} diff --git a/vendor/github.com/lensesio/tableprinter/.gitattributes b/vendor/github.com/lensesio/tableprinter/.gitattributes deleted file mode 100644 index af76a77e9..000000000 --- a/vendor/github.com/lensesio/tableprinter/.gitattributes +++ /dev/null @@ -1,2 +0,0 @@ -*.go linguist-language=Go -* text=auto \ No newline at end of file diff --git a/vendor/github.com/lensesio/tableprinter/.gitignore b/vendor/github.com/lensesio/tableprinter/.gitignore deleted file mode 100644 index 8b8e9b8bb..000000000 --- a/vendor/github.com/lensesio/tableprinter/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -.vscode/ -.directory \ No newline at end of file diff --git a/vendor/github.com/lensesio/tableprinter/.travis.yml b/vendor/github.com/lensesio/tableprinter/.travis.yml deleted file mode 100644 index e09538fe5..000000000 --- a/vendor/github.com/lensesio/tableprinter/.travis.yml +++ /dev/null @@ -1,20 +0,0 @@ -language: go -os: - - linux - - osx -go: - - "go1.10" -go_import_path: 
github.com/lensesio/tableprinter -env: - global: - - GOCACHE=off -install: - - go get -t ./... -script: - - go test -v -cover ./... -after_script: - # examples - - cd ./_examples - - go get ./... - - go test -v -cover ./... - - cd ../ \ No newline at end of file diff --git a/vendor/github.com/lensesio/tableprinter/CODE_OF_CONDUCT.md b/vendor/github.com/lensesio/tableprinter/CODE_OF_CONDUCT.md deleted file mode 100644 index a6203f186..000000000 --- a/vendor/github.com/lensesio/tableprinter/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. 
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. 
- -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at https://kataras.rocket.chat. -All complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ \ No newline at end of file diff --git a/vendor/github.com/lensesio/tableprinter/CONTRIBUTING.md b/vendor/github.com/lensesio/tableprinter/CONTRIBUTING.md deleted file mode 100644 index ab68e49ba..000000000 --- a/vendor/github.com/lensesio/tableprinter/CONTRIBUTING.md +++ /dev/null @@ -1,10 +0,0 @@ -# Contributing - -Please make sure that you've read our [Code of Conduct](https://github.com/lensesio/tableprinter/blob/master/CODE_OF_CONDUCT.md) first. - -## PR - -1. Raise an issue at https://github.com/lensesio/tableprinter/issues. -2. Describe the issue, what did you expected to see and what you saw instead. -3. Link the issue in your PR. -4. Wait for response, we might request code changes before accept it. 
\ No newline at end of file diff --git a/vendor/github.com/lensesio/tableprinter/LICENSE b/vendor/github.com/lensesio/tableprinter/LICENSE deleted file mode 100644 index 3f4aed5a4..000000000 --- a/vendor/github.com/lensesio/tableprinter/LICENSE +++ /dev/null @@ -1,185 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - Copyright 2018 lensesio - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/lensesio/tableprinter/NOTICE b/vendor/github.com/lensesio/tableprinter/NOTICE deleted file mode 100644 index 2ed7ebc4a..000000000 --- a/vendor/github.com/lensesio/tableprinter/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Third Party libraries and components ------------------------------------- - -tablewriter : MIT - https://github.com/kataras/tablewriter -go-humanize : MIT - https://github.com/dustin/go-humanize \ No newline at end of file diff --git a/vendor/github.com/lensesio/tableprinter/README.md b/vendor/github.com/lensesio/tableprinter/README.md deleted file mode 100644 index 457098da8..000000000 --- a/vendor/github.com/lensesio/tableprinter/README.md +++ /dev/null @@ -1,87 +0,0 @@ -# TablePrinter - -_tableprinter_ is an intelligent value-to-table formatter and writer. It uses a customized version of [olekukonko/tablewriter](https://github.com/kataras/tablewriter) to render a result table. - -![](color.png) - -It checks every in data and transforms those data(structure values, slices, maps, single lists that may contain different type of values such as go standard values like `int`, `string` even a value that implements the `fmt.Stringer` interface) to a table formatted text and outputs it to an `io.Writer`. Like `encoding/json#Encode` but for tables. 
- -[![build status](https://img.shields.io/travis/lensesio/tableprinter/master.svg?style=flat-square)](https://travis-ci.org/lensesio/tableprinter) [![report card](https://img.shields.io/badge/report%20card-a%2B-ff3333.svg?style=flat-square)](http://goreportcard.com/report/lensesio/tableprinter) [![godoc](https://img.shields.io/badge/godoc%20-reference-0077b3.svg?style=flat-square)](https://godoc.org/github.com/lensesio/tableprinter) -[![examples](https://img.shields.io/badge/learn%20by-examples-0c77e3.svg?style=flat-square)](https://github.com/lensesio/tableprinter/tree/master/_examples) - -## Installation - -The only requirement is the [Go Programming Language](https://golang.org/dl), at least version **1.10**. - -```sh -$ go get -u github.com/lensesio/tableprinter -``` - -```go -package main - -import ( - "os" - "sort" - - "github.com/kataras/tablewriter" - "github.com/lensesio/tableprinter" -) - -type person struct { - Firstname string `header:"first name"` - Lastname string `header:"last name"` -} - -func main() { - printer := tableprinter.New(os.Stdout) - persons := []person{ - {"Chris", "Doukas"}, - {"Georgios", "Callas"}, - {"Ioannis", "Christou"}, - {"Nikolaos", "Doukas"}, - {"Dimitrios", "Dellis"}, - } - - sort.Slice(persons, func(i, j int) bool { - return persons[j].Firstname > persons[i].Firstname - }) - - // Optionally, customize the table, import of the underline 'tablewriter' package is required for that. - printer.BorderTop, printer.BorderBottom, printer.BorderLeft, printer.BorderRight = true, true, true, true - printer.CenterSeparator = "│" - printer.ColumnSeparator = "│" - printer.RowSeparator = "─" - printer.HeaderBgColor = tablewriter.BgBlackColor - printer.HeaderFgColor = tablewriter.FgGreenColor - - // Print the slice of structs as table, as shown above. 
- printer.Print(persons) -} -``` - -### Examples - -* [Simple](/_examples/1_simple/main.go) -* [Inline](/_examples/2_inline/main.go) -* [List](/_examples/3_list/main.go) -* [Map](/_examples/4_map/main.go) -* [Struct](/_examples/5_struct/main.go) -* [Manually render a row](/_examples/6_custom_render_row/main.go) -* [JSON](/_examples/7_json_bytes/main.go) -* [Filter output rows](/_examples/8_filters/main.go) -* [Customize the table looking](/_examples/9_customize/main.go) - -## Versioning - -Current: **v0.0.3** - -Read more about Semantic Versioning 2.0.0 - -- http://semver.org/ -- https://en.wikipedia.org/wiki/Software_versioning -- https://wiki.debian.org/UpstreamGuide#Releases_and_Versions - -## License - -Distributed under Apache Version 2.0, see [LICENSE](LICENSE) for more information. diff --git a/vendor/github.com/lensesio/tableprinter/color.png b/vendor/github.com/lensesio/tableprinter/color.png deleted file mode 100644 index 2eb1c7c994aa0c5f1c3c6d05847ae9626b0d238e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 10638 zcmbuFby!qi)b9@8(n<=_CEX0&jdTnp5=w_Oh*Coc2m=f$-Q6+LF?0<% zbMg1az4v|I_vRmGo%6@uXRp2X-p}({-@PK=tH=`KQ{n>vKqxQwP8|R+wC=t~@i6b~ ziBK)p-Rr)KoSr)XJnZ=UU?j6WqyhkXK>poZP4CRTEFVpS>3S$=FeomJIKqc^z@ath z>F1*-!ZC$>HCa^MUlN5h#y!svjv9o`Q&f*7-qdZ>14$E*O87V)fr0@=@DN$f6vM%XHDTR;(w}1g@6AW$nU%YraI|~UJ7Fy^2I{Ky<1FhEZ_+t0 zy(_|I>+@;76b~68ANc<@E1TfnHigU4aAkSkRH7`jkL4FK;GJ~KaPMK-c}W?n%pE?3 z2}nq;Ai>mqccF%L+D&XF4@RZ)(v$^E^={A_zmitl))Q+Eq@^W*7&}t$zh5lgDyc7n zx`lW)#Xsin?mEkCa64{{-_kKDy!E~fzxUo>`)X9MA@YZqfwuT1cmCS+`gC%lvJYfD z$8RiWa_YlH>~U|wpX*ixBtbOqU>wpNO}N^Bkq5suH=~fa_S+O^tH!(#zkr^GrGd{- z=ScAQ8@XSR}?(F6R4ufZzimD93!af z52tt1Y3C(jMr0TDEkncWUfDO&=EIG98x#+K){tl6B{#9ATkLAK1?-~`nQl}*BmkL` ziADc#=1`Xy>4^|btpPmC9PALa*#!?D^d}-{wn(z6NV1o`&T0_Ji=F@w!;CJ}d!0w1 z0eA6g>iWHbf729N#qe$iRWc7~2gu4p106WBLuQnoLA-LeV{9GT-p7*4F)R{43M@Ak zd!tICtr;GM4UA$`KKH5d?X%a8UgR?Dx!3Mp^82GYY|+YE$@aUN;FMjVL!yk7t`qN{ 
zcVe|WZAu2~pn@>Ep!GKE5H-Kel=e4T3|8Gx{SP&toZY~gP0QW38#)jwt#fRzef9xS zGF|Q$xdLbge!2Fo`|QX10u}cyBTa05cO%vASz7!p{!}p7;v%N5u}0VPmwvEK_~& zX`FFWWjiEb*fR5?ph4kP`Z66X!63j#p-MAAAB~rH@2Xquw)2ssF*-t$J+mqtwWLV* zfzm|n&^>|9upRMQ%XrQz?FxTd5g3{FTQ-{Q5jlX`I8!^}p*(g9te-ytClzP9KbGiO zFb~F-!YLF*LGAl|->|pdKO(~bf(mCsv1MilO}oXHiWGPqn=5s3eC?>{9W>sxE`TXK ztN239w^`#o2)HGAzSC-e_)oV#qa=$rXH*A*@SLlaAj|3v@xY&Dox1LvX+~kMx8mmT z!J|W*_ZbDM;nv@_VTbkj-^`}g{fKKlR5qSIV5i^_aztTJ5WK`{wV|OB-wRs8X>2WG zEZ5AgS)ZrP0?TQ48S*k%y=$QB7MWQpO_@wfI~t=vC8fR&Xb6PS)0^!w1@ncB!iz0m zS?P_M5wFRX9oE)n#}5m*Z~FR#2F|>*iY@1;AI~)ncYc_t-t@8mxBovAx7$QN`F(0c zML87G)(~7NUs@v*sQ5JQ^04I7jPj3Eobb(TM-KdliKEQ0$wvt-uLm||t`7%r*x@sp zwKZ>NU*S*L{itLX68NH_Bj9t`O--|n*nAi6Cz+4Yh>c+;rMg)p-kOxDfb{_BBTPH= zMH8wup`1oG6fNtYLexGn?cLAQA59&@MD7hq$X2jEY0}gFQeM~qx%r@jE-g0~A*)&NAlu7Ig5jTB@3Kd)K>AhV(uMJ5f3PJ8IfC zUO{Lk%T!E@l^Q4@ALrqV0x%OpASCa^YIv{Th=ax|D%L*m4VR83y{K7`SngP`hzK@4%tR-p+ zt?Ng)$u$QpuSG~PU{HQ)u)6J^oA{FYM4&?L?2y^}xMMU>4e_|0@9r>klMiozOT3^QT>X(A`#h!Snbmn>knnJ_e#Lp7O&@S1g@>tZ&{;$`4_wVzYSmi6cZ;9FLxtH^PQ-Y{%c&*xqy_qJ^>;&W=rk z%0YtHoP#CKBF2kC^}U&gvhK7j_#jw${}IpRz1qMgN(t&e*njjdnP~5}tYpz(%S!)& z&af*gXW<&Hbv*i1^xnJuYmZF6rBBw=1I(@&1wRT)e-z%Bzx{pw{S2YK%R?1ItEGM@ zzZGHRUagzl%=Mh15jN$7Tj$5ubr`9uk)5Y zx{3F!d00Su$U&!bAeI@&2`9+vBf zDC0V)zjk(+>HDmN%s75MSGHe`hxI!Q8^kW%cXMI*%MPD_iMvQ0pgFwN`mLlf zSAOnMT2ZEm7D*E4dT9A>`~0*oQ;O@Df0V#?IR>FyM-oDg^k&ZUs0jdK^SvhEzS|Fh zZQz9%Ex|}!gRAT=qgkl5<<|hr?e-I#5lHIR&5Zh6Pxb5w$4!UZ%|P{HPo568tv zQb&rM2R@%7?lrRbiFnF+>pv$+V!-d~ehdKNoV{}czbE)ZU=PA7w`p_V|2lh9OWC9W z!N~?Wd|L&L&2aI`ABWuX?-)<2JIqF?7Y zHTx$&XWprHKAB|gx`Q%5WNmKwS)ysu@-iFVm-Um~@N^QM^-?xn5gh4tQmI|6Y~Upj zYxVY>Wzd|Q%&KAM_YOCXja!`S3`3V$SL|R?`n!d=U#bjqM6YEC~TaV0$ z7&`>|-QrsI8d_|1bG%#kwVQSwDQK@5Dnk;DpjsuT?U|aKFsgrr5Q!qmdWkvj;)iuv z{i{Es(0M4>qG>fD7{vy9NDvBkdq`xEYrZ}UP9U0y}o3fiCcQSEtjR)r7!U^DcCzt)?+>Ub{z1c4hg?-i8T zdO2gY1|6%Y8akEmeSUrUTAWn_(h7e$KG5tcsU{h0)0Yr`A)Z7A7ygTf9d3ZzWG3BAyyy7#|az&_&SIALb-aB2iB$m{7AJpR_3z z1HJZq57b35!${Xf2B;rci=t%lxF-We0U`NE6jU_RrGUucW-abzc4-!Us*Fj 
z8KvYcRlpna@uTO@>cqjz(C8C0Gp~T*VV9co6X%+8&uyYSf`OI?=FQ{hy$FvpsU_2A zr5~FT*Pl{aItzQ8Ong$9Zn$ehw%Le^jq7ATD2wB|anzQYpREIp3YjbH1sxb4nncyTr`iYRc3I4gEd%N{uOhbxF z(|sI|OY2KZv8#phC$Gl8?ICQoCxQ!O`GhztdL|@YQOa65H`2?FyH5+;engP)jA+i~ znjKw;y^SBO&mgJdg{U-M%TIC#8ol;-pA1&%2vrr&d@j(-MJ?`GpzKIv(0Bas*zz=u zd1sZ=v)zO|(4L=gtL)9K4-EitU&dqm<#^Mtjs~!HiVt}h9GryL6FpgnGkGayi%Yq$ zPK;K}eNK!nn=@WtQx%|6OmBAFO#dYp`N`5E7qZ+(zUg$PI7S^%BR`hf&v0zxjHptcGP+Pe*){{CCw3gsL^9_djZss}oZ_ad=HU~eI` zTXtn-b#6b5qz0WM7q}eW%qA1X%A;=Uwd|qalW2=+uP?0epPvX+3@hNrA3l}QQq}0Z zT~A6qUDL&{bMlk1 zw1IM=mkexd3}Q;GWInG38L+Ylr-uUr7F`ckR~TiLQJuw4O_jD-S)2rh#-nJUCm&7n zO3Ir{XIY~Z6o=v0&*3-Mu?U)_)xFeIM;zoCEbb z-Jz*(t1c6_n$*>&0ngTez0`Zl7x-% z4Y(%{1E1P7$gx$FaZ*)1XxAxSIJZ#R>TLnP_i~;+iJC{9uG-S>foKAuZYB=l%a5Q= z)-kGH27b!XT|JLKGX2=t6WOx&x!o&?uSC}Q_o^j;t>kRDTTKk+|1=JEW$T~GBfpEzjn@an3}V0hJV|4(DuTl$cGN5qkkr8zRJG^ zeTq#{$|Scq@|HMT#&v7iO7;5smXewp5BjF=K9Oi9r#4*ZI98SFgC`JFy*!`e)4a9p zSmW4TvN=yUh|gsJ!qo>>1BYJ^OA~%3ZQVyGKt@rn}7e1UOS!C6-T@TqWmfD_! zJ3I3a4EM9$lx*Gc#DOhg3oJk~Z)+bjmFux-5lS0y{HVzYbfGUf~9B zSUCgE9g(?1HkHv;NU_Q;*P-1VM%msQ_}p|Ma2Or2HatJz)B^RnmE>lzZ)?#KLuvLy-msFRPbw zko$t7?WE_l@x>M$ozb}4Hy(9Hq@O!6E3pNc)m2blLBTA3?$)+IsI}Zk(sv&m?CX#w z7?d1nwc!K2eTA|gm|!d1T9wv9N(`4YJa%rQ-X0{w=s+B4MJBbeM0B~bFEd0kx|4CD ztV&G?$T?oy9orqSG|gC;;5f}dnPmIi{XY&avro%nD*j>k>Lz54Yx0?n^LaU;YI@~K zv8=ZD*ZE;G!C1_on)>5#GD&wqMnB%JdG9c1<_PNyN>&6R0Hn9IsU1csM%4M)#q2>F zxZ?#8I~boGK>>#zOuSYIRGDdJrF6=&DNWG36)$LzPdwE$VP>IGo=x|{Vax77{i%at^NjclP4E$sb1JbB1wGcZ_#1&BT!5I>3^)wcdGSLRXcb$S z9O1_VjBWh_k8)E>>+`|y%c_t4C3U{PJS6-ApCC{h5q6`)Gp;!it~BJz1AD(K;)*rh zZ8%0nRs~mv$|=rI=OlIS4+pGBx6H}R#%U4PK7ZXMYk=O~E~!k!JWol8Y8wdQA8w>? 
z9_2APc^IV{#r*-Po9}OZFfa!`x+q-c5^LCp)5f=WeAC9IZ_z96V`25a(HkxiepCK} zmCouf;%h1s?Ubr&usoaX& zi6dFQ>AOj-fV|opg=GH@({4MP+m%7DE2zVOvR+!wZ|;X4aMZ@hN_1Jfmnx5*a5Vfi zPlKbo{yS~sMy*9Hotq)@;|{u}uY%U#^c@nW-~her+ho4$bmF&M zxG@Vv4CPtLzFj|2+N{wouJJFu7SM^+{9H>* z%0I*Z^e_{>A3{+|8`1Z-o87|m(~av3$m>EfU#}?7m{23jaPd!6$EF&+{gD3lCK0RO zn7ywkNmqx0PA*y&YiU!~F2LFbC;sP7uA@kBw%Rq4_uHPIwtU&*4<>O|%Q8QIRqdcM zWS4j>SWEonz07I!ARe2y*-6O=eK8RcP>T z)Vqsv{&P|2w4nR-5e@yy%!kn9E5v4VaU6pA*Fas9TVU@HS{ceuG6%zQ^CIUn5)F8U z${R#@(navwQZ>I>OoN&aE}ebh0*_iT7`DrKd34J0%kg-L#MzXh8tY6AM;)}4Y94bb zyi(c9^z?c5>x8H2n{M#OVXCG;(Ks5K{o&8UuekVdK3Eizwq8F4` zly>$TLTD&nr_lY)U;PjAveH;c8HeCCuid_%9-srRhAzY|{coFdcKc^5YgdM?i>`jZ z{%5>1Fs=bF3r@0vFhEco4oozO$^aCTI|F~DeFq+i*m8V?MZ zk);$&tN+A7gI7;&AU9V6uxe`qktsQMAdfM13)-Di>Gl~1T0+axo(RmZiTSW-wf*h0 zp6TiF>=*Cqdo@EMqL&iZy|SCa1qUD{tbXsbp~+8v+s6ks_MSwaWf*{*nb(f$6UIVn zewY1m-*>-x*t>g)(zRao8%!I@+yzBQJ899P3TuWx3rbZKr^tzuO1sY}jLI>yHWYZxeA3j_YL~K&gW|R*?1l*!g*o3WC?=qv)^1q76D`Fd#=YlK z1mdrBpCb&r+e-sc>e^a8Wu+High@U=tAn7EC;s_7(aqToHYF85K%Y;k%0tgWEdM zQLg(&zX&$XGxk9HZJ84SLDugLlfnpr-S=XE6h+F{JHYr^^d{Z!)PizE;^$*|yPz`J@JHAYbo-&R07){WU=MEttY2!yw%G;n~fB{CGhf`?)wrmQ-< zt!1h>!sDM=ws$I$t|EOUCC(YHA21o+#v{(9F@d1wn;vIzTc+_KoIamZFoWU)?WB_+ zl=Ho}FWYVy@B;ozqvwf+^8RUlW8Lwn?*_jAAopc{6uxrT4jRg&%)#EH5c%-@WFuG}Lc3!RGtyJnQ!OcCJ^{nHlGbw=dwox5n<6D) zZ&_d(;=d2ii6VPA`S^?=`F)-5X!arQo@}~-X|ZBZHtB(1HrH?T-+27lW+05|XXB~S zoJwS6{l^pOOKY4bVzt6*Mb4<>!`_7b%3-Sl>mWLv?GM6WKyd0c>``U}Zcd<(v_k?& zC~#LP!}o^8EyXL2D}R_Jx{opMB|0;V5W9^%V6d=oKb#J52oLN`ihDGh9yu`=cJ5>n zY4!kGrW%`|@Q${$w>!{xsPWCYheQ$5^QpfZbnTLtPf&&Cwq&ra%xv;}>DGz-KTjdD z`iMLq8`XJ>zE1$lXqqM9Zn3i3ZJyIUnoGYwIR|jJG-Z%njv$HF+~yUBbHoZy@cKFX zVo5&pfjD3s_S7KkohBn}S34OBk_LsSj7@fy*g6kkcmIa@IeUFRnG|Lb7Jlc=x1fH6 zvrfEkb|yiXQJw$av0P2!aA&B5g*Q{q@1`VQRD)q+>?bJbpv^Fu*Y9I!VM;;xgPG`}uZ0f&LzSEq|h#ee5e}lflZta8U66K1}0kG}{UL zC)N)OBc;0t(u!Ozu>&=}#?YL>XK6t0K^!4~_whMCG_>eJY6b2cYXdQT#&#@<_onCUzp_Lc@I%0`LXIOyf z0wEPPtLYP4GixRp@`aP9Qz-(|3$$nr;@Q7PGmQJfLAh{@lKzXzNtR(lvCDcdbO}~o 
zdv{7EM+5?nrVwS9h$@X$sc(r=U$qmBd1rj`8`|-Jlp*IOVVrq^fK+Avm3 zcOtXhDu6>0jc=(10Lan)tePpPP{I3-x)&j&tKPC}$S4la#q6^l~Xsx0C zd1Nil9@lbRXg|hbP)#Bk;GGg5TprE_%7jfiznli-E?bNYHh0B{uUjx$t<2@ep?ijj zWGqKtE$i-oJc@?k%ILO{Waxavkq__&ey)>$s{Py|@3CD07IXL75q6%%b-0-&609l| z%lQqu-F2?T)uIl#oZ#7&Oyxj_**QLbV;y_pI9zc$wp4fQI2$MZBoX=wd=Tw_av)wV znBX9>hW7Nys-BnljlQFZx-71H541eaEC;pKH1)4eeSIOh4oEklu@@9DyfU&ZOEvP=i(3A$jbNbxVIekQM#3 zNgx_ZwUxPu&4Y3Vh(GzlFa8XYDC-LY+zPPl>)x2+GwurRz{)cdx|BsqoS>g0xO8%S zr~DIBO5$kQDLXJ)HyZ)|e!67wVQ7z(F)C8VH_h-qL1c&5Gs2C|b&sKaOct~t@ZsNO z9FcyPVUEvCR`F*NOLkzmZuYG>OOxRJtZGx2)=?Z=o;1K;RFrJQB+$@!b5|hl2#odh z%9n--&$kJq)q3tf^t~a0)bws=p819#s>2BJTs*R>VG_G&9016QxyT6IAnQsras9V} z{x(j1yQ%az^$r*wblgqXAG*bDH?F zB^5j2ESFXrD*_8sXdJHUkTgy3FeXIDP_koD-TJzWQ)x{ZG&=Tn@&?XJ&)JoVMW1vG z%&Zi3yif%)^o`;9*z|c*FFiT9XrKSf>fm9b1+hinYY{OJdeF?bk9&$cM%&PGSBqc^ z%+8*ieJ?%#FOzaPP?i3RyR+4>1H*lpgVw9GgyVm`x*@td12Z*^-gvk0@`^hQ+c*Pj z0G4ODtm2+wFh_eULt{0{c0Qh&30_xr35z7CCs~WT#fSNgUlFB^-jD`J`9&7jTBNw| ztJri)*iG;1LRkKyLuP(lrYiZC;D$ZWdV-X?Rec9I$TdvvAm$^iz=CLp*C`a2b%=BeDw{Y@z{ z+>h5_nfIuMuMnnr_LEwD1>|{Aflm20b~!!n$-zSczGQ(SU6E$h_ZA=aH^O!YN=Oa@ z)%lg?v(6q%7@_#>zHqzmmGC-FZ-51`7U%KgeIY7fV_X1G3bbfjVxsNw{*a?^6zjgo zZ21j;=eVMzv7C8MSC_)=_5;!(XQZQiy_h?!7?uOi>9R%bf@8EF>SEv#u7nXYcgcGM zha6r9D>K24mtXa^h(qo6Qj*3PCe0aSYV`eNdgECrrps}6hS2V^2ri@PtBrE`>_vy7 zvdigfy1>)n;Xc2fdsA#q*WoYl}yRq4c)M*zCEKRcpHnPMG?nz)H2-d?MGb zVm9iB`N^NZYO)Hm`_HC=mE%qXp~7%cV-_ymFtHLJ{cZ?N%^t3*lfinvZM~Ln<*8Kw z)R-9dW|qkMdw)>uiUz>^;6W1R?54gfu*_wR#d`2|M=lX-;k2WHoMpRtq=!OCLcFO zx$+q0(%%G{{i`O-tZ6@NpS!>T>GXPxn)AhdgS(jbk*t zkKxm>@~FuBeD!RdS1JV$o`k`5zAA0h1xEAKoG&oqO6-33x6rG>fc5+JNr|4tJ zawF(0*_#%V|5;{zbBT}5YWciVsF?FTaUh*3{zreHMq4pY-)}#GVg0+;nQtx%1&_}l zce1^8n<}W{IDy~iejs9ioNYm0j^miO&K>M2kO?)K{S{ToO4PG|*O}d{wu21I>}*q! 
zr{||7G^~J|VY|~U4NpG*+_DkSZGc_x*U(z2{f$8wHgAmLbo=84w6kr!{#>V` zaf>>)!h$!Yk=FWgc8SJ;4uNmQ!tI4!Z^y(` zi>9921NSkQjVOEXi8$6eIhBl5b+y#N(QQ|4h1f+y;?rsm^wdw+VxCHcG(5>pp~$}x z$oV7B>nV2DgHPmsJ-f1@mTO!ls`$_928yx|o``qNv5|(#e|#JSLt;K|NGUYiYw^Ar zKP0C!LORFxz~87r81mF{hI8wf?^6LYdHz5+H~Xtz9Kb})gAchS&)gLWyW2lij~nea zz0B&3CDDfw??JTfGal==B$im_o7XHl_QUM2&6t`HcM%XpTO(NOU-W}txUGyQjMRu= zDW64>m}01I94xXO_(7Fy88lgNEySx@T-+R>u9VV Y+99}zh;HOvfh8a>t@5s1(j?@+0m|ZtlmGw# diff --git a/vendor/github.com/lensesio/tableprinter/doc.go b/vendor/github.com/lensesio/tableprinter/doc.go deleted file mode 100644 index b9e62cb50..000000000 --- a/vendor/github.com/lensesio/tableprinter/doc.go +++ /dev/null @@ -1 +0,0 @@ -package tableprinter diff --git a/vendor/github.com/lensesio/tableprinter/json.go b/vendor/github.com/lensesio/tableprinter/json.go deleted file mode 100644 index 48d664117..000000000 --- a/vendor/github.com/lensesio/tableprinter/json.go +++ /dev/null @@ -1,42 +0,0 @@ -package tableprinter - -import ( - "encoding/json" - "reflect" -) - -type jsonParser struct{} - -var byteTyp = reflect.TypeOf([]byte{0x00}[0]) - -func (p *jsonParser) Parse(v reflect.Value, filters []RowFilter) (headers []string, rows [][]string, nums []int) { - var b []byte - - if kind := v.Kind(); kind == reflect.Slice { - if v.Len() > 0 && v.Index(0).Type() == byteTyp { - b = v.Bytes() - } else { - return - } - } else if kind == reflect.String { - b = []byte(v.String()) - } else { - return - } - - var in interface{} // or map[string]interface{} - if err := json.Unmarshal(b, &in); err != nil { - return - } - - if in == nil { - return - } - - inValue := indirectValue(reflect.ValueOf(in)) - if !inValue.IsValid() || reflect.Zero(indirectType(reflect.TypeOf(in))) == inValue { - return - } - - return WhichParser(inValue.Type()).Parse(inValue, filters) -} diff --git a/vendor/github.com/lensesio/tableprinter/map.go b/vendor/github.com/lensesio/tableprinter/map.go deleted file mode 100644 index 7060e937a..000000000 --- 
a/vendor/github.com/lensesio/tableprinter/map.go +++ /dev/null @@ -1,159 +0,0 @@ -package tableprinter - -import ( - "fmt" - "reflect" -) - -// Should we have a single parser value its specific types and give input arguments to the funcs, like "keys" -// or is better to initialize a new parser on each output, so it can be used as a cache? -type mapParser struct { - TagsOnly bool -} - -func (p *mapParser) Parse(v reflect.Value, filters []RowFilter) ([]string, [][]string, []int) { - keys := p.Keys(v) - if len(keys) == 0 { - return nil, nil, nil - } - - headers := p.ParseHeaders(v, keys) - rows, numbers := p.ParseRows(v, keys, filters) - - return headers, rows, numbers -} - -func (p *mapParser) Keys(v reflect.Value) []reflect.Value { - return v.MapKeys() -} - -func extendSlice(slice reflect.Value, typ reflect.Type, max int) reflect.Value { - if slice.Len() == max { - return slice - } - - empty := reflect.New(typ).Elem() - if slice.Len() == 0 { - for max > 0 { - slice = reflect.Append(slice, empty) - max-- - } - return slice - } - - for max > slice.Len() { - slice = reflect.Append(slice, empty) - } - - return slice -} - -func (p *mapParser) ParseRows(v reflect.Value, keys []reflect.Value, filters []RowFilter) ([][]string, []int) { - // cursors := make(map[int]int) // key = map's key index(although maps don't keep order), value = current index of elements inside the map. - maxLength := maxMapElemLength(v, keys) - - rows := make([][]string, maxLength) - // depends on the header size, this is for the entire col aligment but - // we can't do that on `GetHeaders` because its values depends on the rows[index] value's type to the table. 
- numbers := make([]int, 0) - - for _, key := range keys { - - elem := v.MapIndex(key) - if elem.Kind() != reflect.Slice { - if !CanAcceptRow(elem, filters) { - continue - } - - a, row := extractCells(0, emptyHeader, elem, p.TagsOnly) - if len(row) == 0 { - continue - } - - if cap(rows) == 0 { - rows = [][]string{row} - } else { - rows[0] = append(rows[0], row...) - } - - numbers = append(numbers, a...) - continue - } - - n := elem.Len() - if n == 0 { - continue - } - - if elem.Len() < maxLength { - elem = extendSlice(elem, elem.Index(0).Type(), maxLength) - } - - for i, n := 0, elem.Len(); i < n; i++ { - item := elem.Index(i) - if !CanAcceptRow(item, filters) { - continue - } - - a, row := extractCells(i, emptyHeader, item, p.TagsOnly) - - if len(row) == 0 { - continue - } - - rows[i] = append(rows[i], row...) - numbers = append(numbers, a...) - } - } - - return rows, numbers -} - -func (p *mapParser) ParseHeaders(v reflect.Value, keys []reflect.Value) (headers []string) { - if len(keys) == 0 { - return nil - } - - for _, key := range keys { - // support any type, even if it's declared as "interface{}" or pointer to something, we care about this "something"'s value. 
- key = indirectValue(key) - if !key.CanInterface() { - continue - } - - if header := stringValue(key); header != "" { - headers = append(headers, header) - } - } - - return -} - -func maxMapElemLength(v reflect.Value, keys []reflect.Value) (max int) { - for _, key := range keys { - elem := v.MapIndex(key) - if elem.Kind() != reflect.Slice { - continue - } - if current := elem.Len(); current > max { - max = current - } - } - - return -} - -func stringValue(key reflect.Value) string { - if !key.CanInterface() { - return "" - } - - switch keyV := key.Interface().(type) { - case string: - return keyV - case fmt.Stringer: - return keyV.String() - default: - return "" - } -} diff --git a/vendor/github.com/lensesio/tableprinter/parser.go b/vendor/github.com/lensesio/tableprinter/parser.go deleted file mode 100644 index f391cd715..000000000 --- a/vendor/github.com/lensesio/tableprinter/parser.go +++ /dev/null @@ -1,65 +0,0 @@ -package tableprinter - -import ( - "reflect" -) - -// Parser should be implemented by all available reflect-based parsers. -// -// See `StructParser`(struct{} type), `SliceParser`(slice[] type), `MapParser`(map type) and `JSONParser`(any type). -// Manually registering of a parser is a valid option (although not a recommendation), see `RegisterParser` for more. -type Parser interface { - // Why not `ParseRows` and `ParseHeaders`? - // Because type map has not a specific order, order can change at different runtimes, - // so we must keep record on the keys order the first time we fetche them (=> see `MapParser#ParseRows`, `MapParser#ParseHeaders`). - Parse(v reflect.Value, filters []RowFilter) (headers []string, rows [][]string, numbers []int) -} - -// The built'n type parsers, all except `JSONParser` are directly linked to the `Print/PrintHeadList` functions. 
-var ( - StructParser = &structParser{TagsOnly: true} - SliceParser = &sliceParser{TagsOnly: true} - MapParser = &mapParser{TagsOnly: false} - JSONParser = new(jsonParser) -) - -// WhichParser returns the available `Parser` for the "typ" type; Slice, Map, Struct... -func WhichParser(typ reflect.Type) Parser { - if p, ok := availableParsers[typ.Kind()]; ok { - return p - } - return nil // it can return nil. -} - -var availableParsers = map[reflect.Kind]Parser{ - reflect.Struct: StructParser, - reflect.Slice: SliceParser, - reflect.Map: MapParser, -} - -// RegisterParser sets a parser based on its kind of type. -// It overrides any existing element on that kind, each Parser reflects a single kind of type. -// -// It can be used at the initialization of the program to register a custom Parser, see `StructParser` for example. -// It's not designed to be safe to use it inside many different routines at the same time. -func RegisterParser(kind reflect.Kind, parser Parser) { - availableParsers[kind] = parser -} - -// like reflect.Indirect but for types and reflect.Interface types too. -func indirectType(typ reflect.Type) reflect.Type { - if kind := typ.Kind(); kind == reflect.Interface || kind == reflect.Ptr { - return typ.Elem() - } - - return typ -} - -// like reflect.Indirect but reflect.Interface values too. 
-func indirectValue(val reflect.Value) reflect.Value { - if kind := val.Kind(); kind == reflect.Interface || kind == reflect.Ptr { - return val.Elem() - } - - return val -} diff --git a/vendor/github.com/lensesio/tableprinter/quanity_util.go b/vendor/github.com/lensesio/tableprinter/quanity_util.go deleted file mode 100644 index a508d9761..000000000 --- a/vendor/github.com/lensesio/tableprinter/quanity_util.go +++ /dev/null @@ -1,134 +0,0 @@ -package tableprinter - -import ( - "math" - "strconv" - "strings" -) - -var units = [...]string{"K", "M", "B", "T"} - -/* -references & credits: -- https://github.com/DeyV/gotools/blob/master/numbers.go -- https://golang.org/pkg/math/#Modf -*/ -func nearestThousandFormat(num float64) string { - if math.Abs(num) < 999.5 { - xNum := formatNumber(num) - xNumStr := xNum[:len(xNum)-3] - return xNumStr - } - - xNum := formatNumber(num) - xNumStr := xNum[:len(xNum)-3] - xNumCleaned := strings.Replace(xNumStr, ",", " ", -1) - xNumSlice := strings.Fields(xNumCleaned) - count := len(xNumSlice) - 2 - - xPart := units[count] - - afterDecimal := "" - if xNumSlice[1][0] != 0 { - afterDecimal = "." 
+ string(xNumSlice[1][0]) - } - final := xNumSlice[0] + afterDecimal + xPart - return final -} - -func formatNumber(input float64) string { - x := roundInt(input) - xFormatted := numberFormat(float64(x), 2, ".", ",") - return xFormatted -} - -func roundInt(input float64) int { - var result float64 - - if input < 0 { - result = math.Ceil(input - 0.5) - } else { - result = math.Floor(input + 0.5) - } - - i, _ := math.Modf(result) - - return int(i) -} - -func numberFormat(number float64, decimals int, decPoint, thousandsSep string) string { - if math.IsNaN(number) || math.IsInf(number, 0) { - number = 0 - } - - var ret string - var negative bool - - if number < 0 { - number *= -1 - negative = true - } - - d, fract := math.Modf(number) - - if decimals <= 0 { - fract = 0 - } else { - pow := math.Pow(10, float64(decimals)) - fract = roundPrec(fract*pow, 0) - } - - if thousandsSep == "" { - ret = strconv.FormatFloat(d, 'f', 0, 64) - } else if d >= 1 { - var x float64 - for d >= 1 { - d, x = math.Modf(d / 1000) - x = x * 1000 - ret = strconv.FormatFloat(x, 'f', 0, 64) + ret - if d >= 1 { - ret = thousandsSep + ret - } - } - } else { - ret = "0" - } - - fracts := strconv.FormatFloat(fract, 'f', 0, 64) - - for i := len(fracts); i < decimals; i++ { - fracts = "0" + fracts - } - - ret += decPoint + fracts - - if negative { - ret = "-" + ret - } - return ret -} - -func roundPrec(x float64, prec int) float64 { - if math.IsNaN(x) || math.IsInf(x, 0) { - return x - } - - sign := 1.0 - if x < 0 { - sign = -1 - x *= -1 - } - - var rounder float64 - pow := math.Pow(10, float64(prec)) - intermed := x * pow - _, frac := math.Modf(intermed) - - if frac >= 0.5 { - rounder = math.Ceil(intermed) - } else { - rounder = math.Floor(intermed) - } - - return rounder / pow * sign -} diff --git a/vendor/github.com/lensesio/tableprinter/row.go b/vendor/github.com/lensesio/tableprinter/row.go deleted file mode 100644 index ee5031673..000000000 --- a/vendor/github.com/lensesio/tableprinter/row.go 
+++ /dev/null @@ -1,391 +0,0 @@ -package tableprinter - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" - "strconv" - "strings" - "sync" - "time" - - "github.com/dustin/go-humanize" -) - -const ( - // HeaderTag usage: Field string `header:"Name"` - HeaderTag = "header" - // InlineHeaderTag usage: Embedded Struct `header:"inline"` - InlineHeaderTag = "inline" - // NumberHeaderTag usage: NumberButString string `header:"Age,number"` - NumberHeaderTag = "number" - // CountHeaderTag usage: List []any `header:"MyList,count"` - CountHeaderTag = "count" - // ForceTextHeaderTag usage: ID int `header:"ID,text"` - ForceTextHeaderTag = "text" - - // TimestampHeaderTag usage: Timestamp int64 `json:"timestamp" yaml:"Timestamp" header:"At,timestamp(ms|utc|02 Jan 2006 15:04)"` - TimestampHeaderTag = "timestamp" - // TimestampFromMillisecondsHeaderTag usage: Timestamp int64 `header:"Start,timestamp(ms)"` - TimestampFromMillisecondsHeaderTag = "ms" - // TimestampAsUTCHeaderTag usage: Timestamp int64 `header:"Start,timestamp(ms|utc)"` - TimestampAsUTCHeaderTag = "utc" - // TimestampAsLocalHeaderTag usage: Timestamp int64 `header:"Start,timestamp(ms|local)"` - TimestampAsLocalHeaderTag = "local" - // TimestampFormatHumanHeaderTag usage: Timestamp int64 `header:"Start,timestamp(ms|utc|human)"` - TimestampFormatHumanHeaderTag = "human" - // TimestampFormatANSICHeaderTag usage: Timestamp int64 `header:"Start,timestamp(ms|utc|ANSIC)"` - TimestampFormatANSICHeaderTag = "ANSIC" - // TimestampFormatUnixDateCHeaderTag usage: Timestamp int64 `header:"Start,timestamp(ms|utc|UnixDate)"` - TimestampFormatUnixDateCHeaderTag = "UnixDate" - // TimestampFormatRubyDateHeaderTag usage: Timestamp int64 `header:"Start,timestamp(ms|utc|RubyDate)"` - TimestampFormatRubyDateHeaderTag = "RubyDate" - // TimestampFormatRFC822HeaderTag usage: Timestamp int64 `header:"Start,timestamp(ms|utc|RFC822)"` - TimestampFormatRFC822HeaderTag = "RFC822" - // TimestampFormatRFC822ZHeaderTag usage: Timestamp int64 
`header:"Start,timestamp(ms|utc|RFC822Z)"` - TimestampFormatRFC822ZHeaderTag = "RFC822Z" - // TimestampFormatRFC850HeaderTag usage: Timestamp int64 `header:"Start,timestamp(ms|utc|RFC850)"` - TimestampFormatRFC850HeaderTag = "RFC850" - // TimestampFormatRFC1123HeaderTag usage: Timestamp int64 `header:"Start,timestamp(ms|utc|RFC1123)"` - TimestampFormatRFC1123HeaderTag = "RFC1123" - // TimestampFormatRFC1123ZHeaderTag usage: Timestamp int64 `header:"Start,timestamp(ms|utc|RFC1123Z)"` - TimestampFormatRFC1123ZHeaderTag = "RFC1123Z" // default one. - // TimestampFormatRFC3339HeaderTag usage: Timestamp int64 `header:"Start,timestamp(ms|utc|RFC3339)"` - TimestampFormatRFC3339HeaderTag = "RFC3339" - // TimestampFormatARFC3339NanoHeaderTag usage: Timestamp int64 `header:"Start,timestamp(ms|utc|RFC3339Nano)"` - TimestampFormatARFC3339NanoHeaderTag = "RFC3339Nano" - - // DurationHeaderTag usage: Uptime int64 `header:"Uptime,unixduration"` - DurationHeaderTag = "unixduration" - // DateHeaderTag usage: Start string `header:"Start,date"`, the field's value should be formatted as time.RFC3339 - DateHeaderTag = "date" -) - -// RowFilter is the row's filter, accepts the reflect.Value of the custom type, -// and returns true if the particular row can be included in the final result. -type RowFilter func(reflect.Value) bool - -// CanAcceptRow accepts a value of row and a set of filter -// and returns true if it can be printed, otherwise false. -// If no filters passed then it returns true. -func CanAcceptRow(in reflect.Value, filters []RowFilter) bool { - acceptRow := true - for _, filter := range filters { - if filter == nil { - continue - } - - if !filter(in) { - acceptRow = false - break - } - } - - return acceptRow -} - -var ( - rowFilters = make(map[reflect.Type][]RowFilter) - rowFiltersMu sync.RWMutex -) - -// MakeFilters accept a value of row and generic filters and returns a set of typed `RowFilter`. 
-// -// Usage: -// in := reflect.ValueOf(myNewStructValue) -// filters := MakeFilters(in, func(v MyStruct) bool { return _custom logic here_ }) -// if CanAcceptRow(in, filters) { _custom logic here_ } -func MakeFilters(in reflect.Value, genericFilters ...interface{}) (f []RowFilter) { - typ := in.Type() - - rowFiltersMu.RLock() - if cached, has := rowFilters[typ]; has { - rowFiltersMu.RUnlock() - return cached - } - rowFiltersMu.RUnlock() - - for _, filter := range genericFilters { - filterTyp := reflect.TypeOf(filter) - // must be a function that accepts one input argument which is the same of the "v". - if filterTyp.Kind() != reflect.Func || filterTyp.NumIn() != 1 /* not receiver */ { - continue - } - - if filterInTyp := filterTyp.In(0); filterInTyp != in.Type() { - goodElementType := false - if in.Kind() == reflect.Slice { - if in.Len() > 0 { - if filterInTyp == in.Index(0).Type() { - // the slice contains element that is the same as the filter's func, we must allow that for slices because slice parser executes that(correctly) per ELEMENT. - goodElementType = true - } - } - } - if !goodElementType { - continue - } - } - - // must be a function that returns a single boolean value. - if filterTyp.NumOut() != 1 || filterTyp.Out(0).Kind() != reflect.Bool { - continue - } - - filterValue := reflect.ValueOf(filter) - func(filterValue reflect.Value) { - f = append(f, func(in reflect.Value) bool { - out := filterValue.Call([]reflect.Value{in}) - return out[0].Interface().(bool) - }) - }(filterValue) - } - - // insert to cache, even if filters are empty. 
- rowFiltersMu.Lock() - rowFilters[typ] = f - rowFiltersMu.Unlock() - - return -} - -func extractCells(pos int, header StructHeader, v reflect.Value, whenStructTagsOnly bool) (rightCells []int, cells []string) { - if v.IsValid() && v.CanInterface() { - s := "" - vi := v.Interface() - - switch v.Kind() { - case reflect.Int64: - if header.ValueAsTimestamp { - n := vi.(int64) - if n <= 0 { - break - } - - if header.TimestampValue.FromMilliseconds { // to seconds. - n = n / 1000 - } - - t := time.Unix(n, 0) - if t.IsZero() { - break - } - - if header.TimestampValue.UTC { - t = t.UTC() - } else if header.TimestampValue.Local { - t = t.Local() - } - - if header.TimestampValue.Human { - s = humanize.Time(t) - } else { - s = t.Format(header.TimestampValue.Format) - } - - // if !header.ValueAsText { - // rightCells = append(rightCells, pos) - // } - - break - } - - if header.ValueAsDuration { - got := vi.(int64) - if got <= 0 { - break - } - - dif := time.Now().Unix() - got/1000 - t := time.Unix(dif, 0) - dur := time.Since(t) - if dur <= 0 { - break - } - - dur += (100 * time.Millisecond) / 2 - days := (dur / (24 * time.Hour)) - dur = dur % (24 * time.Hour) - hours := dur / time.Hour - dur = dur % time.Hour - minutes := dur / time.Minute - dur = dur % time.Minute - seconds := dur / time.Second - - if days == 1 { - s = fmt.Sprintf("%d day", days) - } else if days > 1 { - s = fmt.Sprintf("%d days", days) - } - - if hours == 1 { - s += fmt.Sprintf(" %d hour", hours) - } else if hours > 1 { - s += fmt.Sprintf(" %d hours", hours) - } - - if minutes == 1 { - s += fmt.Sprintf(" %d minute", minutes) - } else if minutes > 1 { - s += fmt.Sprintf(" %d minutes", minutes) - } - - if seconds >= 30 { - s += fmt.Sprintf(" %d seconds", seconds) - } else if s == "" && seconds > 0 { - s = "few seconds" - } - - // remove first space if any. 
- if s != "" && s[0] == ' ' { - s = s[1:] - } - - break - } - - if !header.ValueAsText { - header.ValueAsNumber = true - rightCells = append(rightCells, pos) - } - - s = fmt.Sprintf("%d", vi) - // fallthrough - case reflect.Int, reflect.Int16, reflect.Int32: - if !header.ValueAsText { - header.ValueAsNumber = true - rightCells = append(rightCells, pos) - } - - s = fmt.Sprintf("%d", vi) - case reflect.Float32, reflect.Float64: - s = fmt.Sprintf("%.2f", vi) - rightCells = append(rightCells, pos) - case reflect.Bool: - if vi.(bool) { - s = "Yes" - } else { - s = "No" - } - case reflect.Slice, reflect.Array: - n := v.Len() - if header.ValueAsCountable { - s = strconv.Itoa(n) - header.ValueAsNumber = true - } else if n == 0 && header.AlternativeValue != "" { - s = header.AlternativeValue - } else { - for fieldSliceIdx, fieldSliceLen := 0, v.Len(); fieldSliceIdx < fieldSliceLen; fieldSliceIdx++ { - vf := v.Index(fieldSliceIdx) - if vf.CanInterface() { - s += fmt.Sprintf("%v", vf.Interface()) - if hasMore := fieldSliceIdx+1 < fieldSliceLen; hasMore { - s += ", " - } - } - } - } - case reflect.Map: - keys := v.MapKeys() - - // it's map but has a ",count" header filter, allow the zeros. - if header.ValueAsCountable { - vi = len(keys) - return extractCells(pos, header, reflect.ValueOf(vi), whenStructTagsOnly) - } - - if len(keys) == 0 { - return - } - - // if keys are string and value can be represented as string without taking too much space, - // then show as key = value\nkey = value... otherwise as show as indented json. 
- for i, key := range keys { - val := v.MapIndex(key) - if keyK := key.Kind(); keyK == reflect.String { - valK := val.Kind() - if valK == reflect.Interface || valK == reflect.Ptr { - val = val.Elem() - valK = val.Kind() - } - - if valK == reflect.Struct || valK == reflect.Slice || valK == reflect.Map || valK == reflect.Array { - continue - } - - valStr := strings.TrimSpace(fmt.Sprintf("%v", val.Interface())) - if valStr == "" { - continue - } - - // strconv.Quote(valStr) - s += key.Interface().(string) + " = " + cellText(valStr, 20) - if i < len(keys)-1 { - s += "\n" - } - } - } - - if s == "" { - b, err := json.MarshalIndent(vi, " ", " ") - if err != nil { - s = fmt.Sprintf("%v", vi) - } else { - b = bytes.Replace(b, []byte("\\u003c"), []byte("<"), -1) - b = bytes.Replace(b, []byte("\\u003e"), []byte(">"), -1) - b = bytes.Replace(b, []byte("\\u0026"), []byte("&"), -1) - s = string(b) - } - } - - default: - switch t := vi.(type) { - // Give priority to String() string functions inside the struct, if it's there then it's the whole cell string, - // otherwise if it's struct it's the fields if TagsOnly == false, useful for dynamic maps. - case fmt.Stringer: - s = t.String() - case struct{}: - rr, rightEmbeddedSlices := getRowFromStruct(reflect.ValueOf(vi), whenStructTagsOnly) - if len(rr) > 0 { - cells = append(cells, rr...) 
- for range rightEmbeddedSlices { - rightCells = append(rightCells, pos) - pos++ - } - - return - } - default: - s = fmt.Sprintf("%v", vi) - } - } - - if header.ValueAsNumber { - sInt64, err := strconv.ParseInt(s, 10, 64) - if err != nil || sInt64 == 0 { - s = header.AlternativeValue - if s == "" { - s = "0" - } - } else { - s = nearestThousandFormat(float64(sInt64)) - } - - rightCells = append(rightCells, pos) - } else if header.ValueAsDate { - t, err := time.Parse(time.RFC3339, s) - if err == nil { - s = t.Format("2006-01-02 15:04:05") - } - } - - if s == "" { - s = header.AlternativeValue - } - - cells = append(cells, s) - } - - return -} diff --git a/vendor/github.com/lensesio/tableprinter/slice.go b/vendor/github.com/lensesio/tableprinter/slice.go deleted file mode 100644 index c6c014338..000000000 --- a/vendor/github.com/lensesio/tableprinter/slice.go +++ /dev/null @@ -1,65 +0,0 @@ -package tableprinter - -import ( - "reflect" -) - -type sliceParser struct { - TagsOnly bool -} - -var emptyStruct = struct{}{} - -func (p *sliceParser) Parse(v reflect.Value, filters []RowFilter) ([]string, [][]string, []int) { - headers := p.ParseHeaders(v) - rows, nums := p.ParseRows(v, filters) - return headers, rows, nums -} - -func (p *sliceParser) ParseRows(v reflect.Value, filters []RowFilter) (rows [][]string, nums []int) { - for i, n := 0, v.Len(); i < n; i++ { - item := indirectValue(v.Index(i)) - if !CanAcceptRow(item, filters) { - continue - } - - if item.Kind() != reflect.Struct { - // if not struct, don't search its fields, just put a row as it's. - c, r := extractCells(i, emptyHeader, indirectValue(item), p.TagsOnly) - rows = append(rows, r) - nums = append(nums, c...) - continue - } - r, c := getRowFromStruct(item, p.TagsOnly) - - nums = append(nums, c...) 
- - rows = append(rows, r) - } - - return -} - -func (p *sliceParser) ParseHeaders(v reflect.Value) (headers []string) { - tmp := make(map[reflect.Type]struct{}) - - for i, n := 0, v.Len(); i < n; i++ { - item := indirectValue(v.Index(i)) - - // no filters. - itemTyp := item.Type() - if _, ok := tmp[itemTyp]; !ok { - // make headers once per type. - tmp[itemTyp] = emptyStruct - hs := extractHeadersFromStruct(itemTyp, p.TagsOnly) - if len(hs) == 0 { - continue - } - for _, h := range hs { - headers = append(headers, h.Name) - } - } - } - - return -} diff --git a/vendor/github.com/lensesio/tableprinter/struct.go b/vendor/github.com/lensesio/tableprinter/struct.go deleted file mode 100644 index cfa28937d..000000000 --- a/vendor/github.com/lensesio/tableprinter/struct.go +++ /dev/null @@ -1,421 +0,0 @@ -package tableprinter - -import ( - "reflect" - "strconv" - "strings" - "sync" - "time" -) - -// StructHeaders are being cached from the root-level structure to print out. -// They can be customized for custom head titles. -// -// Header can also contain the necessary information about its values, useful for its presentation -// such as alignment, alternative value if main is empty, if the row should print the number of elements inside a list or if the column should be formated as number. -var ( - StructHeaders = make(map[reflect.Type][]StructHeader) // type is the root struct. 
- structsHeadersMu sync.RWMutex -) - -type structParser struct { - TagsOnly bool -} - -func (p *structParser) Parse(v reflect.Value, filters []RowFilter) ([]string, [][]string, []int) { - if !CanAcceptRow(v, filters) { - return nil, nil, nil - } - - row, nums := p.ParseRow(v) - - return p.ParseHeaders(v), [][]string{row}, nums -} - -func (p *structParser) ParseHeaders(v reflect.Value) []string { - hs := extractHeadersFromStruct(v.Type(), true) - if len(hs) == 0 { - return nil - } - - headers := make([]string, len(hs)) - for idx := range hs { - headers[idx] = hs[idx].Name - } - - return headers -} - -func (p *structParser) ParseRow(v reflect.Value) ([]string, []int) { - return getRowFromStruct(v, p.TagsOnly) -} - -// TimestampHeaderTagValue the header's value of a "timestamp" header tag functionality. -type TimestampHeaderTagValue struct { - FromMilliseconds bool - - UTC bool - Local bool - - Human bool - - Format string -} - -// StructHeader contains the name of the header extracted from the struct's `HeaderTag` field tag. -type StructHeader struct { - Name string - // Position is the horizontal position (start from zero) of the header. - Position int - - ValueAsNumber bool - ValueAsCountable bool - ValueAsText bool - ValueAsTimestamp bool - TimestampValue TimestampHeaderTagValue - ValueAsDate bool - ValueAsDuration bool - - AlternativeValue string -} - -func extractHeaderFromStructField(f reflect.StructField, pos int, tagsOnly bool) (header StructHeader, ok bool) { - if f.PkgPath != "" { - return // ignore unexported fields. - } - - headerTag := f.Tag.Get(HeaderTag) - if headerTag == "" && tagsOnly { - return emptyHeader, false - } - - // embedded structs are acting like headers appended to the existing(s). 
- if f.Type.Kind() == reflect.Struct { - return emptyHeader, false - } else if headerTag != "" { - if header, ok := extractHeaderFromTag(headerTag); ok { - header.Position = pos - return header, true - } - - } else if !tagsOnly { - return StructHeader{ - Position: pos, - Name: f.Name, - }, true - } - - return emptyHeader, false -} - -func extractHeadersFromStruct(typ reflect.Type, tagsOnly bool) (headers []StructHeader) { - typ = indirectType(typ) - if typ.Kind() != reflect.Struct { - return - } - - // search cache. - structsHeadersMu.RLock() - if cached, has := StructHeaders[typ]; has { - structsHeadersMu.RUnlock() - return cached - } - structsHeadersMu.RUnlock() - - for i, n := 0, typ.NumField(); i < n; i++ { - f := typ.Field(i) - if f.Type.Kind() == reflect.Struct && f.Tag.Get(HeaderTag) == InlineHeaderTag { - hs := extractHeadersFromStruct(f.Type, tagsOnly) - headers = append(headers, hs...) - continue - } - - header, _ := extractHeaderFromStructField(f, i, tagsOnly) - if header.Name != "" { - headers = append(headers, header) - } - } - - if len(headers) > 0 { - // insert to cache if it's valid table. - structsHeadersMu.Lock() - StructHeaders[typ] = headers - structsHeadersMu.Unlock() - } - - return headers -} - -var ( - timeStdFormats = map[string]string{ - TimestampFormatANSICHeaderTag: time.ANSIC, - TimestampFormatUnixDateCHeaderTag: time.UnixDate, - TimestampFormatRubyDateHeaderTag: time.RubyDate, - TimestampFormatRFC822HeaderTag: time.RFC822, - TimestampFormatRFC822ZHeaderTag: time.RFC822Z, // default one. 
- TimestampFormatRFC850HeaderTag: time.RFC850, - TimestampFormatRFC1123HeaderTag: time.RFC1123, - TimestampFormatRFC1123ZHeaderTag: time.RFC1123Z, - TimestampFormatRFC3339HeaderTag: time.RFC3339, - TimestampFormatARFC3339NanoHeaderTag: time.RFC3339Nano, - } - - emptyTimestampHeaderTagValue TimestampHeaderTagValue -) - -func extractTimestampHeader(timestampHeaderTagValue string) (TimestampHeaderTagValue, bool) { - if !strings.HasPrefix(timestampHeaderTagValue, TimestampHeaderTag) { - return emptyTimestampHeaderTagValue, false // should never happen at this state. - } - - if len(timestampHeaderTagValue) == len(TimestampHeaderTag) { - // timestamp without args. - return emptyTimestampHeaderTagValue, true - } - - trail := timestampHeaderTagValue[len(TimestampHeaderTag):] // timestamp:<<(....)>> - if !strings.HasPrefix(trail, "(") || !strings.HasSuffix(trail, ")") { - // invalid format for args, but still a valid simple timestamp. - return emptyTimestampHeaderTagValue, true - } - - t := TimestampHeaderTagValue{} - argsLine := trail[1 : len(trail)-1] - args := strings.Split(argsLine, "|") - for _, arg := range args { - // arg = strings.ToLower(arg) - switch arg { - case TimestampFromMillisecondsHeaderTag: - t.FromMilliseconds = true - case TimestampAsUTCHeaderTag: - t.UTC = true - case TimestampAsLocalHeaderTag: - t.Local = true - case TimestampFormatHumanHeaderTag: - t.Human = true - case // formats are specific. - TimestampFormatANSICHeaderTag, - TimestampFormatUnixDateCHeaderTag, - TimestampFormatRubyDateHeaderTag, - TimestampFormatRFC822HeaderTag, - TimestampFormatRFC822ZHeaderTag, - TimestampFormatRFC850HeaderTag, - TimestampFormatRFC1123HeaderTag, - TimestampFormatRFC1123ZHeaderTag, - TimestampFormatRFC3339HeaderTag, - TimestampFormatARFC3339NanoHeaderTag: - if expectedFormat, ok := timeStdFormats[arg]; ok { - t.Format = expectedFormat - } - default: - // custom format. 
- t.Format = arg - } - } - - if t.Format == "" { - t.Format = TimestampFormatRFC822ZHeaderTag - } - - return t, true -} -func extractHeaderFromTag(headerTag string) (header StructHeader, ok bool) { - if headerTag == "" { - return - } - ok = true - - parts := strings.Split(headerTag, ",") - - // header name is the first part. - header.Name = parts[0] - - if len(parts) > 1 { - for _, hv := range parts[1:] /* except the first part ofc which should be the header value */ { - switch hv { - // any position. - case NumberHeaderTag: - header.ValueAsNumber = true - case CountHeaderTag: - header.ValueAsCountable = true - case ForceTextHeaderTag: - header.ValueAsText = true - case DurationHeaderTag: - header.ValueAsDuration = true - case DateHeaderTag: - header.ValueAsDate = true - default: - if strings.HasPrefix(hv, TimestampHeaderTag) { - header.TimestampValue, header.ValueAsTimestamp = extractTimestampHeader(hv) - continue - } - - header.AlternativeValue = hv - } - } - } - - return -} - -// getRowFromStruct returns the positions of the cells that should be aligned to the right -// and the list of cells(= the values based on the cell's description) based on the "in" value. -func getRowFromStruct(v reflect.Value, tagsOnly bool) (cells []string, rightCells []int) { - typ := v.Type() - j := 0 - - for i, n := 0, typ.NumField(); i < n; i++ { - - f := typ.Field(i) - header, ok := extractHeaderFromStructField(f, j, tagsOnly) - if !ok { - if f.Type.Kind() == reflect.Struct && f.Tag.Get(HeaderTag) == InlineHeaderTag { - fieldValue := indirectValue(v.Field(i)) - c, rc := getRowFromStruct(fieldValue, tagsOnly) - for _, rcc := range rc { - rightCells = append(rightCells, rcc+j) - } - cells = append(cells, c...) - j++ - } - - continue - } - - fieldValue := indirectValue(v.Field(i)) - c, r := extractCells(j, header, fieldValue, tagsOnly) - rightCells = append(rightCells, c...) - cells = append(cells, r...) 
- j++ - } - - return -} - -// RemoveStructHeader will dynamically remove a specific header tag from a struct's field -// based on the "fieldName" which must be a valid exported field of the struct. -// It returns the new, converted, struct value. -// -// If "original" is not a struct or -// the "fieldName" was unable to be found then the "item" is returned as it was before this call. -// -// See `SetStructHeader` too. -func RemoveStructHeader(original interface{}, fieldName string) interface{} { - return SetStructHeader(original, fieldName, "") -} - -// SetStructHeader dynamically sets the "newHeaderValue" to a specific struct's field's header tag's value -// based on the "fieldName" which must be a valid exported field of the struct. -// -// It returns the new, converted, struct value. -// -// If "original" is not a struct or -// the "fieldName" was unable to be found then the "item" is returned as it was before this call. -// -// If the "newValue" is empty then the whole header will be removed, see `RemoveStructHeader` too. -func SetStructHeader(original interface{}, fieldName string, newHeaderValue string) interface{} { - if original == nil { - return nil - } - - typ := indirectType(reflect.TypeOf(original)) - if typ.Kind() != reflect.Struct { - return original - } - - // we will catch only exported fields in order to convert the type successfully, so dynamic length, - // in any case, the unexported fields are not used inside a table at all. - n := typ.NumField() - fs := make([]reflect.StructField, 0) - // 1. copy the struct's fields, we will make a new one based on the "original" - // 2. if the "fieldName" found then we can continue and clear the header tag, otherwise return "original". 
- found := false - if len(newHeaderValue) > 0 && !strings.Contains(newHeaderValue, `"`) { - newHeaderValue = strconv.Quote(newHeaderValue) - } - - for i := 0; i < n; i++ { - f := typ.Field(i) - if f.PkgPath != "" { - continue - } - - if f.Name == fieldName { - found = true - // json:"value" xml:"value header:"value" - // header:"value" - // json:"value" header:"value" - // header:"value" xml:"value" json:"value" - // json:"value" header:"value,options" xml:"value", we need to change only the [start: header:]...[end: last" or space], - // but let's do it without for loops here, search for old value and change it if exists, otherwise append the whole header tag, - // remove that if "newValue" is empty. - oldHeaderValue := f.Tag.Get(HeaderTag) - - if oldHeaderValue == "" { - if newHeaderValue != "" { - line := string(f.Tag) - if line != "" { - line += " " - } - // set the header tag, append to the existing tag value(not just the header one) (if exists a space is prepended before the header tag). - f.Tag = reflect.StructTag(line + HeaderTag + ":" + newHeaderValue) - } else { - // do nothing, new header value is empty and old header tag does not exist. - } - } else { - // quote it. - oldHeaderValue = strconv.Quote(oldHeaderValue) - // simple replace. - tag := string(f.Tag) - if newHeaderValue != "" { - tag = strings.Replace(tag, HeaderTag+":"+oldHeaderValue, HeaderTag+":"+newHeaderValue, 1) - } else { - // should remove the `HeaderTag`(?[space]header:[?value][?space]) if it's there. - // - // note: strings.Join/Split usage, not searching through char looping, although it would be faster but we don't really care here, - // keep it simpler to change. - tagValues := strings.Split(tag, " ") - for j, part := range tagValues { // the whole value. - if strings.HasPrefix(part, HeaderTag+":") { - tagValues = append(tagValues[:j], tagValues[j+1:]...) 
- break - } - } - - tag = strings.Join(tagValues, " ") - } - - f.Tag = reflect.StructTag(tag) - } - } - - fs = append(fs, f) - } - - if !found { - return original - } - - withoutHeaderTyp := reflect.StructOf(fs) - tmp := reflect.New(withoutHeaderTyp).Elem() - - // fill the fields. - v := indirectValue(reflect.ValueOf(original)) - for i := 0; i < n; i++ { - f := typ.Field(i) - if f.PkgPath != "" { - // "original" may have unexported fields, so we check by name, see below. - continue - } - - for j := 0; j < withoutHeaderTyp.NumField(); j++ { - tmpF := withoutHeaderTyp.Field(j) - if tmpF.Name == f.Name { - tmp.Field(j).Set(v.Field(i)) - } - } - } - - return tmp.Interface() - // return indirectValue(reflect.ValueOf(original)).Convert(withoutHeaderTyp).Interface() -} diff --git a/vendor/github.com/lensesio/tableprinter/tableprinter.go b/vendor/github.com/lensesio/tableprinter/tableprinter.go deleted file mode 100644 index 8f3a5113c..000000000 --- a/vendor/github.com/lensesio/tableprinter/tableprinter.go +++ /dev/null @@ -1,467 +0,0 @@ -package tableprinter - -import ( - "fmt" - "io" - "os" - "reflect" - "strings" - - "github.com/kataras/tablewriter" -) - -// Alignment is the alignment type (int). -// -// See `Printer#DefaultColumnAlignment` and `Printer#DefaultColumnAlignment` too. -type Alignment int - -const ( - // AlignDefault is the default alignment (0). - AlignDefault Alignment = iota - // AlignCenter is the center aligment (1). - AlignCenter - // AlignRight is the right aligment (2). - AlignRight - // AlignLeft is the left aligment (3). - AlignLeft -) - -// Printer contains some information about the final table presentation. -// Look its `Print` function for more. -type Printer struct { - // out can not change during its work because the `acquire/release table` must work with only one output target, - // a new printer should be declared for a different output. 
- out io.Writer - - AutoFormatHeaders bool - AutoWrapText bool - - BorderTop, BorderLeft, BorderRight, BorderBottom bool - - HeaderLine bool - HeaderAlignment Alignment - HeaderColors []tablewriter.Colors - HeaderBgColor int - HeaderFgColor int - - RowLine bool - ColumnSeparator string - NewLine string - CenterSeparator string - RowSeparator string - RowCharLimit int - RowTextWrap bool // if RowCharLimit > 0 && RowTextWrap == true then wrap the line otherwise replace the trailing with "...". - - DefaultAlignment Alignment // see `NumbersAlignment` too. - NumbersAlignment Alignment - - RowLengthTitle func(int) bool - AllowRowsOnly bool // if true then `Print/Render` will print the headers even if parsed rows where no found. Useful for putting rows to a table manually. - - table *tablewriter.Table -} - -// Default is the default Table Printer. -var Default = Printer{ - out: os.Stdout, - AutoFormatHeaders: true, - AutoWrapText: false, - - BorderTop: false, - BorderLeft: false, - BorderRight: false, - BorderBottom: false, - - HeaderLine: true, - HeaderAlignment: AlignLeft, - - RowLine: false, /* it could be true as well */ - ColumnSeparator: " ", - NewLine: "\n", - CenterSeparator: " ", /* it could be empty as well */ - RowSeparator: tablewriter.ROW, - RowCharLimit: 60, - RowTextWrap: true, - - DefaultAlignment: AlignLeft, - NumbersAlignment: AlignRight, - - RowLengthTitle: func(rowsLength int) bool { - // if more than 3 then show the length of rows. - return rowsLength > 3 - }, - - AllowRowsOnly: true, -} - -// New creates and initializes a Printer with the default values based on the "w" target writer. -// -// See its `Print`, `PrintHeadList` too. 
-func New(w io.Writer) *Printer { - return &Printer{ - out: w, - - AutoFormatHeaders: Default.AutoFormatHeaders, - AutoWrapText: Default.AutoWrapText, - - BorderTop: Default.BorderTop, - BorderLeft: Default.BorderLeft, - BorderRight: Default.BorderRight, - BorderBottom: Default.BorderBottom, - - HeaderLine: Default.HeaderLine, - HeaderAlignment: Default.HeaderAlignment, - - RowLine: Default.RowLine, - ColumnSeparator: Default.ColumnSeparator, - NewLine: Default.NewLine, - CenterSeparator: Default.CenterSeparator, - RowSeparator: Default.RowSeparator, - RowCharLimit: Default.RowCharLimit, - RowTextWrap: Default.RowTextWrap, - - DefaultAlignment: Default.DefaultAlignment, - NumbersAlignment: Default.NumbersAlignment, - - RowLengthTitle: Default.RowLengthTitle, - AllowRowsOnly: Default.AllowRowsOnly, - } -} - -func (p *Printer) acquireTable() *tablewriter.Table { - table := p.table - if table == nil { - table = tablewriter.NewWriter(p.out) - - // these properties can change until first `Print/Render` call. 
- table.SetAlignment(int(p.DefaultAlignment)) - table.SetAutoFormatHeaders(p.AutoFormatHeaders) - table.SetAutoWrapText(p.AutoWrapText) - table.SetBorders(tablewriter.Border{Top: p.BorderTop, Left: p.BorderLeft, Right: p.BorderRight, Bottom: p.BorderBottom}) - table.SetHeaderLine(p.HeaderLine) - table.SetHeaderAlignment(int(p.HeaderAlignment)) - table.SetRowLine(p.RowLine) - table.SetColumnSeparator(p.ColumnSeparator) - table.SetNewLine(p.NewLine) - table.SetCenterSeparator(p.CenterSeparator) - table.SetRowSeparator(p.RowSeparator) - - p.table = table - } - - return table -} - -func (p *Printer) calculateColumnAlignment(numbersColsPosition []int, size int) []int { - columnAlignment := make([]int, size) - for i := range columnAlignment { - columnAlignment[i] = int(p.DefaultAlignment) - - for _, j := range numbersColsPosition { - if i == j { - columnAlignment[i] = int(p.NumbersAlignment) - break - } - } - } - - return columnAlignment -} - -// Render prints a table based on the rules of this "p" Printer. -// -// It's used to customize manually the parts of a table like the headers. -// If need to append a row after its creation you should create a new `Printer` instance by calling the `New` function -// and use its `RenderRow` instead, because the "w" writer is different on each package-level printer function. -// -// Returns the total amount of rows written to the table. -func Render(w io.Writer, headers []string, rows [][]string, numbersColsPosition []int, reset bool) int { - return New(w).Render(headers, rows, numbersColsPosition, reset) -} - -// TODO: auto-remove headers and columns based on the user's terminal width (static), -// if `getTerminalWidth() == maxWidth` then don't bother, show the expected based on the `PrintXXX` func. -// -// Note that the font size of the terminal is customizable, so don't expect it to work precisely everywhere. 
-const maxWidth = 7680 - -func (p *Printer) calcWidth(k []string) (rowWidth int) { - for _, r := range k { - w := tablewriter.DisplayWidth(r) + len(p.ColumnSeparator) + len(p.CenterSeparator) + len(p.RowSeparator) - rowWidth += w - } - - return -} - -// it "works" but not always, need more research or just let the new `RowCharLimit` and `RowTextWrap` do their job to avoid big table on small terminal. -func (p *Printer) formatTableBasedOnWidth(headers []string, rows [][]string, fontSize int) ([]string, [][]string) { - totalWidthPreCalculated := p.calcWidth(headers) - var rowsWidth int - - for _, rs := range rows { - w := p.calcWidth(rs) - if w > rowsWidth { - rowsWidth = w - } - } - - if rowsWidth > totalWidthPreCalculated { - totalWidthPreCalculated = rowsWidth - } - - pd := float64(fontSize/9) * 1.2 - pdTrail := fontSize + fontSize/3 - totalWidthPreCalculated = int(float64(totalWidthPreCalculated)*pd + float64(pdTrail)) - - termWidth := int(getTerminalWidth()) - if totalWidthPreCalculated > termWidth { - dif := totalWidthPreCalculated - termWidth - difSpace := int(float64(fontSize) * 0.6) - // remove the last element of the rows and the last header. - if dif >= difSpace { - for idx, r := range rows { - rLastIdx := len(r) - 1 - r = append(r[:rLastIdx], r[rLastIdx+1:]...) - rows[idx] = r - } - if len(headers) > 0 { - hLastIdx := len(headers) - 1 - headers = append(headers[:hLastIdx], headers[hLastIdx+1:]...) - } - return p.formatTableBasedOnWidth(headers, rows, fontSize) - } - } - - return headers, rows -} - -// Render prints a table based on the rules of this "p" Printer. -// -// It's used to customize manually the parts of a table like the headers. -// It can be used side by side with the `RenderRow`, first and once `Render`, after and maybe many `RenderRow`. -// -// Returns the total amount of rows written to the table. 
-func (p *Printer) Render(headers []string, rows [][]string, numbersColsPosition []int, reset bool) int { - table := p.acquireTable() - - if reset { - // ClearHeaders added on kataras/tablewriter version, Changes from the original repository: - // https://github.com/olekukonko/tablewriter/compare/master...kataras:master - table.ClearHeaders() - table.ClearRows() - p.HeaderColors = nil - } - - // headers, rows = p.formatTableBasedOnWidth(headers, rows, 11) - - if len(headers) > 0 { - if p.RowLengthTitle != nil && p.RowLengthTitle(len(rows)) { - headers[0] = fmt.Sprintf("%s (%d) ", headers[0], len(rows)) - } - - table.SetHeader(headers) - - // colors must set after headers, depends on the number of headers. - if l := len(p.HeaderColors); l > 0 { - // dev set header color for each header, can panic if not match - table.SetHeaderColor(p.HeaderColors...) - } else if bg, fg := p.HeaderBgColor, p.HeaderFgColor; bg > 0 || fg > 0 { - colors := make([]tablewriter.Colors, len(headers)) - for i := range headers { - colors[i] = tablewriter.Color(bg, fg) - } - p.HeaderColors = colors - table.SetHeaderColor(colors...) - } - - } else if !p.AllowRowsOnly { - return 0 // if not allow to print anything without headers, then exit. 
- } - - if p.RowCharLimit > 0 { - for i, rs := range rows { - rows[i] = p.rowText(rs) - } - } - - table.AppendBulk(rows) - table.SetColumnAlignment(p.calculateColumnAlignment(numbersColsPosition, len(headers))) - - table.Render() - return table.NumLines() -} - -func cellText(cell string, charLimit int) string { - if strings.Contains(cell, "\n") { - if strings.HasSuffix(cell, "\n") { - cell = cell[0 : len(cell)-2] - if len(cell) > charLimit { - return cellText(cell, charLimit) - } - } - - return cell - } - - words := strings.Fields(strings.TrimSpace(cell)) - if len(words) == 0 { - return cell - } - - cell = words[0] - rem := charLimit - len(cell) - for _, w := range words[1:] { - if c := len(w) + 1; c <= rem { - cell += " " + w - rem -= c + 1 // including space. - continue - } - - cell += "\n" + w - rem = charLimit - len(w) - } - - return cell -} - -func (p *Printer) rowText(row []string) []string { - if p.RowCharLimit <= 0 { - return row - } - - for j, r := range row { - if len(r) <= p.RowCharLimit { - continue - } - - row[j] = cellText(r, p.RowCharLimit) - } - - return row -} - -// RenderRow prints a row based on the same alignment rules to the last `Print` or `Render`. -// It can be used to live update the table. -// -// Returns the total amount of rows written to the table. -func (p *Printer) RenderRow(row []string, numbersColsPosition []int) int { - table := p.acquireTable() - row = p.rowText(row) - - table.SetColumnAlignment(p.calculateColumnAlignment(numbersColsPosition, len(row))) - - // RenderRowOnce added on kataras/tablewriter version, Changes from the original repository: - // https://github.com/olekukonko/tablewriter/compare/master...kataras:master - return table.RenderRowOnce(row) -} - -// Print outputs whatever "in" value passed as a table to the "w", -// filters cna be used to control what rows can be visible or hidden. 
-// Usage: -// Print(os.Stdout, values, func(t MyStruct) bool { /* or any type, depends on the type(s) of the "t" */ -// return t.Visibility != "hidden" -// }) -// -// Returns the total amount of rows written to the table or -// -1 if printer was unable to find a matching parser or if headers AND rows were empty. -func Print(w io.Writer, in interface{}, filters ...interface{}) int { - return New(w).Print(in, filters...) -} - -// Print outputs whatever "in" value passed as a table, filters can be used to control what rows can be visible and which not. -// Usage: -// Print(values, func(t MyStruct) bool { /* or any type, depends on the type(s) of the "t" */ -// return t.Visibility != "hidden" -// }) -// -// Returns the total amount of rows written to the table or -// -1 if printer was unable to find a matching parser or if headers AND rows were empty. -func (p *Printer) Print(in interface{}, filters ...interface{}) int { - v := indirectValue(reflect.ValueOf(in)) - f := MakeFilters(v, filters...) - - parser := WhichParser(v.Type()) - if parser == nil { - return -1 - } - - headers, rows, nums := parser.Parse(v, f) - if len(headers) == 0 && len(rows) == 0 { - return -1 - } - - return p.Render(headers, rows, nums, true) -} - -// PrintJSON prints the json-bytes as a table to the "w", -// filters cna be used to control what rows can be visible or hidden. -// -// Returns the total amount of rows written to the table or -// -1 if headers AND rows were empty. -func PrintJSON(w io.Writer, in []byte, filters ...interface{}) int { - return New(w).PrintJSON(in, filters...) -} - -// PrintJSON prints the json-bytes as a table, -// filters cna be used to control what rows can be visible or hidden. -// -// Returns the total amount of rows written to the table or -// -1 if headers AND rows were empty. -func (p *Printer) PrintJSON(in interface{}, filters ...interface{}) int { - v := indirectValue(reflect.ValueOf(in)) - f := MakeFilters(v, filters...) 
- - if !v.IsValid() { - return -1 - } - - headers, rows, nums := JSONParser.Parse(v, f) - if len(headers) == 0 && len(rows) == 0 { - return -1 - } - - return p.Render(headers, rows, nums, true) -} - -// PrintHeadList prints whatever "list" as a table to the "w" with a single header. -// The "list" should be a slice of something, however -// that list can also contain different type of values, even interface{}, the function will parse each of its elements differently if needed. -// -// It can be used when want to print a simple list of string, i.e names []string, a single column each time. -// -// Returns the total amount of rows written to the table. -func PrintHeadList(w io.Writer, list interface{}, header string, filters ...interface{}) int { - return New(w).PrintHeadList(list, header, filters...) -} - -var emptyHeader StructHeader - -// PrintHeadList prints whatever "list" as a table with a single header. -// The "list" should be a slice of something, however -// that list can also contain different type of values, even interface{}, the function will parse each of its elements differently if needed. -// -// It can be used when want to print a simple list of string, i.e names []string, a single column each time. -// -// Returns the total amount of rows written to the table. -func (p *Printer) PrintHeadList(list interface{}, header string, filters ...interface{}) int { - items := indirectValue(reflect.ValueOf(list)) - if items.Kind() != reflect.Slice { - return 0 - } - - var ( - rows [][]string - numbersColsPosition []int - ) - - for i, n := 0, items.Len(); i < n; i++ { - item := items.Index(i) - c, r := extractCells(i, emptyHeader, indirectValue(item), true) - rows = append(rows, r) - numbersColsPosition = append(numbersColsPosition, c...) 
- } - - headers := []string{header} - return p.Render(headers, rows, numbersColsPosition, true) -} diff --git a/vendor/github.com/lensesio/tableprinter/terminal_util.go b/vendor/github.com/lensesio/tableprinter/terminal_util.go deleted file mode 100644 index 28aedd5f0..000000000 --- a/vendor/github.com/lensesio/tableprinter/terminal_util.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build linux,cgo darwin,cgo - -package tableprinter - -import ( - "syscall" - "unsafe" -) - -type winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - -func getTerminalWidth() uint { - ws := &winsize{} - retCode, _, _ := syscall.Syscall(syscall.SYS_IOCTL, uintptr(syscall.Stdin), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws))) - - if int(retCode) == -1 { - return maxWidth - } - - return uint(ws.Col) -} diff --git a/vendor/github.com/lensesio/tableprinter/terminal_util_stubs.go b/vendor/github.com/lensesio/tableprinter/terminal_util_stubs.go deleted file mode 100644 index c0be11ea2..000000000 --- a/vendor/github.com/lensesio/tableprinter/terminal_util_stubs.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !linux,!darwin !cgo - -package tableprinter - -func getTerminalWidth() uint { - return maxWidth -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 37dd19a11..3f26c1fb7 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -243,9 +243,6 @@ github.com/devigned/tab # github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f ## explicit github.com/dgryski/go-rendezvous -# github.com/dustin/go-humanize v1.0.0 -## explicit -github.com/dustin/go-humanize # github.com/dvsekhvalnov/jose2go v1.5.0 ## explicit; go 1.15 github.com/dvsekhvalnov/jose2go @@ -484,9 +481,6 @@ github.com/json-iterator/go # github.com/julienschmidt/httprouter v1.3.0 ## explicit; go 1.7 github.com/julienschmidt/httprouter -# github.com/kataras/tablewriter v0.0.0-20180708051242-e063d29b7c23 -## explicit -github.com/kataras/tablewriter # github.com/klauspost/compress v1.15.14 
## explicit; go 1.17 github.com/klauspost/compress @@ -504,9 +498,6 @@ github.com/kubemq-io/kubemq-go/queues_stream # github.com/kubemq-io/protobuf v1.3.1 ## explicit; go 1.16 github.com/kubemq-io/protobuf/go -# github.com/lensesio/tableprinter v0.0.0-20201125135848-89e81fc956e7 -## explicit -github.com/lensesio/tableprinter # github.com/lib/pq v1.10.4 ## explicit; go 1.13 # github.com/linkedin/goavro/v2 v2.9.8