Incorporate Docker-based etcd into Go integration tests
PR 4 for #4144

The high-level approach is as described in #4144.

This PR integrates Docker-based etcd into our Go integration tests. It removes the need for these tests to have the etcd embed package running inside m3db, but doesn't yet touch that functionality.

commit-id:3ae12ffd
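For orientation, here is a minimal sketch of the pattern the updated tests follow, assembled only from the helpers this change introduces (dockerexternal.NewEtcd, EtcdNode.Setup, EtcdNode.Address, and the new Etcd field on resources.ClusterOptions). The package and test names are placeholders, and the elided cluster bootstrapping follows repair_and_replication_test.go below.

```go
//go:build cluster_integration
// +build cluster_integration

package etcdsketch

import (
	"context"
	"testing"

	"github.com/ory/dockertest/v3"
	"github.com/stretchr/testify/require"

	"github.com/m3db/m3/src/integration/resources"
	"github.com/m3db/m3/src/integration/resources/docker/dockerexternal"
	"github.com/m3db/m3/src/x/instrument"
)

func TestWithDockerEtcd(t *testing.T) {
	pool, err := dockertest.NewPool("")
	require.NoError(t, err)

	// Start a single etcd node in Docker instead of relying on the etcd
	// embed package running inside m3db.
	etcd, err := dockerexternal.NewEtcd(pool, instrument.NewOptions())
	require.NoError(t, err)
	require.NoError(t, etcd.Setup(context.TODO()))

	// Hand the node to the in-process cluster via the new Etcd option, and
	// use its address wherever raw endpoints are needed (e.g. replication
	// config).
	opts := resources.ClusterOptions{Etcd: etcd}
	endpoints := []string{etcd.Address()}
	_, _ = opts, endpoints

	// ... build configs and start the cluster as in
	// repair_and_replication_test.go ...
}
```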
andrewmains12 committed Aug 29, 2022
1 parent 42a33a5 commit 7b49976
Showing 16 changed files with 349 additions and 131 deletions.
14 changes: 13 additions & 1 deletion src/integration/aggregator/aggregator.go
@@ -37,8 +37,9 @@ import (

const (
// TestAggregatorDBNodeConfig is the test config for the dbnode.
// TODO (amainsd): needs to use host.docker.internal for buildkite
TestAggregatorDBNodeConfig = `
db: {}
"db": {}
coordinator: {}
`

@@ -117,6 +118,17 @@ ingest:
maxBackoff: 10s
jitter: true
storeMetricsType: true
clusterManagement:
etcd:
env: default_env
zone: embedded
service: m3db
cacheDir: /var/lib/m3kv
etcdClusters:
- zone: embedded
endpoints:
- 127.0.0.1:2379
`

// TestAggregatorAggregatorConfig is the test config for the aggregators.
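The TODO at the top of this file flags that the hard-coded 127.0.0.1 endpoint is expected to break on Buildkite, presumably because the test process there runs inside a container and has to reach etcd through the Docker host. Purely to illustrate that TODO, a hypothetical variant of the new clusterManagement block might look like the following (the constant name and the host.docker.internal endpoint are assumptions, not part of this change):

```go
package aggregator

// testAggregatorEtcdConfigBuildkite is a hypothetical Buildkite-oriented
// variant of the clusterManagement block added above; it is NOT part of this
// change. host.docker.internal would stand in for 127.0.0.1 when the test
// runner itself executes inside a container.
const testAggregatorEtcdConfigBuildkite = `
clusterManagement:
  etcd:
    env: default_env
    zone: embedded
    service: m3db
    cacheDir: /var/lib/m3kv
    etcdClusters:
      - zone: embedded
        endpoints:
          - host.docker.internal:2379
`
```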
2 changes: 2 additions & 0 deletions src/integration/aggregator/aggregator_test.go
@@ -1,4 +1,6 @@
//go:build cluster_integration
// +build cluster_integration

//
// Copyright (c) 2021 Uber Technologies, Inc.
//
12 changes: 6 additions & 6 deletions src/integration/prometheus/prometheus.go
@@ -37,7 +37,7 @@ import (
"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
"github.com/m3db/m3/src/dbnode/kvconfig"
"github.com/m3db/m3/src/integration/resources"
"github.com/m3db/m3/src/integration/resources/docker"
"github.com/m3db/m3/src/integration/resources/docker/dockerexternal"
"github.com/m3db/m3/src/query/api/v1/handler/database"
"github.com/m3db/m3/src/query/api/v1/options"
"github.com/m3db/m3/src/query/generated/proto/prompb"
@@ -89,7 +89,7 @@ func RunTest(t *testing.T, m3 resources.M3Resources, prom resources.ExternalReso

logger.Info("running prometheus tests")

p := prom.(*docker.Prometheus)
p := prom.(*dockerexternal.Prometheus)

testPrometheusRemoteRead(t, p, logger)
testPrometheusRemoteWriteMultiNamespaces(t, p, logger)
@@ -118,15 +118,15 @@ func RunTest(t *testing.T, m3 resources.M3Resources, prom resources.ExternalReso
testDebugPromReturnsDuplicates(t, m3, logger)
}

func testPrometheusRemoteRead(t *testing.T, p *docker.Prometheus, logger *zap.Logger) {
func testPrometheusRemoteRead(t *testing.T, p *dockerexternal.Prometheus, logger *zap.Logger) {
// Ensure Prometheus can proxy a Prometheus query
logger.Info("testing prometheus remote read")
verifyPrometheusQuery(t, p, "prometheus_remote_storage_samples_total", 100)
}

func testPrometheusRemoteWriteMultiNamespaces(
t *testing.T,
p *docker.Prometheus,
p *dockerexternal.Prometheus,
logger *zap.Logger,
) {
logger.Info("testing remote write to multiple namespaces")
@@ -1839,9 +1839,9 @@ func requireSeriesSuccess(
}))
}

func verifyPrometheusQuery(t *testing.T, p *docker.Prometheus, query string, threshold float64) {
func verifyPrometheusQuery(t *testing.T, p *dockerexternal.Prometheus, query string, threshold float64) {
require.NoError(t, resources.Retry(func() error {
res, err := p.Query(docker.PrometheusQueryRequest{
res, err := p.Query(dockerexternal.PrometheusQueryRequest{
Query: query,
})
if err != nil {
15 changes: 11 additions & 4 deletions src/integration/prometheus/prometheus_test.go
@@ -1,4 +1,6 @@
//go:build cluster_integration
// +build cluster_integration

//
// Copyright (c) 2021 Uber Technologies, Inc.
//
@@ -23,12 +25,14 @@
package prometheus

import (
"context"
"path"
"runtime"
"testing"
"time"

"github.com/m3db/m3/src/integration/resources"
"github.com/m3db/m3/src/integration/resources/docker"
"github.com/m3db/m3/src/integration/resources/docker/dockerexternal"
"github.com/m3db/m3/src/integration/resources/inprocess"

"github.com/ory/dockertest/v3"
@@ -60,14 +64,17 @@ func testSetup(t *testing.T) (resources.M3Resources, resources.ExternalResources
require.NoError(t, err)

_, filename, _, _ := runtime.Caller(0)
prom := docker.NewPrometheus(docker.PrometheusOptions{
prom := dockerexternal.NewPrometheus(dockerexternal.PrometheusOptions{
Pool: pool,
PathToCfg: path.Join(path.Dir(filename), "../resources/docker/config/prometheus.yml"),
})
require.NoError(t, prom.Setup())

ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
require.NoError(t, prom.Setup(ctx))

return m3, prom, func() {
assert.NoError(t, prom.Close())
assert.NoError(t, prom.Close(ctx))
assert.NoError(t, m3.Cleanup())
}
}
71 changes: 50 additions & 21 deletions src/integration/repair/repair_and_replication_test.go
@@ -1,4 +1,6 @@
//go:build cluster_integration
// +build cluster_integration

//
// Copyright (c) 2021 Uber Technologies, Inc.
//
@@ -23,13 +25,17 @@
package repair

import (
"context"
"testing"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"github.com/m3db/m3/src/integration/resources"
"github.com/m3db/m3/src/integration/resources/docker/dockerexternal"
"github.com/m3db/m3/src/integration/resources/inprocess"
"github.com/m3db/m3/src/x/instrument"

"github.com/ory/dockertest/v3"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestRepairAndReplication(t *testing.T) {
@@ -40,11 +46,23 @@ func TestRepairAndReplication(t *testing.T) {
}

func testSetup(t *testing.T) (resources.M3Resources, resources.M3Resources, func()) {
fullCfgs1 := getClusterFullConfgs(t)
fullCfgs2 := getClusterFullConfgs(t)
pool, err := dockertest.NewPool("")
require.NoError(t, err)

ep1 := fullCfgs1.Configs.Coordinator.Clusters[0].Client.EnvironmentConfig.Services[0].Service.ETCDClusters[0].Endpoints
ep2 := fullCfgs2.Configs.Coordinator.Clusters[0].Client.EnvironmentConfig.Services[0].Service.ETCDClusters[0].Endpoints
etcd1 := mustNewStartedEtcd(t, pool)
etcd2 := mustNewStartedEtcd(t, pool)

cluster1Opts := newTestClusterOptions()
cluster1Opts.Etcd = etcd1

cluster2Opts := newTestClusterOptions()
cluster2Opts.Etcd = etcd2

fullCfgs1 := getClusterFullConfgs(t, cluster1Opts)
fullCfgs2 := getClusterFullConfgs(t, cluster2Opts)

ep1 := []string{etcd1.Address()}
ep2 := []string{etcd2.Address()}

setRepairAndReplicationCfg(
&fullCfgs1,
@@ -57,10 +75,10 @@ func testSetup(t *testing.T) (resources.M3Resources, resources.M3Resources, func
ep1,
)

cluster1, err := inprocess.NewClusterFromSpecification(fullCfgs1, clusterOptions)
cluster1, err := inprocess.NewClusterFromSpecification(fullCfgs1, cluster1Opts)
require.NoError(t, err)

cluster2, err := inprocess.NewClusterFromSpecification(fullCfgs2, clusterOptions)
cluster2, err := inprocess.NewClusterFromSpecification(fullCfgs2, cluster2Opts)
require.NoError(t, err)

return cluster1, cluster2, func() {
@@ -69,7 +87,14 @@ func testSetup(t *testing.T) (resources.M3Resources, resources.M3Resources, func
}
}

func getClusterFullConfgs(t *testing.T) inprocess.ClusterSpecification {
func mustNewStartedEtcd(t *testing.T, pool *dockertest.Pool) *dockerexternal.EtcdNode {
etcd, err := dockerexternal.NewEtcd(pool, instrument.NewOptions())
require.NoError(t, err)
require.NoError(t, etcd.Setup(context.TODO()))
return etcd
}

func getClusterFullConfgs(t *testing.T, clusterOptions resources.ClusterOptions) inprocess.ClusterSpecification {
cfgs, err := inprocess.NewClusterConfigsFromYAML(
TestRepairDBNodeConfig, TestRepairCoordinatorConfig, "",
)
@@ -84,18 +109,22 @@ func getClusterFullConfgs(t *testing.T) inprocess.ClusterSpecification {
func setRepairAndReplicationCfg(fullCfg *inprocess.ClusterSpecification, clusterName string, endpoints []string) {
for _, dbnode := range fullCfg.Configs.DBNodes {
dbnode.DB.Replication.Clusters[0].Name = clusterName
dbnode.DB.Replication.Clusters[0].Client.EnvironmentConfig.Services[0].Service.ETCDClusters[0].Endpoints = endpoints
etcdService := &(dbnode.DB.Replication.Clusters[0].Client.EnvironmentConfig.Services[0].Service.ETCDClusters[0])
etcdService.AutoSyncInterval = -1
etcdService.Endpoints = endpoints
}
}

var clusterOptions = resources.ClusterOptions{
DBNode: &resources.DBNodeClusterOptions{
RF: 2,
NumShards: 4,
NumInstances: 1,
NumIsolationGroups: 2,
},
Coordinator: resources.CoordinatorClusterOptions{
GeneratePorts: true,
},
func newTestClusterOptions() resources.ClusterOptions {
return resources.ClusterOptions{
DBNode: &resources.DBNodeClusterOptions{
RF: 2,
NumShards: 4,
NumInstances: 1,
NumIsolationGroups: 2,
},
Coordinator: resources.CoordinatorClusterOptions{
GeneratePorts: true,
},
}
}