diff --git a/client/resource_group/controller/controller.go b/client/resource_group/controller/controller.go
index e5ad1f81fa5..6cb7995a42e 100644
--- a/client/resource_group/controller/controller.go
+++ b/client/resource_group/controller/controller.go
@@ -249,16 +249,13 @@ func (c *ResourceGroupsController) Start(ctx context.Context) {
 	defer emergencyTokenAcquisitionTicker.Stop()
 
 	failpoint.Inject("fastCleanup", func() {
-		cleanupTicker.Stop()
-		cleanupTicker = time.NewTicker(100 * time.Millisecond)
+		cleanupTicker.Reset(100 * time.Millisecond)
 		// because of checking `gc.run.consumption` in cleanupTicker,
 		// so should also change the stateUpdateTicker.
-		stateUpdateTicker.Stop()
-		stateUpdateTicker = time.NewTicker(200 * time.Millisecond)
+		stateUpdateTicker.Reset(200 * time.Millisecond)
 	})
 	failpoint.Inject("acceleratedReportingPeriod", func() {
-		stateUpdateTicker.Stop()
-		stateUpdateTicker = time.NewTicker(time.Millisecond * 100)
+		stateUpdateTicker.Reset(time.Millisecond * 100)
 	})
 
 	_, metaRevision, err := c.provider.LoadResourceGroups(ctx)
diff --git a/pkg/keyspace/tso_keyspace_group.go b/pkg/keyspace/tso_keyspace_group.go
index 5d7137ac3c2..f1ed6002a8c 100644
--- a/pkg/keyspace/tso_keyspace_group.go
+++ b/pkg/keyspace/tso_keyspace_group.go
@@ -162,8 +162,7 @@ func (m *GroupManager) allocNodesToAllKeyspaceGroups(ctx context.Context) {
 	defer m.wg.Done()
 	ticker := time.NewTicker(allocNodesToKeyspaceGroupsInterval)
 	failpoint.Inject("acceleratedAllocNodes", func() {
-		ticker.Stop()
-		ticker = time.NewTicker(time.Millisecond * 100)
+		ticker.Reset(time.Millisecond * 100)
 	})
 	defer ticker.Stop()
 	log.Info("start to alloc nodes to all keyspace groups")
diff --git a/pkg/mcs/resourcemanager/server/manager.go b/pkg/mcs/resourcemanager/server/manager.go
index 58b8b5426a4..7a07714e07a 100644
--- a/pkg/mcs/resourcemanager/server/manager.go
+++ b/pkg/mcs/resourcemanager/server/manager.go
@@ -324,8 +324,7 @@ func (m *Manager) GetResourceGroupList(withStats bool) []*ResourceGroup {
 func (m *Manager) persistLoop(ctx context.Context) {
 	ticker := time.NewTicker(time.Minute)
 	failpoint.Inject("fastPersist", func() {
-		ticker.Stop()
-		ticker = time.NewTicker(100 * time.Millisecond)
+		ticker.Reset(100 * time.Millisecond)
 	})
 	defer ticker.Stop()
 	for {
diff --git a/pkg/mcs/scheduling/server/server.go b/pkg/mcs/scheduling/server/server.go
index 9ea369aae9e..e3a6d9bd648 100644
--- a/pkg/mcs/scheduling/server/server.go
+++ b/pkg/mcs/scheduling/server/server.go
@@ -178,8 +178,7 @@ func (s *Server) updateAPIServerMemberLoop() {
 	defer cancel()
 	ticker := time.NewTicker(memberUpdateInterval)
 	failpoint.Inject("fastUpdateMember", func() {
-		ticker.Stop()
-		ticker = time.NewTicker(100 * time.Millisecond)
+		ticker.Reset(100 * time.Millisecond)
 	})
 	defer ticker.Stop()
 	var curLeader uint64
diff --git a/pkg/schedule/coordinator.go b/pkg/schedule/coordinator.go
index 2a31045129e..2736c687fdb 100644
--- a/pkg/schedule/coordinator.go
+++ b/pkg/schedule/coordinator.go
@@ -217,7 +217,7 @@ func (c *Coordinator) RunUntilStop(collectWaitTime ...time.Duration) {
 func (c *Coordinator) Run(collectWaitTime ...time.Duration) {
 	ticker := time.NewTicker(runSchedulerCheckInterval)
 	failpoint.Inject("changeCoordinatorTicker", func() {
-		ticker = time.NewTicker(100 * time.Millisecond)
+		ticker.Reset(100 * time.Millisecond)
 	})
 	defer ticker.Stop()
 	log.Info("coordinator starts to collect cluster information")
diff --git a/pkg/tso/allocator_manager.go b/pkg/tso/allocator_manager.go
index 1f5bce04583..56ee8313d57 100644
--- a/pkg/tso/allocator_manager.go
+++ b/pkg/tso/allocator_manager.go
@@ -715,8 +715,7 @@ func (am *AllocatorManager) AllocatorDaemon(ctx context.Context) {
 	}
 	tsTicker := time.NewTicker(am.updatePhysicalInterval)
 	failpoint.Inject("fastUpdatePhysicalInterval", func() {
-		tsTicker.Stop()
-		tsTicker = time.NewTicker(time.Millisecond)
+		tsTicker.Reset(time.Millisecond)
 	})
 	defer tsTicker.Stop()
 	checkerTicker := time.NewTicker(PriorityCheck)
diff --git a/server/cluster/cluster.go b/server/cluster/cluster.go
index fdeffd15e9c..ef6d45203d8 100644
--- a/server/cluster/cluster.go
+++ b/server/cluster/cluster.go
@@ -396,8 +396,7 @@ func (c *RaftCluster) runServiceCheckJob() {
 
 	ticker := time.NewTicker(serviceCheckInterval)
 	failpoint.Inject("highFrequencyClusterJobs", func() {
-		ticker.Stop()
-		ticker = time.NewTicker(time.Millisecond)
+		ticker.Reset(time.Millisecond)
 	})
 	defer ticker.Stop()
 
@@ -675,8 +674,7 @@ func (c *RaftCluster) runMetricsCollectionJob() {
 
 	ticker := time.NewTicker(metricsCollectionJobInterval)
 	failpoint.Inject("highFrequencyClusterJobs", func() {
-		ticker.Stop()
-		ticker = time.NewTicker(time.Millisecond)
+		ticker.Reset(time.Millisecond)
 	})
 	defer ticker.Stop()
 
@@ -699,8 +697,7 @@ func (c *RaftCluster) runNodeStateCheckJob() {
 
 	ticker := time.NewTicker(nodeStateCheckJobInterval)
 	failpoint.Inject("highFrequencyClusterJobs", func() {
-		ticker.Stop()
-		ticker = time.NewTicker(2 * time.Second)
+		ticker.Reset(2 * time.Second)
 	})
 	defer ticker.Stop()
 
diff --git a/server/cluster/scheduling_controller.go b/server/cluster/scheduling_controller.go
index 20a36a6817d..5d617700804 100644
--- a/server/cluster/scheduling_controller.go
+++ b/server/cluster/scheduling_controller.go
@@ -150,8 +150,7 @@ func (sc *schedulingController) runSchedulingMetricsCollectionJob() {
 
 	ticker := time.NewTicker(metricsCollectionJobInterval)
 	failpoint.Inject("highFrequencyClusterJobs", func() {
-		ticker.Stop()
-		ticker = time.NewTicker(time.Millisecond)
+		ticker.Reset(time.Millisecond)
 	})
 	defer ticker.Stop()
 
diff --git a/tools/pd-api-bench/cases/controller.go b/tools/pd-api-bench/cases/controller.go
index a77474db3a7..75c3c25f7ab 100644
--- a/tools/pd-api-bench/cases/controller.go
+++ b/tools/pd-api-bench/cases/controller.go
@@ -224,7 +224,7 @@ func (c *httpController) run() {
 	for i := int64(0); i < burst; i++ {
 		go func() {
 			defer c.wg.Done()
-			var ticker = time.NewTicker(tt)
+			ticker := time.NewTicker(tt)
 			defer ticker.Stop()
 			for {
 				select {
@@ -293,7 +293,7 @@ func (c *gRPCController) run() {
 	for i := int64(0); i < burst; i++ {
 		go func() {
 			defer c.wg.Done()
-			var ticker = time.NewTicker(tt)
+			ticker := time.NewTicker(tt)
 			defer ticker.Stop()
 			for {
 				select {
@@ -367,7 +367,7 @@ func (c *etcdController) run() {
 	for i := int64(0); i < burst; i++ {
 		go func() {
 			defer c.wg.Done()
-			var ticker = time.NewTicker(tt)
+			ticker := time.NewTicker(tt)
 			defer ticker.Stop()
 			for {
 				select {
diff --git a/tools/pd-heartbeat-bench/main.go b/tools/pd-heartbeat-bench/main.go
index cfa0495c31c..e77f0797bef 100644
--- a/tools/pd-heartbeat-bench/main.go
+++ b/tools/pd-heartbeat-bench/main.go
@@ -167,7 +167,7 @@ func putStores(ctx context.Context, cfg *config.Config, cli pdpb.PDClient, store
 			log.Fatal("failed to put store", zap.Uint64("store-id", i), zap.String("err", resp.GetHeader().GetError().String()))
 		}
 		go func(ctx context.Context, storeID uint64) {
-			var heartbeatTicker = time.NewTicker(10 * time.Second)
+			heartbeatTicker := time.NewTicker(10 * time.Second)
 			defer heartbeatTicker.Stop()
 			for {
 				select {
@@ -526,9 +526,9 @@ func main() {
 	header := &pdpb.RequestHeader{
 		ClusterId: clusterID,
 	}
-	var heartbeatTicker = time.NewTicker(regionReportInterval * time.Second)
+	heartbeatTicker := time.NewTicker(regionReportInterval * time.Second)
 	defer heartbeatTicker.Stop()
-	var resolvedTSTicker = time.NewTicker(time.Second)
+	resolvedTSTicker := time.NewTicker(time.Second)
 	defer resolvedTSTicker.Stop()
 	withMetric := metrics.InitMetric2Collect(cfg.MetricsAddr)
 	for {
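
Note on the recurring change above: `(*time.Ticker).Reset` (available since Go 1.15) changes the period of an existing ticker in place. Because the ticker variable is never rebound, the single `defer ticker.Stop()` keeps pointing at the right object, and hunks like the one in `pkg/schedule/coordinator.go`, where the old ticker was previously replaced without being stopped, no longer leak the original ticker. A minimal, self-contained sketch of the idiom follows; the names and durations are illustrative, not taken from the patch:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	// NewTicker starts a ticker with a 1-minute period; the deferred
    	// Stop releases its resources when main returns.
    	ticker := time.NewTicker(time.Minute)
    	defer ticker.Stop()

    	// Reset (Go 1.15+) shrinks the period in place, e.g. to speed a
    	// loop up under a test failpoint. The ticker and its channel are
    	// reused, so the defer above still stops the right ticker and no
    	// second ticker is created or leaked.
    	ticker.Reset(100 * time.Millisecond)

    	for i := 0; i < 3; i++ {
    		fmt.Println("tick at", <-ticker.C)
    	}
    }

The `var ticker = time.NewTicker(tt)` to `ticker := time.NewTicker(tt)` hunks in the bench tools are a pure style cleanup: inside a function body, the short variable declaration is the idiomatic Go form.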