Commit 6929c8e

chore(core): use Tick() instead of NewTicker() (#9548)
**Description**

- Replaces all uses of `time.NewTicker()` with `time.Tick()`. Since Go 1.23, the garbage collector automatically cleans up unreferenced tickers, so we can use `Tick()` instead of `NewTicker()` and drop all the `defer ticker.Stop()` calls.
- Removes all calls that stop a timer. As with tickers, since Go 1.23 timers are garbage-collected automatically without calling `timer.Stop()`.
- Removes two ticker member variables that were only used in a single function.
- Changes the function `PollTillPassOrTimeout()` to use the `timeout` argument.
1 parent a3d33ee commit 6929c8e

31 files changed: 126 additions (+), 180 deletions (-)
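
For context, a minimal runnable sketch of the pattern this commit applies throughout the codebase; the periodic work here is a hypothetical placeholder, not code from the diff:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Old idiom: a *time.Ticker had to be stopped explicitly, otherwise
	// its runtime timer was never released (pre-Go 1.23 behavior):
	//
	//	ticker := time.NewTicker(time.Second)
	//	defer ticker.Stop()
	//	for range ticker.C { ... }
	//
	// New idiom: since Go 1.23, tickers that become unreferenced are
	// garbage-collected, so time.Tick no longer leaks and Stop can go.
	n := 0
	for range time.Tick(time.Second) {
		fmt.Println("tick") // stand-in for the real periodic work
		if n++; n == 3 {
			break
		}
	}
}
```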

conn/node.go

Lines changed: 8 additions & 13 deletions
```diff
@@ -150,10 +150,9 @@ func (n *Node) ReportRaftComms() {
 	if !glog.V(3) {
 		return
 	}
-	ticker := time.NewTicker(time.Second)
-	defer ticker.Stop()
+	ticker := time.Tick(time.Second)
 
-	for range ticker.C {
+	for range ticker {
 		out := atomic.SwapInt64(&n.heartbeatsOut, 0)
 		in := atomic.SwapInt64(&n.heartbeatsIn, 0)
 		glog.Infof("RaftComm: [%#x] Heartbeats out: %d, in: %d", n.Id, out, in)
@@ -401,11 +400,10 @@ func (n *Node) streamMessages(to uint64, s *stream) {
 	// Let's set the deadline to 10s because if we increase it, then it takes longer to recover from
 	// a partition and get a new leader.
 	deadline := time.Now().Add(10 * time.Second)
-	ticker := time.NewTicker(time.Second)
-	defer ticker.Stop()
+	ticker := time.Tick(time.Second)
 
 	var logged int
-	for range ticker.C { // Don't do this in an busy-wait loop, use a ticker.
+	for range ticker { // Don't do this in a busy-wait loop, use a ticker.
 		// doSendMessage would block doing a stream. So, time.Now().After is
 		// only there to avoid a busy-wait.
 		if err := n.doSendMessage(to, s.msgCh); err != nil {
@@ -460,11 +458,8 @@ func (n *Node) doSendMessage(to uint64, msgCh chan []byte) error {
 
 	ctx = mc.Context()
 
-	fastTick := time.NewTicker(5 * time.Second)
-	defer fastTick.Stop()
-
-	ticker := time.NewTicker(3 * time.Minute)
-	defer ticker.Stop()
+	fastTick := time.Tick(5 * time.Second)
+	ticker := time.Tick(3 * time.Minute)
 
 	for {
 		select {
@@ -495,7 +490,7 @@ func (n *Node) doSendMessage(to uint64, msgCh chan []byte) error {
 				// RAFT would automatically retry.
 				return err
 			}
-		case <-fastTick.C:
+		case <-fastTick:
 			// We use this ticker, because during network partitions, mc.Send is
 			// unable to actually send packets, and also does not complain about
 			// them. We could have potentially used the separately tracked
@@ -512,7 +507,7 @@ func (n *Node) doSendMessage(to uint64, msgCh chan []byte) error {
 				pool.SetUnhealthy()
 				return errors.Wrapf(err, "while calling IsPeer %x", to)
 			}
-		case <-ticker.C:
+		case <-ticker:
 			if lastPackets == packets {
 				span.AddEvent(fmt.Sprintf("No activity for a while [Packets == %d]. Closing connection.", packets))
 				return mc.CloseSend()
```
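
The `doSendMessage` change above multiplexes two tick channels with different periods in a single select. A standalone sketch of that shape, with durations shortened and the work stubbed out so the demo terminates:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Two independent tick channels, as in doSendMessage above.
	fastTick := time.Tick(100 * time.Millisecond) // frequent liveness check
	slowTick := time.Tick(350 * time.Millisecond) // infrequent idle check
	deadline := time.After(time.Second)           // bound the demo

	for {
		select {
		case <-fastTick:
			fmt.Println("fast: verify the peer is reachable")
		case <-slowTick:
			fmt.Println("slow: close the connection if it is idle")
		case <-deadline:
			return
		}
	}
}
```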

conn/node_test.go

Lines changed: 2 additions & 3 deletions
```diff
@@ -22,12 +22,11 @@ import (
 )
 
 func (n *Node) run(wg *sync.WaitGroup) {
-	ticker := time.NewTicker(20 * time.Millisecond)
-	defer ticker.Stop()
+	ticker := time.Tick(20 * time.Millisecond)
 
 	for {
 		select {
-		case <-ticker.C:
+		case <-ticker:
 			n.Raft().Tick()
 		case rd := <-n.Raft().Ready():
 			n.SaveToStorage(&rd.HardState, rd.Entries, &rd.Snapshot)
```

conn/raft_server.go

Lines changed: 2 additions & 4 deletions
```diff
@@ -266,9 +266,6 @@ func (w *RaftServer) RaftMessage(server pb.Raft_RaftMessageServer) error {
 // Heartbeat rpc call is used to check connection with other workers after worker
 // tcp server for this instance starts.
 func (w *RaftServer) Heartbeat(_ *api.Payload, stream pb.Raft_HeartbeatServer) error {
-	ticker := time.NewTicker(echoDuration)
-	defer ticker.Stop()
-
 	node := w.GetNode()
 	if node == nil {
 		return ErrNoNode
@@ -287,13 +284,14 @@ func (w *RaftServer) Heartbeat(_ *api.Payload, stream pb.Raft_HeartbeatServer) e
 	}
 
 	ctx := stream.Context()
+	ticker := time.Tick(echoDuration)
 
 	for {
 		info.Uptime = int64(time.Since(node.StartTime) / time.Second)
 		select {
 		case <-ctx.Done():
 			return ctx.Err()
-		case <-ticker.C:
+		case <-ticker:
 			if err := stream.Send(&info); err != nil {
 				return err
 			}
```

contrib/jepsen/main.go

Lines changed: 2 additions & 5 deletions
```diff
@@ -237,18 +237,15 @@ func jepsenServe() error {
 	// lein run serve runs indefinitely, so there's no need to wait for the
 	// command to finish.
 	_ = cmd.Start()
-	ticker := time.NewTicker(time.Second)
-	defer ticker.Stop()
-
+	ticker := time.Tick(time.Second)
 	timeout := time.NewTimer(5 * time.Minute)
-	defer timeout.Stop()
 	for {
 		select {
 		case <-timeout.C:
 			wg.Done()
 			errCh <- errors.New("lein run serve couldn't run after 5 minutes")
 			return
-		case <-ticker.C:
+		case <-ticker:
 			if err := checkServing(); err == nil {
 				wg.Done()
 				errCh <- nil
```

dgraph/cmd/bulk/progress.go

Lines changed: 2 additions & 3 deletions
```diff
@@ -56,13 +56,12 @@ func (p *progress) setPhase(ph phase) {
 }
 
 func (p *progress) report() {
-	t := time.NewTicker(time.Second)
-	defer t.Stop()
+	t := time.Tick(time.Second)
 
 	z.StatsPrint() // Just print once.
 	for {
 		select {
-		case <-t.C:
+		case <-t:
 			p.reportOnce()
 		case <-p.shutdown:
 			p.shutdown <- struct{}{}
```

dgraph/cmd/bulk/reduce.go

Lines changed: 2 additions & 3 deletions
```diff
@@ -478,8 +478,7 @@ func (r *reducer) reduce(partitionKeys [][]byte, mapItrs []*mapIterator, ci *cou
 		writerCh <- req
 	}
 
-	ticker := time.NewTicker(time.Minute)
-	defer ticker.Stop()
+	ticker := time.Tick(time.Minute)
 
 	buffers := make(chan *z.Buffer, 3)
 
@@ -502,7 +501,7 @@ func (r *reducer) reduce(partitionKeys [][]byte, mapItrs []*mapIterator, ci *cou
 
 		hd.Update(int64(cbuf.LenNoPadding()))
 		select {
-		case <-ticker.C:
+		case <-ticker:
 			fmt.Printf("Histogram of buffer sizes: %s\n", hd.String())
 		default:
 		}
```

dgraph/cmd/increment/increment_test.go

Lines changed: 0 additions & 1 deletion
```diff
@@ -203,7 +203,6 @@ func TestBestEffortOnly(t *testing.T) {
 	}()
 
 	timer := time.NewTimer(15 * time.Second)
-	defer timer.Stop()
 
 	select {
 	case <-timer.C:
```
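
The timer deletions here and in contrib/jepsen/main.go rest on the same Go 1.23 change: a `*time.Timer` that becomes unreachable is garbage-collected even if `Stop()` was never called, so one-shot timeouts no longer need cleanup. A minimal sketch, with `done` standing in for the test's completion signal:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	done := make(chan struct{}) // hypothetical completion signal
	go func() {
		time.Sleep(50 * time.Millisecond)
		close(done)
	}()

	timer := time.NewTimer(15 * time.Second)
	// No defer timer.Stop() needed: since Go 1.23 the unreachable timer
	// is reclaimed by the GC after this function returns.
	select {
	case <-timer.C:
		fmt.Println("timed out")
	case <-done:
		fmt.Println("finished in time")
	}
}
```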

dgraph/cmd/live/batch.go

Lines changed: 4 additions & 6 deletions
```diff
@@ -54,7 +54,6 @@ type loader struct {
 
 	dc    *dgo.Dgraph
 	alloc *xidmap.XidMap
-	ticker *time.Ticker
 	db    *badger.DB
 	requestsWg sync.WaitGroup
 	// If we retry a request, we add one to retryRequestsWg.
@@ -414,8 +413,7 @@ func (l *loader) makeRequests() {
 			buffer = buffer[:i]
 		}
 
-	t := time.NewTicker(5 * time.Second)
-	defer t.Stop()
+	t := time.Tick(5 * time.Second)
 
 loop:
 	for {
@@ -431,7 +429,7 @@ loop:
 				buffer = append(buffer, req)
 			}
 
-		case <-t.C:
+		case <-t:
 			for {
 				drain()
 				if len(buffer) < l.opts.bufferSize {
@@ -462,11 +460,11 @@ loop:
 
 func (l *loader) printCounters() {
 	period := 5 * time.Second
-	l.ticker = time.NewTicker(period)
+	ticker := time.Tick(period)
 	start := time.Now()
 
 	var last Counter
-	for range l.ticker.C {
+	for range ticker {
 		counter := l.Counter()
 		rate := float64(counter.Nquads-last.Nquads) / period.Seconds()
 		elapsed := time.Since(start).Round(time.Second)
```

dgraph/cmd/zero/oracle.go

Lines changed: 2 additions & 3 deletions
```diff
@@ -184,8 +184,7 @@ func (o *Oracle) removeSubscriber(id int) {
 // and sends the delta object to each subscriber's channel
 func (o *Oracle) sendDeltasToSubscribers() {
 	delta := &pb.OracleDelta{}
-	ticker := time.NewTicker(time.Second)
-	defer ticker.Stop()
+	ticker := time.Tick(time.Second)
 
 	// waitFor calculates the maximum value of delta.MaxAssigned and all the CommitTs of delta.Txns
 	waitFor := func() uint64 {
@@ -201,7 +200,7 @@ func (o *Oracle) sendDeltasToSubscribers() {
 	var update *pb.OracleDelta
 	select {
 	case update = <-o.updates:
-	case <-ticker.C:
+	case <-ticker:
 		wait := waitFor()
 		if wait == 0 || o.doneUntil.DoneUntil() < wait {
 			goto get_update
```

dgraph/cmd/zero/raft.go

Lines changed: 8 additions & 12 deletions
```diff
@@ -690,12 +690,11 @@ func (n *node) initAndStartNode() error {
 
 func (n *node) updateZeroMembershipPeriodically(closer *z.Closer) {
 	defer closer.Done()
-	ticker := time.NewTicker(10 * time.Second)
-	defer ticker.Stop()
+	ticker := time.Tick(10 * time.Second)
 
 	for {
 		select {
-		case <-ticker.C:
+		case <-ticker:
 			n.server.updateZeroLeader()
 		case <-closer.HasBeenClosed():
 			return
@@ -705,8 +704,7 @@ func (n *node) updateZeroMembershipPeriodically(closer *z.Closer) {
 
 func (n *node) checkQuorum(closer *z.Closer) {
 	defer closer.Done()
-	ticker := time.NewTicker(time.Second)
-	defer ticker.Stop()
+	ticker := time.Tick(time.Second)
 
 	quorum := func() {
 		// Make this timeout 1.5x the timeout on RunReadIndexLoop.
@@ -733,7 +731,7 @@ func (n *node) checkQuorum(closer *z.Closer) {
 
 	for {
 		select {
-		case <-ticker.C:
+		case <-ticker:
 			// Only the leader needs to check for the quorum. The quorum is
 			// used by a leader to identify if it is behind a network partition.
 			if n.amLeader() {
@@ -747,12 +745,11 @@ func (n *node) checkQuorum(closer *z.Closer) {
 
 func (n *node) snapshotPeriodically(closer *z.Closer) {
 	defer closer.Done()
-	ticker := time.NewTicker(time.Minute)
-	defer ticker.Stop()
+	ticker := time.Tick(time.Minute)
 
 	for {
 		select {
-		case <-ticker.C:
+		case <-ticker:
 			if err := n.calculateAndProposeSnapshot(); err != nil {
 				glog.Errorf("While calculateAndProposeSnapshot: %v", err)
 			}
@@ -873,8 +870,7 @@ func (n *node) Run() {
 	lastLead := uint64(math.MaxUint64)
 
 	var leader bool
-	ticker := time.NewTicker(tickDur)
-	defer ticker.Stop()
+	ticker := time.Tick(tickDur)
 
 	// snapshot can cause select loop to block while deleting entries, so run
 	// it in goroutine
@@ -911,7 +907,7 @@ func (n *node) Run() {
 		case <-n.closer.HasBeenClosed():
 			n.Raft().Stop()
 			return
-		case <-ticker.C:
+		case <-ticker:
 			n.Raft().Tick()
 		case rd := <-n.Raft().Ready():
 			timer.Start()
```
