Commit f905f11

go 1.24 (#4869)

* go 1.24
* Fixed params to Fatal and Msg
* feat(go version) - update the golang version to 1.24.2

1 parent 0acd42a commit f905f11
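
Note: beyond the version bump itself, most of the changes below swap printf-style calls (Msgf, Fatalf, Wrapf) for their plain counterparts when no format arguments are passed. This is presumably driven by Go 1.24's stricter `vet` printf analyzer, which now reports calls that pass a non-constant format string with no other arguments. A minimal sketch of the pattern vet rejects (variable names are illustrative, not from this repository):

```go
package main

import "fmt"

func main() {
	s := "100% done" // non-constant string that happens to contain '%'

	// Go 1.24's vet printf check flags this: s is a non-constant format
	// string with no arguments, so any '%' in it is parsed as a verb.
	fmt.Printf(s) // prints "100%!d(MISSING)one"

	// Either of these is the intended call:
	fmt.Print(s)        // take the string verbatim
	fmt.Printf("%s", s) // or pass it as data to a constant format
}
```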

File tree

22 files changed: +51 -50 lines changed


.travis.yml

Lines changed: 1 addition & 1 deletion
@@ -2,7 +2,7 @@ os: linux
 dist: jammy
 language: go
 go:
-  - 1.22.5
+  - 1.24.2
 go_import_path: github.com/harmony-one/harmony
 cache:
   directories:

Dockerfile

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 FROM ubuntu:22.04

 ARG TARGETARCH
-ARG GOLANG_VERSION="1.22.5"
+ARG GOLANG_VERSION="1.24.2"

 SHELL ["/bin/bash", "-c"]

Makefile

Lines changed: 1 addition & 0 deletions
@@ -256,3 +256,4 @@ debug-delete-log:
 	docker volume rm logs_aggregator_loki_data
 	@echo "[WARN] - it needs sudo to remove folder created with loki docker image user"
 	sudo rm -rf test/logs_aggregator/loki
+

README.md

Lines changed: 1 addition & 1 deletion
@@ -16,7 +16,7 @@ http://api.hmny.io/

 ## Requirements

-### **Go 1.22.5**
+### **Go 1.24.2**
 ### **GMP and OpenSSL**

 On macOS:

accounts/abi/unpack_test.go

Lines changed: 1 addition & 1 deletion
@@ -909,7 +909,7 @@ func TestOOMMaliciousInput(t *testing.T) {
 		}
 		encb, err := hex.DecodeString(test.enc)
 		if err != nil {
-			t.Fatalf("invalid hex: %s" + test.enc)
+			t.Fatalf("invalid hex: %s", test.enc)
 		}
 		_, err = abi.Methods["method"].Outputs.UnpackValues(encb)
 		if err == nil {
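
The fix above replaces string concatenation with a format argument: in the old call the concatenated value became part of the format string itself, so `%s` was never matched with an operand (and Go 1.24's vet flags the resulting non-constant format string). A runnable sketch of the difference, using `fmt.Sprintf` in place of `t.Fatalf` and a made-up input:

```go
package main

import "fmt"

func main() {
	enc := "zz" // stand-in for test.enc

	// Old form: the value is glued onto the format string, so %s has
	// no matching operand and fmt reports it as missing.
	fmt.Println(fmt.Sprintf("invalid hex: %s" + enc))
	// Output: invalid hex: %!s(MISSING)zz

	// Fixed form: enc is the operand for %s.
	fmt.Println(fmt.Sprintf("invalid hex: %s", enc))
	// Output: invalid hex: zz
}
```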

api/service/legacysync/downloader/Proto.Dockerfile

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-FROM golang:1.22.5-bullseye
+FROM golang:1.24.2-bullseye

 RUN apt update
 RUN apt install -y protobuf-compiler

api/service/stagedstreamsync/stage_heads.go

Lines changed: 2 additions & 2 deletions
@@ -83,7 +83,7 @@ func (heads *StageHeads) Exec(ctx context.Context, firstCycle bool, invalidBlock
 			Uint64("currentHeight", currentHeight).
 			Uint64("maxPeersHeight", maxHeight).
 			Uint64("targetHeight", targetHeight).
-			Msgf(WrapStagedSyncMsg("current height is ahead of target height, target height is readjusted to max peers height"))
+			Msg(WrapStagedSyncMsg("current height is ahead of target height, target height is readjusted to max peers height"))
 		targetHeight = maxHeight
 	}

@@ -108,7 +108,7 @@ func (heads *StageHeads) Exec(ctx context.Context, firstCycle bool, invalidBlock
 	if err := s.Update(tx, targetHeight); err != nil {
 		heads.configs.logger.Error().
 			Err(err).
-			Msgf(WrapStagedSyncMsg("saving progress for headers stage failed"))
+			Msg(WrapStagedSyncMsg("saving progress for headers stage failed"))
 		return err
 	}
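
The Msgf-to-Msg swaps in this file (and in the staged-sync files below) follow the same rule: zerolog's Msgf runs its argument through fmt.Sprintf, so calling it with a single pre-built string both trips the Go 1.24 vet check and risks mangling any '%' in the message, while Msg logs the string verbatim. A small sketch of the behavior (the message is illustrative, not from this repository):

```go
package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	logger := zerolog.New(os.Stdout)
	msg := "sync 50% done" // pre-built message containing '%'

	// Msgf treats msg as a format string: the '%' is parsed as a verb
	// with a missing operand, and Go 1.24's vet flags the non-constant
	// format string.
	logger.Info().Msgf(msg) // message becomes "sync 50%!d(MISSING)one"

	// Msg logs the string as-is; correct when nothing is formatted.
	logger.Info().Msg(msg)
}
```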

api/service/stagedstreamsync/staged_stream_sync.go

Lines changed: 12 additions & 12 deletions
@@ -225,7 +225,7 @@ func (sss *StagedStreamSync) RevertTo(revertPoint uint64, invalidBlockNumber uin
 		Interface("invalidBlockHash", invalidBlockHash).
 		Interface("invalidBlockStreamID", invalidBlockStreamID).
 		Uint64("revertPoint", revertPoint).
-		Msgf(WrapStagedSyncMsg("Reverting blocks"))
+		Msg(WrapStagedSyncMsg("Reverting blocks"))
 	sss.revertPoint = &revertPoint
 	if invalidBlockNumber > 0 || invalidBlockHash != (common.Hash{}) {
 		resetBadStreams := !sss.invalidBlock.Active

@@ -286,7 +286,7 @@ func (sss *StagedStreamSync) cleanUp(ctx context.Context, fromStage int, db kv.R
 		if err := sss.pruneStage(ctx, firstCycle, sss.pruningOrder[i], db, tx); err != nil {
 			sss.logger.Error().Err(err).
 				Interface("stage id", sss.pruningOrder[i].ID).
-				Msgf(WrapStagedSyncMsg("stage cleanup failed"))
+				Msg(WrapStagedSyncMsg("stage cleanup failed"))
 			panic(err)
 		}
 	}

@@ -431,7 +431,7 @@ func (sss *StagedStreamSync) Run(ctx context.Context, db kv.RwDB, tx kv.RwTx, fi
 				sss.logger.Error().
 					Err(err).
 					Interface("stage id", sss.revertOrder[j].ID).
-					Msgf(WrapStagedSyncMsg("revert stage failed"))
+					Msg(WrapStagedSyncMsg("revert stage failed"))
 				return err
 			}
 		}

@@ -446,7 +446,7 @@ func (sss *StagedStreamSync) Run(ctx context.Context, db kv.RwDB, tx kv.RwTx, fi

 		if stage.Disabled {
 			sss.logger.Trace().
-				Msgf(WrapStagedSyncMsg(fmt.Sprintf("%s disabled. %s", stage.ID, stage.DisabledDescription)))
+				Msg(WrapStagedSyncMsg(fmt.Sprintf("%s disabled. %s", stage.ID, stage.DisabledDescription)))

 			sss.NextStage()
 			continue

@@ -461,7 +461,7 @@ func (sss *StagedStreamSync) Run(ctx context.Context, db kv.RwDB, tx kv.RwTx, fi
 			sss.logger.Error().
 				Err(err).
 				Interface("stage id", stage.ID).
-				Msgf(WrapStagedSyncMsg("stage failed"))
+				Msg(WrapStagedSyncMsg("stage failed"))
 			return err
 		}
 		sss.NextStage()

@@ -470,7 +470,7 @@ func (sss *StagedStreamSync) Run(ctx context.Context, db kv.RwDB, tx kv.RwTx, fi
 	if err := sss.cleanUp(ctx, 0, db, tx, firstCycle); err != nil {
 		sss.logger.Error().
 			Err(err).
-			Msgf(WrapStagedSyncMsg("stages cleanup failed"))
+			Msg(WrapStagedSyncMsg("stages cleanup failed"))
 		return err
 	}
 	if err := sss.SetCurrentStage(sss.stages[0].ID); err != nil {

@@ -564,7 +564,7 @@ func printLogs(tx kv.RwTx, timings []Timing) error {
 	}
 	if len(logCtx) > 0 {
 		timingLog := fmt.Sprintf("Timings (slower than 50ms) %v", logCtx)
-		utils.Logger().Info().Msgf(WrapStagedSyncMsg(timingLog))
+		utils.Logger().Info().Msg(WrapStagedSyncMsg(timingLog))
 	}

 	if tx == nil {

@@ -582,7 +582,7 @@ func printLogs(tx kv.RwTx, timings []Timing) error {
 			bucketSizes = append(bucketSizes, bucket, ByteCount(sz))
 		}
 		utils.Logger().Info().
-			Msgf(WrapStagedSyncMsg(fmt.Sprintf("Tables %v", bucketSizes...)))
+			Msg(WrapStagedSyncMsg(fmt.Sprintf("Tables %v", bucketSizes...)))
 	}
 	tx.CollectMetrics()
 	return nil

@@ -599,15 +599,15 @@ func (sss *StagedStreamSync) runStage(ctx context.Context, stage *Stage, db kv.R
 		sss.logger.Error().
 			Err(err).
 			Interface("stage id", stage.ID).
-			Msgf(WrapStagedSyncMsg("stage failed"))
+			Msg(WrapStagedSyncMsg("stage failed"))
 		return fmt.Errorf("[%s] %w", sss.LogPrefix(), err)
 	}

 	took := time.Since(start)
 	if took > 60*time.Second {
 		logPrefix := sss.LogPrefix()
 		sss.logger.Info().
-			Msgf(WrapStagedSyncMsg(fmt.Sprintf("%s: DONE in %d", logPrefix, took)))
+			Msg(WrapStagedSyncMsg(fmt.Sprintf("%s: DONE in %d", logPrefix, took)))

 	}
 	sss.timings = append(sss.timings, Timing{stage: stage.ID, took: took})

@@ -641,7 +641,7 @@ func (sss *StagedStreamSync) revertStage(ctx context.Context, firstCycle bool, s
 	if took > 60*time.Second {
 		logPrefix := sss.LogPrefix()
 		sss.logger.Info().
-			Msgf(WrapStagedSyncMsg(fmt.Sprintf("%s: Revert done in %d", logPrefix, took)))
+			Msg(WrapStagedSyncMsg(fmt.Sprintf("%s: Revert done in %d", logPrefix, took)))
 	}
 	sss.timings = append(sss.timings, Timing{isRevert: true, stage: stage.ID, took: took})
 	return nil

@@ -673,7 +673,7 @@ func (sss *StagedStreamSync) pruneStage(ctx context.Context, firstCycle bool, st
 	if took > 60*time.Second {
 		logPrefix := sss.LogPrefix()
 		sss.logger.Info().
-			Msgf(WrapStagedSyncMsg(fmt.Sprintf("%s: CleanUp done in %d", logPrefix, took)))
+			Msg(WrapStagedSyncMsg(fmt.Sprintf("%s: CleanUp done in %d", logPrefix, took)))
 	}
 	sss.timings = append(sss.timings, Timing{isCleanUp: true, stage: stage.ID, took: took})
 	return nil

api/service/stagedstreamsync/syncing.go

Lines changed: 1 addition & 1 deletion
@@ -450,7 +450,7 @@ func (s *StagedStreamSync) doSyncCycle(ctx context.Context) (int, error) {
 			Bool("isBeaconShard", s.isBeaconShard).
 			Uint32("shard", s.bc.ShardID()).
 			Uint64("currentHeight", startHead).
-			Msgf(WrapStagedSyncMsg("sync cycle failed"))
+			Msg(WrapStagedSyncMsg("sync cycle failed"))
 		return totalInserted, err
 	}

consensus/consensus.go

Lines changed: 1 addition & 1 deletion
@@ -256,7 +256,7 @@ func (consensus *Consensus) getLeaderPrivateKey(leaderKey *bls_core.PublicKey) (
 			return &consensus.priKey[i], nil
 		}
 	}
-	return nil, errors.Wrapf(errLeaderPriKeyNotFound, leaderKey.SerializeToHexStr())
+	return nil, errors.Wrap(errLeaderPriKeyNotFound, leaderKey.SerializeToHexStr())
 }

 // getConsensusLeaderPrivateKey returns consensus leader private key if node is the leader
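
Same reasoning for the errors.Wrapf-to-errors.Wrap change: in github.com/pkg/errors, Wrapf interprets its message as a format string, so passing the serialized key directly is exactly the non-constant-format pattern Go 1.24's vet rejects, while Wrap attaches the message verbatim. A sketch under those assumptions (the key value is hypothetical):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

var errLeaderPriKeyNotFound = errors.New("leader private key not found")

func main() {
	keyHex := "b1e2c3" // hypothetical serialized public key

	// errors.Wrapf(err, keyHex) would treat keyHex as a format string;
	// hex text contains no '%', but vet still flags the non-constant
	// format. errors.Wrap attaches the message verbatim instead.
	err := errors.Wrap(errLeaderPriKeyNotFound, keyHex)
	fmt.Println(err) // b1e2c3: leader private key not found
}
```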
