mirror of
https://github.com/ethereum/go-ethereum.git
synced 2026-04-14 05:38:37 +00:00
Compare commits
382 commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
01e33d14be | ||
|
|
527ea11e50 | ||
|
|
4da1e29320 | ||
|
|
289826fefb | ||
|
|
5b7511eeed | ||
|
|
7d463aedd3 | ||
|
|
f7f57d29d4 | ||
|
|
ecae519972 | ||
|
|
735bfd121a | ||
|
|
6333855163 | ||
|
|
deda47f6a1 | ||
|
|
f71a884e37 | ||
|
|
ea5448814f | ||
|
|
58557cb463 | ||
|
|
3772bb536a | ||
|
|
68c7058a80 | ||
|
|
21b19362c2 | ||
|
|
a8ea6319f1 | ||
|
|
04e40995d9 | ||
|
|
9878ef926d | ||
|
|
0bafb29490 | ||
|
|
52b8c09fdf | ||
|
|
b5d322000c | ||
|
|
bd6530a1d4 | ||
|
|
44257950f1 | ||
|
|
d8cb8a962b | ||
|
|
a608ac94ec | ||
|
|
00da4f51ff | ||
|
|
0ba4314321 | ||
|
|
bcb0efd756 | ||
|
|
db6c7d06a2 | ||
|
|
14a26d9ccc | ||
|
|
fc43170cdd | ||
|
|
92b4cb2663 | ||
|
|
3da517e239 | ||
|
|
dc3794e3dc | ||
|
|
965bd6b6a0 | ||
|
|
fe47c39903 | ||
|
|
be4dc0c4be | ||
|
|
95705e8b7b | ||
|
|
ceabc39304 | ||
|
|
e585ad3b42 | ||
|
|
d1369b69f5 | ||
|
|
bd3c8431d9 | ||
|
|
a2496852e9 | ||
|
|
c3467dd8b5 | ||
|
|
acdd139717 | ||
|
|
1b3b028d1d | ||
|
|
8a3a309fa9 | ||
|
|
5d0e18f775 | ||
|
|
8f9061f937 | ||
|
|
e951bcbff7 | ||
|
|
745b0a8c09 | ||
|
|
b87340a856 | ||
|
|
a61e5ccb1e | ||
|
|
e23b0cbc22 | ||
|
|
305cd7b9eb | ||
|
|
77779d1098 | ||
|
|
59ce2cb6a1 | ||
|
|
35b91092c5 | ||
|
|
fd859638bd | ||
|
|
a3083ff5d0 | ||
|
|
4faadf17fb | ||
|
|
3341d8ace0 | ||
|
|
b35645bdf7 | ||
|
|
6ae3f9fa56 | ||
|
|
6138a11c39 | ||
|
|
b6115e9a30 | ||
|
|
ab357151da | ||
|
|
9b2ce121dc | ||
|
|
fc1b0c0b83 | ||
|
|
519a450c43 | ||
|
|
4b915af2c3 | ||
|
|
98b13f342f | ||
|
|
a7d09cc14f | ||
|
|
77e7e5ad1a | ||
|
|
24025c2bd0 | ||
|
|
ede376af8e | ||
|
|
189f9d0b17 | ||
|
|
dba741fd31 | ||
|
|
eaa9418ac1 | ||
|
|
1c9ddee16f | ||
|
|
95b9a2ed77 | ||
|
|
de0a452f7d | ||
|
|
7d13acd030 | ||
|
|
59512b1849 | ||
|
|
3c20e08cba | ||
|
|
88f8549d37 | ||
|
|
32f05d68a2 | ||
|
|
f6068e3fb2 | ||
|
|
27c4ca9df0 | ||
|
|
aa417b03a6 | ||
|
|
91cec92bf3 | ||
|
|
b8a3fa7d06 | ||
|
|
b08aac1dbc | ||
|
|
00540f9469 | ||
|
|
e15d4ccc01 | ||
|
|
0d043d071e | ||
|
|
ecee64ecdc | ||
|
|
3f1871524f | ||
|
|
a0fb8102fe | ||
|
|
344ce84a43 | ||
|
|
ce64ab44ed | ||
|
|
fc8c10476d | ||
|
|
402c71f2e2 | ||
|
|
28dad943f6 | ||
|
|
6d0dd08860 | ||
|
|
dd202d4283 | ||
|
|
814edc5308 | ||
|
|
6d99759f01 | ||
|
|
fe3a74e610 | ||
|
|
4f75049ea0 | ||
|
|
773f71bb9e | ||
|
|
856e4d55d8 | ||
|
|
db7d3a4e0e | ||
|
|
16783c167c | ||
|
|
9962e2c9f3 | ||
|
|
d318e8eba9 | ||
|
|
b25080cac0 | ||
|
|
48cfc97776 | ||
|
|
1eead2ec33 | ||
|
|
2726c9ef9e | ||
|
|
5695fbc156 | ||
|
|
825436f043 | ||
|
|
723aae2b4e | ||
|
|
cee751a1ed | ||
|
|
95c6b05806 | ||
|
|
7793e00f0d | ||
|
|
1b1133d669 | ||
|
|
be92f5487e | ||
|
|
8a4345611d | ||
|
|
f811bfe4fd | ||
|
|
406a852ec8 | ||
|
|
2a45272408 | ||
|
|
8450e40798 | ||
|
|
9ecb6c4ae6 | ||
|
|
e636e4e3c1 | ||
|
|
cbf3d8fed2 | ||
|
|
199ac16e07 | ||
|
|
01083736c8 | ||
|
|
59ad40e562 | ||
|
|
c2e1785a48 | ||
|
|
82fad31540 | ||
|
|
1625064c68 | ||
|
|
1d1a094d51 | ||
|
|
e40aa46e88 | ||
|
|
d3dd48e59d | ||
|
|
00cbd2e6f4 | ||
|
|
453d0f9299 | ||
|
|
6d865ccd30 | ||
|
|
54f72c796f | ||
|
|
2a62df3815 | ||
|
|
01fe1d716c | ||
|
|
3eed0580d4 | ||
|
|
1054276906 | ||
|
|
0cf3d3ba4f | ||
|
|
9b78f45e33 | ||
|
|
c709c19b40 | ||
|
|
550ca91b17 | ||
|
|
a4b3898f90 | ||
|
|
0cba803fba | ||
|
|
ad88b68a46 | ||
|
|
c50e5edfaf | ||
|
|
d8b92cb9e6 | ||
|
|
ac85a6f254 | ||
|
|
4f38a76438 | ||
|
|
ece2b19ac0 | ||
|
|
f2869793df | ||
|
|
9426444825 | ||
|
|
995fa79bf5 | ||
|
|
919b238c82 | ||
|
|
3011d83e6f | ||
|
|
341907cdb8 | ||
|
|
30656d714e | ||
|
|
15a9e92bbd | ||
|
|
4d4883731e | ||
|
|
e2d21d0e9c | ||
|
|
986d115da7 | ||
|
|
bbb1ab8d16 | ||
|
|
7faa676b03 | ||
|
|
32a35bfcd3 | ||
|
|
c9b7ae422c | ||
|
|
c12959dc8c | ||
|
|
bc0db302ed | ||
|
|
777265620d | ||
|
|
ad459f4fac | ||
|
|
e64c8d8e26 | ||
|
|
9967fb7c5c | ||
|
|
aa457eda4b | ||
|
|
14c2408957 | ||
|
|
7b7be249cb | ||
|
|
6b82cef68f | ||
|
|
bba41f8072 | ||
|
|
8e1de223ad | ||
|
|
54a91b3ad8 | ||
|
|
b9288765a3 | ||
|
|
19f37003fb | ||
|
|
16a6531ac2 | ||
|
|
6530945dcd | ||
|
|
a951aacb70 | ||
|
|
a5e6a157e5 | ||
|
|
cb97c48cb6 | ||
|
|
845009f684 | ||
|
|
a179ccf6f0 | ||
|
|
c974722dc0 | ||
|
|
9a6905318a | ||
|
|
628ff79be3 | ||
|
|
7046e63244 | ||
|
|
2513feddf8 | ||
|
|
424bc22ab8 | ||
|
|
1e9dfd5bb0 | ||
|
|
0a8fd6841c | ||
|
|
3d05284928 | ||
|
|
344d01e2be | ||
|
|
56be36f672 | ||
|
|
181a3ae9e0 | ||
|
|
e250836973 | ||
|
|
c2595381bf | ||
|
|
9a8e14e77e | ||
|
|
251b863107 | ||
|
|
1022c7637d | ||
|
|
35922bcd33 | ||
|
|
8fad02ac63 | ||
|
|
54ab4e3c7d | ||
|
|
2eb1ccc6c4 | ||
|
|
46d804776b | ||
|
|
d58f6291a2 | ||
|
|
d0af257aa2 | ||
|
|
500931bc82 | ||
|
|
ef815c59a2 | ||
|
|
e78be59dc9 | ||
|
|
0495350388 | ||
|
|
3d78da9171 | ||
|
|
add1890a57 | ||
|
|
588dd94aad | ||
|
|
715bf8e81e | ||
|
|
b6fb79cdf9 | ||
|
|
23c3498836 | ||
|
|
9ba13b6097 | ||
|
|
494908a852 | ||
|
|
e3e556b266 | ||
|
|
a9acb3ff93 | ||
|
|
94710f79a2 | ||
|
|
3b17e78274 | ||
|
|
5b99d2bba4 | ||
|
|
ea4935430b | ||
|
|
5a1990d1d8 | ||
|
|
1278b4891d | ||
|
|
31d5d82ce5 | ||
|
|
c890637af9 | ||
|
|
127d1f42bb | ||
|
|
7cd400612f | ||
|
|
4eb5b66d9e | ||
|
|
b993cb6f38 | ||
|
|
f51870e40e | ||
|
|
52f998d5ec | ||
|
|
d5efd34010 | ||
|
|
a32851fac9 | ||
|
|
64d22fd7f7 | ||
|
|
9623dcbca2 | ||
|
|
01b39c96bf | ||
|
|
957a3602d9 | ||
|
|
710008450f | ||
|
|
eaaa5b716d | ||
|
|
a8a4804895 | ||
|
|
de5ea2ffd8 | ||
|
|
b635e0632c | ||
|
|
32fea008d8 | ||
|
|
b2843a11d6 | ||
|
|
25439aac04 | ||
|
|
52ae75afcd | ||
|
|
d9aaab13d3 | ||
|
|
b3e7d9ee44 | ||
|
|
b84097d22e | ||
|
|
3f641dba87 | ||
|
|
57f84866bc | ||
|
|
b9702ed27b | ||
|
|
4531bfebec | ||
|
|
27b3a6087e | ||
|
|
2e5cd21edf | ||
|
|
bf141fbfb1 | ||
|
|
dd7daace9d | ||
|
|
5dfcffcf3c | ||
|
|
bd77b77ede | ||
|
|
ffe9dc97e5 | ||
|
|
7aae33eacf | ||
|
|
6978ab48aa | ||
|
|
15f52a2937 | ||
|
|
e20b05ec7f | ||
|
|
a9eaf2ffd8 | ||
|
|
3a5560fa98 | ||
|
|
16f50285b7 | ||
|
|
472e3a24ac | ||
|
|
56d201b0fe | ||
|
|
1b702f71d9 | ||
|
|
b98b255449 | ||
|
|
13a8798fa3 | ||
|
|
1ce71a1895 | ||
|
|
215ee6ac18 | ||
|
|
c21fe5475f | ||
|
|
9a346873b8 | ||
|
|
e58c785424 | ||
|
|
228933a660 | ||
|
|
66134b35df | ||
|
|
31f9c9ff75 | ||
|
|
af47d9b472 | ||
|
|
dbca85869f | ||
|
|
e63e37be5e | ||
|
|
73a2df2b0a | ||
|
|
657c99f116 | ||
|
|
129c562900 | ||
|
|
d3679c2f2e | ||
|
|
212967d0e1 | ||
|
|
be94ea1c40 | ||
|
|
042c47ce1a | ||
|
|
da3822dcec | ||
|
|
6f2cbb7a27 | ||
|
|
cd3f9b24e9 | ||
|
|
5d51208334 | ||
|
|
f12f0ec0cd | ||
|
|
fbbaa3c849 | ||
|
|
28376aea78 | ||
|
|
a122dbe459 | ||
|
|
fed8e09ab0 | ||
|
|
f691d661c4 | ||
|
|
f43228152b | ||
|
|
446fdebdc3 | ||
|
|
b3b46ce435 | ||
|
|
795a7ab58a | ||
|
|
aa1a8dacae | ||
|
|
8d1b1c20d0 | ||
|
|
6426257c0f | ||
|
|
9bab01bee4 | ||
|
|
6452b7ad05 | ||
|
|
7805e203f0 | ||
|
|
3bbf5f5b6a | ||
|
|
c55a12197e | ||
|
|
3e48e0779c | ||
|
|
689ea10f35 | ||
|
|
ed4d00fd83 | ||
|
|
1468331f9d | ||
|
|
3f7cd905b0 | ||
|
|
cf93077fab | ||
|
|
62334a9d46 | ||
|
|
960c87a944 | ||
|
|
ebf93555b1 | ||
|
|
b04df226fa | ||
|
|
2a4847a7d1 | ||
|
|
a6191d8272 | ||
|
|
5748dd18e7 | ||
|
|
b5c3b32eeb | ||
|
|
495a1d2b1a | ||
|
|
f8e5b53f88 | ||
|
|
f4817b7a53 | ||
|
|
e0d81d1e99 | ||
|
|
5e6f7374de | ||
|
|
2a2f106a01 | ||
|
|
81c5b43029 | ||
|
|
95273afec4 | ||
|
|
aa36bcd0aa | ||
|
|
488d987fc4 | ||
|
|
fa16c89bfd | ||
|
|
f23d506b7d | ||
|
|
eb8f32588b | ||
|
|
48d708a194 | ||
|
|
12a389f065 | ||
|
|
3d2a4cb053 | ||
|
|
d8f9801305 | ||
|
|
7368b34a4b | ||
|
|
5f4cc3f57d | ||
|
|
ca91254259 | ||
|
|
fbd89be047 | ||
|
|
7755ee3e4f | ||
|
|
ebc7dc9e37 | ||
|
|
d2a5dba48f | ||
|
|
982235f5e0 | ||
|
|
7f9b06e7aa | ||
|
|
6420ee3592 | ||
|
|
15ff378a89 | ||
|
|
395425902d | ||
|
|
19aa8020a9 | ||
|
|
d39af344dc |
587 changed files with 46702 additions and 20970 deletions
1
.github/CODEOWNERS
vendored
1
.github/CODEOWNERS
vendored
|
|
@ -10,6 +10,7 @@ beacon/merkle/ @zsfelfoldi
|
||||||
beacon/types/ @zsfelfoldi @fjl
|
beacon/types/ @zsfelfoldi @fjl
|
||||||
beacon/params/ @zsfelfoldi @fjl
|
beacon/params/ @zsfelfoldi @fjl
|
||||||
cmd/evm/ @MariusVanDerWijden @lightclient
|
cmd/evm/ @MariusVanDerWijden @lightclient
|
||||||
|
cmd/keeper/ @gballet
|
||||||
core/state/ @rjl493456442
|
core/state/ @rjl493456442
|
||||||
crypto/ @gballet @jwasinger @fjl
|
crypto/ @gballet @jwasinger @fjl
|
||||||
core/ @rjl493456442
|
core/ @rjl493456442
|
||||||
|
|
|
||||||
4
.github/workflows/go.yml
vendored
4
.github/workflows/go.yml
vendored
|
|
@ -69,8 +69,8 @@ jobs:
|
||||||
|
|
||||||
- name: Install cross toolchain
|
- name: Install cross toolchain
|
||||||
run: |
|
run: |
|
||||||
apt-get update
|
sudo apt-get update
|
||||||
apt-get -yq --no-install-suggests --no-install-recommends install gcc-multilib
|
sudo apt-get -yq --no-install-suggests --no-install-recommends install gcc-multilib
|
||||||
|
|
||||||
- name: Build
|
- name: Build
|
||||||
run: go run build/ci.go test -arch 386 -short -p 8
|
run: go run build/ci.go test -arch 386 -short -p 8
|
||||||
|
|
|
||||||
60
.github/workflows/validate_pr.yml
vendored
60
.github/workflows/validate_pr.yml
vendored
|
|
@ -8,10 +8,54 @@ jobs:
|
||||||
validate-pr:
|
validate-pr:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
|
- name: Check for Spam PR
|
||||||
|
uses: actions/github-script@v7
|
||||||
|
with:
|
||||||
|
script: |
|
||||||
|
const prTitle = context.payload.pull_request.title;
|
||||||
|
const spamRegex = /^(feat|chore|fix)(\(.*\))?\s*:/i;
|
||||||
|
|
||||||
|
if (spamRegex.test(prTitle)) {
|
||||||
|
// Leave a comment explaining why
|
||||||
|
await github.rest.issues.createComment({
|
||||||
|
owner: context.repo.owner,
|
||||||
|
repo: context.repo.repo,
|
||||||
|
issue_number: context.payload.pull_request.number,
|
||||||
|
body: `## PR Closed as Spam
|
||||||
|
|
||||||
|
This PR was automatically closed because the title format \`feat:\`, \`fix:\`, or \`chore:\` is commonly associated with spam contributions.
|
||||||
|
|
||||||
|
If this is a legitimate contribution, please:
|
||||||
|
1. Review our contribution guidelines
|
||||||
|
2. Use the correct PR title format: \`directory, ...: description\`
|
||||||
|
3. Open a new PR with the proper title format
|
||||||
|
|
||||||
|
Thank you for your understanding.`
|
||||||
|
});
|
||||||
|
|
||||||
|
// Close the PR
|
||||||
|
await github.rest.pulls.update({
|
||||||
|
owner: context.repo.owner,
|
||||||
|
repo: context.repo.repo,
|
||||||
|
pull_number: context.payload.pull_request.number,
|
||||||
|
state: 'closed'
|
||||||
|
});
|
||||||
|
|
||||||
|
core.setFailed('PR closed as spam due to suspicious title format');
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
console.log('✅ PR passed spam check');
|
||||||
|
|
||||||
|
- name: Checkout repository
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: Check PR Title Format
|
- name: Check PR Title Format
|
||||||
uses: actions/github-script@v7
|
uses: actions/github-script@v7
|
||||||
with:
|
with:
|
||||||
script: |
|
script: |
|
||||||
|
const fs = require('fs');
|
||||||
|
const path = require('path');
|
||||||
const prTitle = context.payload.pull_request.title;
|
const prTitle = context.payload.pull_request.title;
|
||||||
const titleRegex = /^([\w\s,{}/.]+): .+/;
|
const titleRegex = /^([\w\s,{}/.]+): .+/;
|
||||||
|
|
||||||
|
|
@ -20,4 +64,20 @@ jobs:
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const match = prTitle.match(titleRegex);
|
||||||
|
const dirPart = match[1];
|
||||||
|
const directories = dirPart.split(',').map(d => d.trim());
|
||||||
|
const missingDirs = [];
|
||||||
|
for (const dir of directories) {
|
||||||
|
const fullPath = path.join(process.env.GITHUB_WORKSPACE, dir);
|
||||||
|
if (!fs.existsSync(fullPath)) {
|
||||||
|
missingDirs.push(dir);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (missingDirs.length > 0) {
|
||||||
|
core.setFailed(`The following directories in the PR title do not exist: ${missingDirs.join(', ')}`);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
console.log('✅ PR title format is valid');
|
console.log('✅ PR title format is valid');
|
||||||
|
|
|
||||||
102
AGENTS.md
Normal file
102
AGENTS.md
Normal file
|
|
@ -0,0 +1,102 @@
|
||||||
|
# AGENTS
|
||||||
|
|
||||||
|
## Guidelines
|
||||||
|
|
||||||
|
- **Keep changes minimal and focused.** Only modify code directly related to the task at hand. Do not refactor unrelated code, rename existing variables or functions for style, or bundle unrelated fixes into the same commit or PR.
|
||||||
|
- **Do not add, remove, or update dependencies** unless the task explicitly requires it.
|
||||||
|
|
||||||
|
## Pre-Commit Checklist
|
||||||
|
|
||||||
|
Before every commit, run **all** of the following checks and ensure they pass:
|
||||||
|
|
||||||
|
### 1. Formatting
|
||||||
|
|
||||||
|
Before committing, always run `gofmt` and `goimports` on all modified files:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
gofmt -w <modified files>
|
||||||
|
goimports -w <modified files>
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Build All Commands
|
||||||
|
|
||||||
|
Verify that all tools compile successfully:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
make all
|
||||||
|
```
|
||||||
|
|
||||||
|
This builds all executables under `cmd/`, including `keeper` which has special build requirements.
|
||||||
|
|
||||||
|
### 3. Tests
|
||||||
|
|
||||||
|
While iterating during development, use `-short` for faster feedback:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
go run ./build/ci.go test -short
|
||||||
|
```
|
||||||
|
|
||||||
|
Before committing, run the full test suite **without** `-short` to ensure all tests pass, including the Ethereum execution-spec tests and all state/block test permutations:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
go run ./build/ci.go test
|
||||||
|
```
|
||||||
|
|
||||||
|
### 4. Linting
|
||||||
|
|
||||||
|
```sh
|
||||||
|
go run ./build/ci.go lint
|
||||||
|
```
|
||||||
|
|
||||||
|
This runs additional style checks. Fix any issues before committing.
|
||||||
|
|
||||||
|
### 5. Generated Code
|
||||||
|
|
||||||
|
```sh
|
||||||
|
go run ./build/ci.go check_generate
|
||||||
|
```
|
||||||
|
|
||||||
|
Ensures that all generated files (e.g., `gen_*.go`) are up to date. If this fails, first install the required code generators by running `make devtools`, then run the appropriate `go generate` commands and include the updated files in your commit.
|
||||||
|
|
||||||
|
### 6. Dependency Hygiene
|
||||||
|
|
||||||
|
```sh
|
||||||
|
go run ./build/ci.go check_baddeps
|
||||||
|
```
|
||||||
|
|
||||||
|
Verifies that no forbidden dependencies have been introduced.
|
||||||
|
|
||||||
|
## What to include in commits
|
||||||
|
|
||||||
|
Do not commit binaries, whether they are produced by the main build or byproducts of investigations.
|
||||||
|
|
||||||
|
## Commit Message Format
|
||||||
|
|
||||||
|
Commit messages must be prefixed with the package(s) they modify, followed by a short lowercase description:
|
||||||
|
|
||||||
|
```
|
||||||
|
<package(s)>: description
|
||||||
|
```
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
- `core/vm: fix stack overflow in PUSH instruction`
|
||||||
|
- `eth, rpc: make trace configs optional`
|
||||||
|
- `cmd/geth: add new flag for sync mode`
|
||||||
|
|
||||||
|
Use comma-separated package names when multiple areas are affected. Keep the description concise.
|
||||||
|
|
||||||
|
## Pull Request Title Format
|
||||||
|
|
||||||
|
PR titles follow the same convention as commit messages:
|
||||||
|
|
||||||
|
```
|
||||||
|
<list of modified paths>: description
|
||||||
|
```
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
- `core/vm: fix stack overflow in PUSH instruction`
|
||||||
|
- `core, eth: add arena allocator support`
|
||||||
|
- `cmd/geth, internal/ethapi: refactor transaction args`
|
||||||
|
- `trie/archiver: streaming subtree archival to fix OOM`
|
||||||
|
|
||||||
|
Use the top-level package paths, comma-separated if multiple areas are affected. Only mention the directories with functional changes, interface changes that trickle all over the codebase should not generate an exhaustive list. The description should be a short, lowercase summary of the change.
|
||||||
|
|
@ -4,7 +4,7 @@ ARG VERSION=""
|
||||||
ARG BUILDNUM=""
|
ARG BUILDNUM=""
|
||||||
|
|
||||||
# Build Geth in a stock Go builder container
|
# Build Geth in a stock Go builder container
|
||||||
FROM golang:1.24-alpine AS builder
|
FROM golang:1.26-alpine AS builder
|
||||||
|
|
||||||
RUN apk add --no-cache gcc musl-dev linux-headers git
|
RUN apk add --no-cache gcc musl-dev linux-headers git
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -4,7 +4,7 @@ ARG VERSION=""
|
||||||
ARG BUILDNUM=""
|
ARG BUILDNUM=""
|
||||||
|
|
||||||
# Build Geth in a stock Go builder container
|
# Build Geth in a stock Go builder container
|
||||||
FROM golang:1.24-alpine AS builder
|
FROM golang:1.26-alpine AS builder
|
||||||
|
|
||||||
RUN apk add --no-cache gcc musl-dev linux-headers git
|
RUN apk add --no-cache gcc musl-dev linux-headers git
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -21,7 +21,6 @@ Audit reports are published in the `docs` folder: https://github.com/ethereum/go
|
||||||
|
|
||||||
To find out how to disclose a vulnerability in Ethereum visit [https://bounty.ethereum.org](https://bounty.ethereum.org) or email bounty@ethereum.org. Please read the [disclosure page](https://github.com/ethereum/go-ethereum/security/advisories?state=published) for more information about publicly disclosed security vulnerabilities.
|
To find out how to disclose a vulnerability in Ethereum visit [https://bounty.ethereum.org](https://bounty.ethereum.org) or email bounty@ethereum.org. Please read the [disclosure page](https://github.com/ethereum/go-ethereum/security/advisories?state=published) for more information about publicly disclosed security vulnerabilities.
|
||||||
|
|
||||||
Use the built-in `geth version-check` feature to check whether the software is affected by any known vulnerability. This command will fetch the latest [`vulnerabilities.json`](https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json) file which contains known security vulnerabilities concerning `geth`, and cross-check the data against its own version number.
|
|
||||||
|
|
||||||
The following key may be used to communicate sensitive information to developers.
|
The following key may be used to communicate sensitive information to developers.
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -183,8 +183,11 @@ var (
|
||||||
// Solidity: {{.Original.String}}
|
// Solidity: {{.Original.String}}
|
||||||
func ({{ decapitalise $contract.Type}} *{{$contract.Type}}) Unpack{{.Normalized.Name}}Event(log *types.Log) (*{{$contract.Type}}{{.Normalized.Name}}, error) {
|
func ({{ decapitalise $contract.Type}} *{{$contract.Type}}) Unpack{{.Normalized.Name}}Event(log *types.Log) (*{{$contract.Type}}{{.Normalized.Name}}, error) {
|
||||||
event := "{{.Original.Name}}"
|
event := "{{.Original.Name}}"
|
||||||
if len(log.Topics) == 0 || log.Topics[0] != {{ decapitalise $contract.Type}}.abi.Events[event].ID {
|
if len(log.Topics) == 0 {
|
||||||
return nil, errors.New("event signature mismatch")
|
return nil, bind.ErrNoEventSignature
|
||||||
|
}
|
||||||
|
if log.Topics[0] != {{ decapitalise $contract.Type}}.abi.Events[event].ID {
|
||||||
|
return nil, bind.ErrEventSignatureMismatch
|
||||||
}
|
}
|
||||||
out := new({{$contract.Type}}{{.Normalized.Name}})
|
out := new({{$contract.Type}}{{.Normalized.Name}})
|
||||||
if len(log.Data) > 0 {
|
if len(log.Data) > 0 {
|
||||||
|
|
|
||||||
|
|
@ -360,8 +360,11 @@ func (CrowdsaleFundTransfer) ContractEventName() string {
|
||||||
// Solidity: event FundTransfer(address backer, uint256 amount, bool isContribution)
|
// Solidity: event FundTransfer(address backer, uint256 amount, bool isContribution)
|
||||||
func (crowdsale *Crowdsale) UnpackFundTransferEvent(log *types.Log) (*CrowdsaleFundTransfer, error) {
|
func (crowdsale *Crowdsale) UnpackFundTransferEvent(log *types.Log) (*CrowdsaleFundTransfer, error) {
|
||||||
event := "FundTransfer"
|
event := "FundTransfer"
|
||||||
if len(log.Topics) == 0 || log.Topics[0] != crowdsale.abi.Events[event].ID {
|
if len(log.Topics) == 0 {
|
||||||
return nil, errors.New("event signature mismatch")
|
return nil, bind.ErrNoEventSignature
|
||||||
|
}
|
||||||
|
if log.Topics[0] != crowdsale.abi.Events[event].ID {
|
||||||
|
return nil, bind.ErrEventSignatureMismatch
|
||||||
}
|
}
|
||||||
out := new(CrowdsaleFundTransfer)
|
out := new(CrowdsaleFundTransfer)
|
||||||
if len(log.Data) > 0 {
|
if len(log.Data) > 0 {
|
||||||
|
|
|
||||||
35
accounts/abi/abigen/testdata/v2/dao.go.txt
vendored
35
accounts/abi/abigen/testdata/v2/dao.go.txt
vendored
|
|
@ -606,8 +606,11 @@ func (DAOChangeOfRules) ContractEventName() string {
|
||||||
// Solidity: event ChangeOfRules(uint256 minimumQuorum, uint256 debatingPeriodInMinutes, int256 majorityMargin)
|
// Solidity: event ChangeOfRules(uint256 minimumQuorum, uint256 debatingPeriodInMinutes, int256 majorityMargin)
|
||||||
func (dAO *DAO) UnpackChangeOfRulesEvent(log *types.Log) (*DAOChangeOfRules, error) {
|
func (dAO *DAO) UnpackChangeOfRulesEvent(log *types.Log) (*DAOChangeOfRules, error) {
|
||||||
event := "ChangeOfRules"
|
event := "ChangeOfRules"
|
||||||
if len(log.Topics) == 0 || log.Topics[0] != dAO.abi.Events[event].ID {
|
if len(log.Topics) == 0 {
|
||||||
return nil, errors.New("event signature mismatch")
|
return nil, bind.ErrNoEventSignature
|
||||||
|
}
|
||||||
|
if log.Topics[0] != dAO.abi.Events[event].ID {
|
||||||
|
return nil, bind.ErrEventSignatureMismatch
|
||||||
}
|
}
|
||||||
out := new(DAOChangeOfRules)
|
out := new(DAOChangeOfRules)
|
||||||
if len(log.Data) > 0 {
|
if len(log.Data) > 0 {
|
||||||
|
|
@ -648,8 +651,11 @@ func (DAOMembershipChanged) ContractEventName() string {
|
||||||
// Solidity: event MembershipChanged(address member, bool isMember)
|
// Solidity: event MembershipChanged(address member, bool isMember)
|
||||||
func (dAO *DAO) UnpackMembershipChangedEvent(log *types.Log) (*DAOMembershipChanged, error) {
|
func (dAO *DAO) UnpackMembershipChangedEvent(log *types.Log) (*DAOMembershipChanged, error) {
|
||||||
event := "MembershipChanged"
|
event := "MembershipChanged"
|
||||||
if len(log.Topics) == 0 || log.Topics[0] != dAO.abi.Events[event].ID {
|
if len(log.Topics) == 0 {
|
||||||
return nil, errors.New("event signature mismatch")
|
return nil, bind.ErrNoEventSignature
|
||||||
|
}
|
||||||
|
if log.Topics[0] != dAO.abi.Events[event].ID {
|
||||||
|
return nil, bind.ErrEventSignatureMismatch
|
||||||
}
|
}
|
||||||
out := new(DAOMembershipChanged)
|
out := new(DAOMembershipChanged)
|
||||||
if len(log.Data) > 0 {
|
if len(log.Data) > 0 {
|
||||||
|
|
@ -692,8 +698,11 @@ func (DAOProposalAdded) ContractEventName() string {
|
||||||
// Solidity: event ProposalAdded(uint256 proposalID, address recipient, uint256 amount, string description)
|
// Solidity: event ProposalAdded(uint256 proposalID, address recipient, uint256 amount, string description)
|
||||||
func (dAO *DAO) UnpackProposalAddedEvent(log *types.Log) (*DAOProposalAdded, error) {
|
func (dAO *DAO) UnpackProposalAddedEvent(log *types.Log) (*DAOProposalAdded, error) {
|
||||||
event := "ProposalAdded"
|
event := "ProposalAdded"
|
||||||
if len(log.Topics) == 0 || log.Topics[0] != dAO.abi.Events[event].ID {
|
if len(log.Topics) == 0 {
|
||||||
return nil, errors.New("event signature mismatch")
|
return nil, bind.ErrNoEventSignature
|
||||||
|
}
|
||||||
|
if log.Topics[0] != dAO.abi.Events[event].ID {
|
||||||
|
return nil, bind.ErrEventSignatureMismatch
|
||||||
}
|
}
|
||||||
out := new(DAOProposalAdded)
|
out := new(DAOProposalAdded)
|
||||||
if len(log.Data) > 0 {
|
if len(log.Data) > 0 {
|
||||||
|
|
@ -736,8 +745,11 @@ func (DAOProposalTallied) ContractEventName() string {
|
||||||
// Solidity: event ProposalTallied(uint256 proposalID, int256 result, uint256 quorum, bool active)
|
// Solidity: event ProposalTallied(uint256 proposalID, int256 result, uint256 quorum, bool active)
|
||||||
func (dAO *DAO) UnpackProposalTalliedEvent(log *types.Log) (*DAOProposalTallied, error) {
|
func (dAO *DAO) UnpackProposalTalliedEvent(log *types.Log) (*DAOProposalTallied, error) {
|
||||||
event := "ProposalTallied"
|
event := "ProposalTallied"
|
||||||
if len(log.Topics) == 0 || log.Topics[0] != dAO.abi.Events[event].ID {
|
if len(log.Topics) == 0 {
|
||||||
return nil, errors.New("event signature mismatch")
|
return nil, bind.ErrNoEventSignature
|
||||||
|
}
|
||||||
|
if log.Topics[0] != dAO.abi.Events[event].ID {
|
||||||
|
return nil, bind.ErrEventSignatureMismatch
|
||||||
}
|
}
|
||||||
out := new(DAOProposalTallied)
|
out := new(DAOProposalTallied)
|
||||||
if len(log.Data) > 0 {
|
if len(log.Data) > 0 {
|
||||||
|
|
@ -780,8 +792,11 @@ func (DAOVoted) ContractEventName() string {
|
||||||
// Solidity: event Voted(uint256 proposalID, bool position, address voter, string justification)
|
// Solidity: event Voted(uint256 proposalID, bool position, address voter, string justification)
|
||||||
func (dAO *DAO) UnpackVotedEvent(log *types.Log) (*DAOVoted, error) {
|
func (dAO *DAO) UnpackVotedEvent(log *types.Log) (*DAOVoted, error) {
|
||||||
event := "Voted"
|
event := "Voted"
|
||||||
if len(log.Topics) == 0 || log.Topics[0] != dAO.abi.Events[event].ID {
|
if len(log.Topics) == 0 {
|
||||||
return nil, errors.New("event signature mismatch")
|
return nil, bind.ErrNoEventSignature
|
||||||
|
}
|
||||||
|
if log.Topics[0] != dAO.abi.Events[event].ID {
|
||||||
|
return nil, bind.ErrEventSignatureMismatch
|
||||||
}
|
}
|
||||||
out := new(DAOVoted)
|
out := new(DAOVoted)
|
||||||
if len(log.Data) > 0 {
|
if len(log.Data) > 0 {
|
||||||
|
|
|
||||||
|
|
@ -72,8 +72,11 @@ func (EventCheckerDynamic) ContractEventName() string {
|
||||||
// Solidity: event dynamic(string indexed idxStr, bytes indexed idxDat, string str, bytes dat)
|
// Solidity: event dynamic(string indexed idxStr, bytes indexed idxDat, string str, bytes dat)
|
||||||
func (eventChecker *EventChecker) UnpackDynamicEvent(log *types.Log) (*EventCheckerDynamic, error) {
|
func (eventChecker *EventChecker) UnpackDynamicEvent(log *types.Log) (*EventCheckerDynamic, error) {
|
||||||
event := "dynamic"
|
event := "dynamic"
|
||||||
if len(log.Topics) == 0 || log.Topics[0] != eventChecker.abi.Events[event].ID {
|
if len(log.Topics) == 0 {
|
||||||
return nil, errors.New("event signature mismatch")
|
return nil, bind.ErrNoEventSignature
|
||||||
|
}
|
||||||
|
if log.Topics[0] != eventChecker.abi.Events[event].ID {
|
||||||
|
return nil, bind.ErrEventSignatureMismatch
|
||||||
}
|
}
|
||||||
out := new(EventCheckerDynamic)
|
out := new(EventCheckerDynamic)
|
||||||
if len(log.Data) > 0 {
|
if len(log.Data) > 0 {
|
||||||
|
|
@ -112,8 +115,11 @@ func (EventCheckerEmpty) ContractEventName() string {
|
||||||
// Solidity: event empty()
|
// Solidity: event empty()
|
||||||
func (eventChecker *EventChecker) UnpackEmptyEvent(log *types.Log) (*EventCheckerEmpty, error) {
|
func (eventChecker *EventChecker) UnpackEmptyEvent(log *types.Log) (*EventCheckerEmpty, error) {
|
||||||
event := "empty"
|
event := "empty"
|
||||||
if len(log.Topics) == 0 || log.Topics[0] != eventChecker.abi.Events[event].ID {
|
if len(log.Topics) == 0 {
|
||||||
return nil, errors.New("event signature mismatch")
|
return nil, bind.ErrNoEventSignature
|
||||||
|
}
|
||||||
|
if log.Topics[0] != eventChecker.abi.Events[event].ID {
|
||||||
|
return nil, bind.ErrEventSignatureMismatch
|
||||||
}
|
}
|
||||||
out := new(EventCheckerEmpty)
|
out := new(EventCheckerEmpty)
|
||||||
if len(log.Data) > 0 {
|
if len(log.Data) > 0 {
|
||||||
|
|
@ -154,8 +160,11 @@ func (EventCheckerIndexed) ContractEventName() string {
|
||||||
// Solidity: event indexed(address indexed addr, int256 indexed num)
|
// Solidity: event indexed(address indexed addr, int256 indexed num)
|
||||||
func (eventChecker *EventChecker) UnpackIndexedEvent(log *types.Log) (*EventCheckerIndexed, error) {
|
func (eventChecker *EventChecker) UnpackIndexedEvent(log *types.Log) (*EventCheckerIndexed, error) {
|
||||||
event := "indexed"
|
event := "indexed"
|
||||||
if len(log.Topics) == 0 || log.Topics[0] != eventChecker.abi.Events[event].ID {
|
if len(log.Topics) == 0 {
|
||||||
return nil, errors.New("event signature mismatch")
|
return nil, bind.ErrNoEventSignature
|
||||||
|
}
|
||||||
|
if log.Topics[0] != eventChecker.abi.Events[event].ID {
|
||||||
|
return nil, bind.ErrEventSignatureMismatch
|
||||||
}
|
}
|
||||||
out := new(EventCheckerIndexed)
|
out := new(EventCheckerIndexed)
|
||||||
if len(log.Data) > 0 {
|
if len(log.Data) > 0 {
|
||||||
|
|
@ -196,8 +205,11 @@ func (EventCheckerMixed) ContractEventName() string {
|
||||||
// Solidity: event mixed(address indexed addr, int256 num)
|
// Solidity: event mixed(address indexed addr, int256 num)
|
||||||
func (eventChecker *EventChecker) UnpackMixedEvent(log *types.Log) (*EventCheckerMixed, error) {
|
func (eventChecker *EventChecker) UnpackMixedEvent(log *types.Log) (*EventCheckerMixed, error) {
|
||||||
event := "mixed"
|
event := "mixed"
|
||||||
if len(log.Topics) == 0 || log.Topics[0] != eventChecker.abi.Events[event].ID {
|
if len(log.Topics) == 0 {
|
||||||
return nil, errors.New("event signature mismatch")
|
return nil, bind.ErrNoEventSignature
|
||||||
|
}
|
||||||
|
if log.Topics[0] != eventChecker.abi.Events[event].ID {
|
||||||
|
return nil, bind.ErrEventSignatureMismatch
|
||||||
}
|
}
|
||||||
out := new(EventCheckerMixed)
|
out := new(EventCheckerMixed)
|
||||||
if len(log.Data) > 0 {
|
if len(log.Data) > 0 {
|
||||||
|
|
@ -238,8 +250,11 @@ func (EventCheckerUnnamed) ContractEventName() string {
|
||||||
// Solidity: event unnamed(uint256 indexed arg0, uint256 indexed arg1)
|
// Solidity: event unnamed(uint256 indexed arg0, uint256 indexed arg1)
|
||||||
func (eventChecker *EventChecker) UnpackUnnamedEvent(log *types.Log) (*EventCheckerUnnamed, error) {
|
func (eventChecker *EventChecker) UnpackUnnamedEvent(log *types.Log) (*EventCheckerUnnamed, error) {
|
||||||
event := "unnamed"
|
event := "unnamed"
|
||||||
if len(log.Topics) == 0 || log.Topics[0] != eventChecker.abi.Events[event].ID {
|
if len(log.Topics) == 0 {
|
||||||
return nil, errors.New("event signature mismatch")
|
return nil, bind.ErrNoEventSignature
|
||||||
|
}
|
||||||
|
if log.Topics[0] != eventChecker.abi.Events[event].ID {
|
||||||
|
return nil, bind.ErrEventSignatureMismatch
|
||||||
}
|
}
|
||||||
out := new(EventCheckerUnnamed)
|
out := new(EventCheckerUnnamed)
|
||||||
if len(log.Data) > 0 {
|
if len(log.Data) > 0 {
|
||||||
|
|
|
||||||
|
|
@ -134,8 +134,11 @@ func (NameConflictLog) ContractEventName() string {
|
||||||
// Solidity: event log(int256 msg, int256 _msg)
|
// Solidity: event log(int256 msg, int256 _msg)
|
||||||
func (nameConflict *NameConflict) UnpackLogEvent(log *types.Log) (*NameConflictLog, error) {
|
func (nameConflict *NameConflict) UnpackLogEvent(log *types.Log) (*NameConflictLog, error) {
|
||||||
event := "log"
|
event := "log"
|
||||||
if len(log.Topics) == 0 || log.Topics[0] != nameConflict.abi.Events[event].ID {
|
if len(log.Topics) == 0 {
|
||||||
return nil, errors.New("event signature mismatch")
|
return nil, bind.ErrNoEventSignature
|
||||||
|
}
|
||||||
|
if log.Topics[0] != nameConflict.abi.Events[event].ID {
|
||||||
|
return nil, bind.ErrEventSignatureMismatch
|
||||||
}
|
}
|
||||||
out := new(NameConflictLog)
|
out := new(NameConflictLog)
|
||||||
if len(log.Data) > 0 {
|
if len(log.Data) > 0 {
|
||||||
|
|
|
||||||
|
|
@ -136,8 +136,11 @@ func (NumericMethodNameE1TestEvent) ContractEventName() string {
|
||||||
// Solidity: event _1TestEvent(address _param)
|
// Solidity: event _1TestEvent(address _param)
|
||||||
func (numericMethodName *NumericMethodName) UnpackE1TestEventEvent(log *types.Log) (*NumericMethodNameE1TestEvent, error) {
|
func (numericMethodName *NumericMethodName) UnpackE1TestEventEvent(log *types.Log) (*NumericMethodNameE1TestEvent, error) {
|
||||||
event := "_1TestEvent"
|
event := "_1TestEvent"
|
||||||
if len(log.Topics) == 0 || log.Topics[0] != numericMethodName.abi.Events[event].ID {
|
if len(log.Topics) == 0 {
|
||||||
return nil, errors.New("event signature mismatch")
|
return nil, bind.ErrNoEventSignature
|
||||||
|
}
|
||||||
|
if log.Topics[0] != numericMethodName.abi.Events[event].ID {
|
||||||
|
return nil, bind.ErrEventSignatureMismatch
|
||||||
}
|
}
|
||||||
out := new(NumericMethodNameE1TestEvent)
|
out := new(NumericMethodNameE1TestEvent)
|
||||||
if len(log.Data) > 0 {
|
if len(log.Data) > 0 {
|
||||||
|
|
|
||||||
14
accounts/abi/abigen/testdata/v2/overload.go.txt
vendored
14
accounts/abi/abigen/testdata/v2/overload.go.txt
vendored
|
|
@ -114,8 +114,11 @@ func (OverloadBar) ContractEventName() string {
|
||||||
// Solidity: event bar(uint256 i)
|
// Solidity: event bar(uint256 i)
|
||||||
func (overload *Overload) UnpackBarEvent(log *types.Log) (*OverloadBar, error) {
|
func (overload *Overload) UnpackBarEvent(log *types.Log) (*OverloadBar, error) {
|
||||||
event := "bar"
|
event := "bar"
|
||||||
if len(log.Topics) == 0 || log.Topics[0] != overload.abi.Events[event].ID {
|
if len(log.Topics) == 0 {
|
||||||
return nil, errors.New("event signature mismatch")
|
return nil, bind.ErrNoEventSignature
|
||||||
|
}
|
||||||
|
if log.Topics[0] != overload.abi.Events[event].ID {
|
||||||
|
return nil, bind.ErrEventSignatureMismatch
|
||||||
}
|
}
|
||||||
out := new(OverloadBar)
|
out := new(OverloadBar)
|
||||||
if len(log.Data) > 0 {
|
if len(log.Data) > 0 {
|
||||||
|
|
@ -156,8 +159,11 @@ func (OverloadBar0) ContractEventName() string {
|
||||||
// Solidity: event bar(uint256 i, uint256 j)
|
// Solidity: event bar(uint256 i, uint256 j)
|
||||||
func (overload *Overload) UnpackBar0Event(log *types.Log) (*OverloadBar0, error) {
|
func (overload *Overload) UnpackBar0Event(log *types.Log) (*OverloadBar0, error) {
|
||||||
event := "bar0"
|
event := "bar0"
|
||||||
if len(log.Topics) == 0 || log.Topics[0] != overload.abi.Events[event].ID {
|
if len(log.Topics) == 0 {
|
||||||
return nil, errors.New("event signature mismatch")
|
return nil, bind.ErrNoEventSignature
|
||||||
|
}
|
||||||
|
if log.Topics[0] != overload.abi.Events[event].ID {
|
||||||
|
return nil, bind.ErrEventSignatureMismatch
|
||||||
}
|
}
|
||||||
out := new(OverloadBar0)
|
out := new(OverloadBar0)
|
||||||
if len(log.Data) > 0 {
|
if len(log.Data) > 0 {
|
||||||
|
|
|
||||||
7
accounts/abi/abigen/testdata/v2/token.go.txt
vendored
7
accounts/abi/abigen/testdata/v2/token.go.txt
vendored
|
|
@ -386,8 +386,11 @@ func (TokenTransfer) ContractEventName() string {
|
||||||
// Solidity: event Transfer(address indexed from, address indexed to, uint256 value)
|
// Solidity: event Transfer(address indexed from, address indexed to, uint256 value)
|
||||||
func (token *Token) UnpackTransferEvent(log *types.Log) (*TokenTransfer, error) {
|
func (token *Token) UnpackTransferEvent(log *types.Log) (*TokenTransfer, error) {
|
||||||
event := "Transfer"
|
event := "Transfer"
|
||||||
if len(log.Topics) == 0 || log.Topics[0] != token.abi.Events[event].ID {
|
if len(log.Topics) == 0 {
|
||||||
return nil, errors.New("event signature mismatch")
|
return nil, bind.ErrNoEventSignature
|
||||||
|
}
|
||||||
|
if log.Topics[0] != token.abi.Events[event].ID {
|
||||||
|
return nil, bind.ErrEventSignatureMismatch
|
||||||
}
|
}
|
||||||
out := new(TokenTransfer)
|
out := new(TokenTransfer)
|
||||||
if len(log.Data) > 0 {
|
if len(log.Data) > 0 {
|
||||||
|
|
|
||||||
14
accounts/abi/abigen/testdata/v2/tuple.go.txt
vendored
14
accounts/abi/abigen/testdata/v2/tuple.go.txt
vendored
|
|
@ -193,8 +193,11 @@ func (TupleTupleEvent) ContractEventName() string {
|
||||||
// Solidity: event TupleEvent((uint256,uint256[],(uint256,uint256)[]) a, (uint256,uint256)[2][] b, (uint256,uint256)[][2] c, (uint256,uint256[],(uint256,uint256)[])[] d, uint256[] e)
|
// Solidity: event TupleEvent((uint256,uint256[],(uint256,uint256)[]) a, (uint256,uint256)[2][] b, (uint256,uint256)[][2] c, (uint256,uint256[],(uint256,uint256)[])[] d, uint256[] e)
|
||||||
func (tuple *Tuple) UnpackTupleEventEvent(log *types.Log) (*TupleTupleEvent, error) {
|
func (tuple *Tuple) UnpackTupleEventEvent(log *types.Log) (*TupleTupleEvent, error) {
|
||||||
event := "TupleEvent"
|
event := "TupleEvent"
|
||||||
if len(log.Topics) == 0 || log.Topics[0] != tuple.abi.Events[event].ID {
|
if len(log.Topics) == 0 {
|
||||||
return nil, errors.New("event signature mismatch")
|
return nil, bind.ErrNoEventSignature
|
||||||
|
}
|
||||||
|
if log.Topics[0] != tuple.abi.Events[event].ID {
|
||||||
|
return nil, bind.ErrEventSignatureMismatch
|
||||||
}
|
}
|
||||||
out := new(TupleTupleEvent)
|
out := new(TupleTupleEvent)
|
||||||
if len(log.Data) > 0 {
|
if len(log.Data) > 0 {
|
||||||
|
|
@ -234,8 +237,11 @@ func (TupleTupleEvent2) ContractEventName() string {
|
||||||
// Solidity: event TupleEvent2((uint8,uint8)[] arg0)
|
// Solidity: event TupleEvent2((uint8,uint8)[] arg0)
|
||||||
func (tuple *Tuple) UnpackTupleEvent2Event(log *types.Log) (*TupleTupleEvent2, error) {
|
func (tuple *Tuple) UnpackTupleEvent2Event(log *types.Log) (*TupleTupleEvent2, error) {
|
||||||
event := "TupleEvent2"
|
event := "TupleEvent2"
|
||||||
if len(log.Topics) == 0 || log.Topics[0] != tuple.abi.Events[event].ID {
|
if len(log.Topics) == 0 {
|
||||||
return nil, errors.New("event signature mismatch")
|
return nil, bind.ErrNoEventSignature
|
||||||
|
}
|
||||||
|
if log.Topics[0] != tuple.abi.Events[event].ID {
|
||||||
|
return nil, bind.ErrEventSignatureMismatch
|
||||||
}
|
}
|
||||||
out := new(TupleTupleEvent2)
|
out := new(TupleTupleEvent2)
|
||||||
if len(log.Data) > 0 {
|
if len(log.Data) > 0 {
|
||||||
|
|
|
||||||
|
|
@ -35,8 +35,8 @@ import (
|
||||||
const basefeeWiggleMultiplier = 2
|
const basefeeWiggleMultiplier = 2
|
||||||
|
|
||||||
var (
|
var (
|
||||||
errNoEventSignature = errors.New("no event signature")
|
ErrNoEventSignature = errors.New("no event signature")
|
||||||
errEventSignatureMismatch = errors.New("event signature mismatch")
|
ErrEventSignatureMismatch = errors.New("event signature mismatch")
|
||||||
)
|
)
|
||||||
|
|
||||||
// SignerFn is a signer function callback when a contract requires a method to
|
// SignerFn is a signer function callback when a contract requires a method to
|
||||||
|
|
@ -536,10 +536,10 @@ func (c *BoundContract) WatchLogs(opts *WatchOpts, name string, query ...[]any)
|
||||||
func (c *BoundContract) UnpackLog(out any, event string, log types.Log) error {
|
func (c *BoundContract) UnpackLog(out any, event string, log types.Log) error {
|
||||||
// Anonymous events are not supported.
|
// Anonymous events are not supported.
|
||||||
if len(log.Topics) == 0 {
|
if len(log.Topics) == 0 {
|
||||||
return errNoEventSignature
|
return ErrNoEventSignature
|
||||||
}
|
}
|
||||||
if log.Topics[0] != c.abi.Events[event].ID {
|
if log.Topics[0] != c.abi.Events[event].ID {
|
||||||
return errEventSignatureMismatch
|
return ErrEventSignatureMismatch
|
||||||
}
|
}
|
||||||
if len(log.Data) > 0 {
|
if len(log.Data) > 0 {
|
||||||
if err := c.abi.UnpackIntoInterface(out, event, log.Data); err != nil {
|
if err := c.abi.UnpackIntoInterface(out, event, log.Data); err != nil {
|
||||||
|
|
@ -559,10 +559,10 @@ func (c *BoundContract) UnpackLog(out any, event string, log types.Log) error {
|
||||||
func (c *BoundContract) UnpackLogIntoMap(out map[string]any, event string, log types.Log) error {
|
func (c *BoundContract) UnpackLogIntoMap(out map[string]any, event string, log types.Log) error {
|
||||||
// Anonymous events are not supported.
|
// Anonymous events are not supported.
|
||||||
if len(log.Topics) == 0 {
|
if len(log.Topics) == 0 {
|
||||||
return errNoEventSignature
|
return ErrNoEventSignature
|
||||||
}
|
}
|
||||||
if log.Topics[0] != c.abi.Events[event].ID {
|
if log.Topics[0] != c.abi.Events[event].ID {
|
||||||
return errEventSignatureMismatch
|
return ErrEventSignatureMismatch
|
||||||
}
|
}
|
||||||
if len(log.Data) > 0 {
|
if len(log.Data) > 0 {
|
||||||
if err := c.abi.UnpackIntoMap(out, event, log.Data); err != nil {
|
if err := c.abi.UnpackIntoMap(out, event, log.Data); err != nil {
|
||||||
|
|
|
||||||
|
|
@ -158,10 +158,10 @@ func testLinkCase(tcInput linkTestCaseInput) error {
|
||||||
overrideAddrs = make(map[rune]common.Address)
|
overrideAddrs = make(map[rune]common.Address)
|
||||||
)
|
)
|
||||||
// generate deterministic addresses for the override set.
|
// generate deterministic addresses for the override set.
|
||||||
rand.Seed(42)
|
rng := rand.New(rand.NewSource(42))
|
||||||
for contract := range tcInput.overrides {
|
for contract := range tcInput.overrides {
|
||||||
var addr common.Address
|
var addr common.Address
|
||||||
rand.Read(addr[:])
|
rng.Read(addr[:])
|
||||||
overrideAddrs[contract] = addr
|
overrideAddrs[contract] = addr
|
||||||
overridesAddrs[addr] = struct{}{}
|
overridesAddrs[addr] = struct{}{}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -276,8 +276,11 @@ func (DBInsert) ContractEventName() string {
|
||||||
// Solidity: event Insert(uint256 key, uint256 value, uint256 length)
|
// Solidity: event Insert(uint256 key, uint256 value, uint256 length)
|
||||||
func (dB *DB) UnpackInsertEvent(log *types.Log) (*DBInsert, error) {
|
func (dB *DB) UnpackInsertEvent(log *types.Log) (*DBInsert, error) {
|
||||||
event := "Insert"
|
event := "Insert"
|
||||||
if len(log.Topics) == 0 || log.Topics[0] != dB.abi.Events[event].ID {
|
if len(log.Topics) == 0 {
|
||||||
return nil, errors.New("event signature mismatch")
|
return nil, bind.ErrNoEventSignature
|
||||||
|
}
|
||||||
|
if log.Topics[0] != dB.abi.Events[event].ID {
|
||||||
|
return nil, bind.ErrEventSignatureMismatch
|
||||||
}
|
}
|
||||||
out := new(DBInsert)
|
out := new(DBInsert)
|
||||||
if len(log.Data) > 0 {
|
if len(log.Data) > 0 {
|
||||||
|
|
@ -318,8 +321,11 @@ func (DBKeyedInsert) ContractEventName() string {
|
||||||
// Solidity: event KeyedInsert(uint256 indexed key, uint256 value)
|
// Solidity: event KeyedInsert(uint256 indexed key, uint256 value)
|
||||||
func (dB *DB) UnpackKeyedInsertEvent(log *types.Log) (*DBKeyedInsert, error) {
|
func (dB *DB) UnpackKeyedInsertEvent(log *types.Log) (*DBKeyedInsert, error) {
|
||||||
event := "KeyedInsert"
|
event := "KeyedInsert"
|
||||||
if len(log.Topics) == 0 || log.Topics[0] != dB.abi.Events[event].ID {
|
if len(log.Topics) == 0 {
|
||||||
return nil, errors.New("event signature mismatch")
|
return nil, bind.ErrNoEventSignature
|
||||||
|
}
|
||||||
|
if log.Topics[0] != dB.abi.Events[event].ID {
|
||||||
|
return nil, bind.ErrEventSignatureMismatch
|
||||||
}
|
}
|
||||||
out := new(DBKeyedInsert)
|
out := new(DBKeyedInsert)
|
||||||
if len(log.Data) > 0 {
|
if len(log.Data) > 0 {
|
||||||
|
|
|
||||||
|
|
@ -115,8 +115,11 @@ func (CBasic1) ContractEventName() string {
|
||||||
// Solidity: event basic1(uint256 indexed id, uint256 data)
|
// Solidity: event basic1(uint256 indexed id, uint256 data)
|
||||||
func (c *C) UnpackBasic1Event(log *types.Log) (*CBasic1, error) {
|
func (c *C) UnpackBasic1Event(log *types.Log) (*CBasic1, error) {
|
||||||
event := "basic1"
|
event := "basic1"
|
||||||
if len(log.Topics) == 0 || log.Topics[0] != c.abi.Events[event].ID {
|
if len(log.Topics) == 0 {
|
||||||
return nil, errors.New("event signature mismatch")
|
return nil, bind.ErrNoEventSignature
|
||||||
|
}
|
||||||
|
if log.Topics[0] != c.abi.Events[event].ID {
|
||||||
|
return nil, bind.ErrEventSignatureMismatch
|
||||||
}
|
}
|
||||||
out := new(CBasic1)
|
out := new(CBasic1)
|
||||||
if len(log.Data) > 0 {
|
if len(log.Data) > 0 {
|
||||||
|
|
@ -157,8 +160,11 @@ func (CBasic2) ContractEventName() string {
|
||||||
// Solidity: event basic2(bool indexed flag, uint256 data)
|
// Solidity: event basic2(bool indexed flag, uint256 data)
|
||||||
func (c *C) UnpackBasic2Event(log *types.Log) (*CBasic2, error) {
|
func (c *C) UnpackBasic2Event(log *types.Log) (*CBasic2, error) {
|
||||||
event := "basic2"
|
event := "basic2"
|
||||||
if len(log.Topics) == 0 || log.Topics[0] != c.abi.Events[event].ID {
|
if len(log.Topics) == 0 {
|
||||||
return nil, errors.New("event signature mismatch")
|
return nil, bind.ErrNoEventSignature
|
||||||
|
}
|
||||||
|
if log.Topics[0] != c.abi.Events[event].ID {
|
||||||
|
return nil, bind.ErrEventSignatureMismatch
|
||||||
}
|
}
|
||||||
out := new(CBasic2)
|
out := new(CBasic2)
|
||||||
if len(log.Data) > 0 {
|
if len(log.Data) > 0 {
|
||||||
|
|
|
||||||
|
|
@ -379,16 +379,16 @@ func TestEventUnpackEmptyTopics(t *testing.T) {
|
||||||
if err == nil {
|
if err == nil {
|
||||||
t.Fatal("expected error when unpacking event with empty topics, got nil")
|
t.Fatal("expected error when unpacking event with empty topics, got nil")
|
||||||
}
|
}
|
||||||
if err.Error() != "event signature mismatch" {
|
if err != bind.ErrNoEventSignature {
|
||||||
t.Fatalf("expected 'event signature mismatch' error, got: %v", err)
|
t.Fatalf("expected 'no event signature' error, got: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err = c.UnpackBasic2Event(log)
|
_, err = c.UnpackBasic2Event(log)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
t.Fatal("expected error when unpacking event with empty topics, got nil")
|
t.Fatal("expected error when unpacking event with empty topics, got nil")
|
||||||
}
|
}
|
||||||
if err.Error() != "event signature mismatch" {
|
if err != bind.ErrNoEventSignature {
|
||||||
t.Fatalf("expected 'event signature mismatch' error, got: %v", err)
|
t.Fatalf("expected 'no event signature' error, got: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -24,8 +24,8 @@ import (
|
||||||
"github.com/ethereum/go-ethereum"
|
"github.com/ethereum/go-ethereum"
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
|
"github.com/ethereum/go-ethereum/crypto/keccak"
|
||||||
"github.com/ethereum/go-ethereum/event"
|
"github.com/ethereum/go-ethereum/event"
|
||||||
"golang.org/x/crypto/sha3"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Account represents an Ethereum account located at a specific location defined
|
// Account represents an Ethereum account located at a specific location defined
|
||||||
|
|
@ -196,7 +196,7 @@ func TextHash(data []byte) []byte {
|
||||||
// This gives context to the signed message and prevents signing of transactions.
|
// This gives context to the signed message and prevents signing of transactions.
|
||||||
func TextAndHash(data []byte) ([]byte, string) {
|
func TextAndHash(data []byte) ([]byte, string) {
|
||||||
msg := fmt.Sprintf("\x19Ethereum Signed Message:\n%d%s", len(data), data)
|
msg := fmt.Sprintf("\x19Ethereum Signed Message:\n%d%s", len(data), data)
|
||||||
hasher := sha3.NewLegacyKeccak256()
|
hasher := keccak.NewLegacyKeccak256()
|
||||||
hasher.Write([]byte(msg))
|
hasher.Write([]byte(msg))
|
||||||
return hasher.Sum(nil), msg
|
return hasher.Sum(nil), msg
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -68,18 +68,27 @@ func waitWatcherStart(ks *KeyStore) bool {
|
||||||
|
|
||||||
func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error {
|
func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error {
|
||||||
var list []accounts.Account
|
var list []accounts.Account
|
||||||
|
haveAccounts := false
|
||||||
|
haveChange := false
|
||||||
for t0 := time.Now(); time.Since(t0) < 5*time.Second; time.Sleep(100 * time.Millisecond) {
|
for t0 := time.Now(); time.Since(t0) < 5*time.Second; time.Sleep(100 * time.Millisecond) {
|
||||||
|
if !haveAccounts {
|
||||||
list = ks.Accounts()
|
list = ks.Accounts()
|
||||||
if reflect.DeepEqual(list, wantAccounts) {
|
haveAccounts = reflect.DeepEqual(list, wantAccounts)
|
||||||
// ks should have also received change notifications
|
}
|
||||||
|
if !haveChange {
|
||||||
select {
|
select {
|
||||||
case <-ks.changes:
|
case <-ks.changes:
|
||||||
|
haveChange = true
|
||||||
default:
|
default:
|
||||||
return errors.New("wasn't notified of new accounts")
|
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
if haveAccounts && haveChange {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if haveAccounts {
|
||||||
|
return errors.New("wasn't notified of new accounts")
|
||||||
|
}
|
||||||
return fmt.Errorf("\ngot %v\nwant %v", list, wantAccounts)
|
return fmt.Errorf("\ngot %v\nwant %v", list, wantAccounts)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -418,6 +418,7 @@ func (ks *KeyStore) Export(a accounts.Account, passphrase, newPassphrase string)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
defer zeroKey(key.PrivateKey)
|
||||||
var N, P int
|
var N, P int
|
||||||
if store, ok := ks.storage.(*keyStorePassphrase); ok {
|
if store, ok := ks.storage.(*keyStorePassphrase); ok {
|
||||||
N, P = store.scryptN, store.scryptP
|
N, P = store.scryptN, store.scryptP
|
||||||
|
|
@ -477,6 +478,7 @@ func (ks *KeyStore) Update(a accounts.Account, passphrase, newPassphrase string)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
defer zeroKey(key.PrivateKey)
|
||||||
return ks.storage.StoreKey(a.URL.Path, key, newPassphrase)
|
return ks.storage.StoreKey(a.URL.Path, key, newPassphrase)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -81,6 +81,9 @@ func decryptPreSaleKey(fileContent []byte, password string) (key *Key, err error
|
||||||
*/
|
*/
|
||||||
passBytes := []byte(password)
|
passBytes := []byte(password)
|
||||||
derivedKey := pbkdf2.Key(passBytes, passBytes, 2000, 16, sha256.New)
|
derivedKey := pbkdf2.Key(passBytes, passBytes, 2000, 16, sha256.New)
|
||||||
|
if len(cipherText)%aes.BlockSize != 0 {
|
||||||
|
return nil, errors.New("ciphertext must be a multiple of block size")
|
||||||
|
}
|
||||||
plainText, err := aesCBCDecrypt(derivedKey, cipherText, iv)
|
plainText, err := aesCBCDecrypt(derivedKey, cipherText, iv)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
|
|
||||||
|
|
@ -300,6 +300,10 @@ func (s *SecureChannelSession) decryptAPDU(data []byte) ([]byte, error) {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if len(data) == 0 || len(data)%aes.BlockSize != 0 {
|
||||||
|
return nil, fmt.Errorf("invalid ciphertext length: %d", len(data))
|
||||||
|
}
|
||||||
|
|
||||||
ret := make([]byte, len(data))
|
ret := make([]byte, len(data))
|
||||||
|
|
||||||
crypter := cipher.NewCBCDecrypter(a, s.iv)
|
crypter := cipher.NewCBCDecrypter(a, s.iv)
|
||||||
|
|
|
||||||
|
|
@ -43,6 +43,14 @@ const refreshCycle = time.Second
|
||||||
// trashing.
|
// trashing.
|
||||||
const refreshThrottling = 500 * time.Millisecond
|
const refreshThrottling = 500 * time.Millisecond
|
||||||
|
|
||||||
|
const (
|
||||||
|
// deviceUsagePage identifies Ledger devices by HID usage page (0xffa0) on Windows and macOS.
|
||||||
|
// See: https://github.com/LedgerHQ/ledger-live/blob/05a2980e838955a11a1418da638ef8ac3df4fb74/libs/ledgerjs/packages/hw-transport-node-hid-noevents/src/TransportNodeHid.ts
|
||||||
|
deviceUsagePage = 0xffa0
|
||||||
|
// deviceInterface identifies Ledger devices by USB interface number (0) on Linux.
|
||||||
|
deviceInterface = 0
|
||||||
|
)
|
||||||
|
|
||||||
// Hub is a accounts.Backend that can find and handle generic USB hardware wallets.
|
// Hub is a accounts.Backend that can find and handle generic USB hardware wallets.
|
||||||
type Hub struct {
|
type Hub struct {
|
||||||
scheme string // Protocol scheme prefixing account and wallet URLs.
|
scheme string // Protocol scheme prefixing account and wallet URLs.
|
||||||
|
|
@ -82,6 +90,7 @@ func NewLedgerHub() (*Hub, error) {
|
||||||
0x0005, /* Ledger Nano S Plus */
|
0x0005, /* Ledger Nano S Plus */
|
||||||
0x0006, /* Ledger Nano FTS */
|
0x0006, /* Ledger Nano FTS */
|
||||||
0x0007, /* Ledger Flex */
|
0x0007, /* Ledger Flex */
|
||||||
|
0x0008, /* Ledger Nano Gen5 */
|
||||||
|
|
||||||
0x0000, /* WebUSB Ledger Blue */
|
0x0000, /* WebUSB Ledger Blue */
|
||||||
0x1000, /* WebUSB Ledger Nano S */
|
0x1000, /* WebUSB Ledger Nano S */
|
||||||
|
|
@ -89,7 +98,8 @@ func NewLedgerHub() (*Hub, error) {
|
||||||
0x5000, /* WebUSB Ledger Nano S Plus */
|
0x5000, /* WebUSB Ledger Nano S Plus */
|
||||||
0x6000, /* WebUSB Ledger Nano FTS */
|
0x6000, /* WebUSB Ledger Nano FTS */
|
||||||
0x7000, /* WebUSB Ledger Flex */
|
0x7000, /* WebUSB Ledger Flex */
|
||||||
}, 0xffa0, 0, newLedgerDriver)
|
0x8000, /* WebUSB Ledger Nano Gen5 */
|
||||||
|
}, deviceUsagePage, deviceInterface, newLedgerDriver)
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewTrezorHubWithHID creates a new hardware wallet manager for Trezor devices.
|
// NewTrezorHubWithHID creates a new hardware wallet manager for Trezor devices.
|
||||||
|
|
|
||||||
|
|
@ -184,7 +184,7 @@ func (w *ledgerDriver) SignTypedMessage(path accounts.DerivationPath, domainHash
|
||||||
return nil, accounts.ErrWalletClosed
|
return nil, accounts.ErrWalletClosed
|
||||||
}
|
}
|
||||||
// Ensure the wallet is capable of signing the given transaction
|
// Ensure the wallet is capable of signing the given transaction
|
||||||
if w.version[0] < 1 && w.version[1] < 5 {
|
if w.version[0] < 1 || (w.version[0] == 1 && w.version[1] < 5) {
|
||||||
//lint:ignore ST1005 brand name displayed on the console
|
//lint:ignore ST1005 brand name displayed on the console
|
||||||
return nil, fmt.Errorf("Ledger version >= 1.5.0 required for EIP-712 signing (found version v%d.%d.%d)", w.version[0], w.version[1], w.version[2])
|
return nil, fmt.Errorf("Ledger version >= 1.5.0 required for EIP-712 signing (found version v%d.%d.%d)", w.version[0], w.version[1], w.version[2])
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -632,7 +632,7 @@ func (w *wallet) SignTx(account accounts.Account, tx *types.Transaction, chainID
|
||||||
// data is not supported for Ledger wallets, so this method will always return
|
// data is not supported for Ledger wallets, so this method will always return
|
||||||
// an error.
|
// an error.
|
||||||
func (w *wallet) SignTextWithPassphrase(account accounts.Account, passphrase string, text []byte) ([]byte, error) {
|
func (w *wallet) SignTextWithPassphrase(account accounts.Account, passphrase string, text []byte) ([]byte, error) {
|
||||||
return w.SignText(account, accounts.TextHash(text))
|
return w.SignText(account, text)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SignTxWithPassphrase implements accounts.Wallet, attempting to sign the given
|
// SignTxWithPassphrase implements accounts.Wallet, attempting to sign the given
|
||||||
|
|
|
||||||
|
|
@ -87,6 +87,10 @@ func (ec *engineClient) updateLoop(headCh <-chan types.ChainHeadEvent) {
|
||||||
if status, err := ec.callForkchoiceUpdated(forkName, event); err == nil {
|
if status, err := ec.callForkchoiceUpdated(forkName, event); err == nil {
|
||||||
log.Info("Successful ForkchoiceUpdated", "head", event.Block.Hash(), "status", status)
|
log.Info("Successful ForkchoiceUpdated", "head", event.Block.Hash(), "status", status)
|
||||||
} else {
|
} else {
|
||||||
|
if err.Error() == "beacon syncer reorging" {
|
||||||
|
log.Debug("Failed ForkchoiceUpdated", "head", event.Block.Hash(), "error", err)
|
||||||
|
continue // ignore beacon syncer reorging errors, this error can occur if the blsync is skipping a block
|
||||||
|
}
|
||||||
log.Error("Failed ForkchoiceUpdated", "head", event.Block.Hash(), "error", err)
|
log.Error("Failed ForkchoiceUpdated", "head", event.Block.Hash(), "error", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -101,7 +105,16 @@ func (ec *engineClient) callNewPayload(fork string, event types.ChainHeadEvent)
|
||||||
params = []any{execData}
|
params = []any{execData}
|
||||||
)
|
)
|
||||||
switch fork {
|
switch fork {
|
||||||
case "electra":
|
case "altair", "bellatrix":
|
||||||
|
method = "engine_newPayloadV1"
|
||||||
|
case "capella":
|
||||||
|
method = "engine_newPayloadV2"
|
||||||
|
case "deneb":
|
||||||
|
method = "engine_newPayloadV3"
|
||||||
|
parentBeaconRoot := event.BeaconHead.ParentRoot
|
||||||
|
blobHashes := collectBlobHashes(event.Block)
|
||||||
|
params = append(params, blobHashes, parentBeaconRoot)
|
||||||
|
default: // electra, fulu and above
|
||||||
method = "engine_newPayloadV4"
|
method = "engine_newPayloadV4"
|
||||||
parentBeaconRoot := event.BeaconHead.ParentRoot
|
parentBeaconRoot := event.BeaconHead.ParentRoot
|
||||||
blobHashes := collectBlobHashes(event.Block)
|
blobHashes := collectBlobHashes(event.Block)
|
||||||
|
|
@ -110,15 +123,6 @@ func (ec *engineClient) callNewPayload(fork string, event types.ChainHeadEvent)
|
||||||
hexRequests[i] = hexutil.Bytes(event.ExecRequests[i])
|
hexRequests[i] = hexutil.Bytes(event.ExecRequests[i])
|
||||||
}
|
}
|
||||||
params = append(params, blobHashes, parentBeaconRoot, hexRequests)
|
params = append(params, blobHashes, parentBeaconRoot, hexRequests)
|
||||||
case "deneb":
|
|
||||||
method = "engine_newPayloadV3"
|
|
||||||
parentBeaconRoot := event.BeaconHead.ParentRoot
|
|
||||||
blobHashes := collectBlobHashes(event.Block)
|
|
||||||
params = append(params, blobHashes, parentBeaconRoot)
|
|
||||||
case "capella":
|
|
||||||
method = "engine_newPayloadV2"
|
|
||||||
default:
|
|
||||||
method = "engine_newPayloadV1"
|
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx, cancel := context.WithTimeout(ec.rootCtx, time.Second*5)
|
ctx, cancel := context.WithTimeout(ec.rootCtx, time.Second*5)
|
||||||
|
|
@ -145,12 +149,12 @@ func (ec *engineClient) callForkchoiceUpdated(fork string, event types.ChainHead
|
||||||
|
|
||||||
var method string
|
var method string
|
||||||
switch fork {
|
switch fork {
|
||||||
case "deneb", "electra":
|
case "altair", "bellatrix":
|
||||||
method = "engine_forkchoiceUpdatedV3"
|
method = "engine_forkchoiceUpdatedV1"
|
||||||
case "capella":
|
case "capella":
|
||||||
method = "engine_forkchoiceUpdatedV2"
|
method = "engine_forkchoiceUpdatedV2"
|
||||||
default:
|
default: // deneb, electra, fulu and above
|
||||||
method = "engine_forkchoiceUpdatedV1"
|
method = "engine_forkchoiceUpdatedV3"
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx, cancel := context.WithTimeout(ec.rootCtx, time.Second*5)
|
ctx, cancel := context.WithTimeout(ec.rootCtx, time.Second*5)
|
||||||
|
|
|
||||||
|
|
@ -21,6 +21,7 @@ func (p PayloadAttributes) MarshalJSON() ([]byte, error) {
|
||||||
SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
|
SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
|
||||||
Withdrawals []*types.Withdrawal `json:"withdrawals"`
|
Withdrawals []*types.Withdrawal `json:"withdrawals"`
|
||||||
BeaconRoot *common.Hash `json:"parentBeaconBlockRoot"`
|
BeaconRoot *common.Hash `json:"parentBeaconBlockRoot"`
|
||||||
|
SlotNumber *hexutil.Uint64 `json:"slotNumber"`
|
||||||
}
|
}
|
||||||
var enc PayloadAttributes
|
var enc PayloadAttributes
|
||||||
enc.Timestamp = hexutil.Uint64(p.Timestamp)
|
enc.Timestamp = hexutil.Uint64(p.Timestamp)
|
||||||
|
|
@ -28,6 +29,7 @@ func (p PayloadAttributes) MarshalJSON() ([]byte, error) {
|
||||||
enc.SuggestedFeeRecipient = p.SuggestedFeeRecipient
|
enc.SuggestedFeeRecipient = p.SuggestedFeeRecipient
|
||||||
enc.Withdrawals = p.Withdrawals
|
enc.Withdrawals = p.Withdrawals
|
||||||
enc.BeaconRoot = p.BeaconRoot
|
enc.BeaconRoot = p.BeaconRoot
|
||||||
|
enc.SlotNumber = (*hexutil.Uint64)(p.SlotNumber)
|
||||||
return json.Marshal(&enc)
|
return json.Marshal(&enc)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -39,6 +41,7 @@ func (p *PayloadAttributes) UnmarshalJSON(input []byte) error {
|
||||||
SuggestedFeeRecipient *common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
|
SuggestedFeeRecipient *common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
|
||||||
Withdrawals []*types.Withdrawal `json:"withdrawals"`
|
Withdrawals []*types.Withdrawal `json:"withdrawals"`
|
||||||
BeaconRoot *common.Hash `json:"parentBeaconBlockRoot"`
|
BeaconRoot *common.Hash `json:"parentBeaconBlockRoot"`
|
||||||
|
SlotNumber *hexutil.Uint64 `json:"slotNumber"`
|
||||||
}
|
}
|
||||||
var dec PayloadAttributes
|
var dec PayloadAttributes
|
||||||
if err := json.Unmarshal(input, &dec); err != nil {
|
if err := json.Unmarshal(input, &dec); err != nil {
|
||||||
|
|
@ -62,5 +65,8 @@ func (p *PayloadAttributes) UnmarshalJSON(input []byte) error {
|
||||||
if dec.BeaconRoot != nil {
|
if dec.BeaconRoot != nil {
|
||||||
p.BeaconRoot = dec.BeaconRoot
|
p.BeaconRoot = dec.BeaconRoot
|
||||||
}
|
}
|
||||||
|
if dec.SlotNumber != nil {
|
||||||
|
p.SlotNumber = (*uint64)(dec.SlotNumber)
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -34,7 +34,7 @@ func (e ExecutableData) MarshalJSON() ([]byte, error) {
|
||||||
Withdrawals []*types.Withdrawal `json:"withdrawals"`
|
Withdrawals []*types.Withdrawal `json:"withdrawals"`
|
||||||
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
|
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
|
||||||
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
|
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
|
||||||
ExecutionWitness *types.ExecutionWitness `json:"executionWitness,omitempty"`
|
SlotNumber *hexutil.Uint64 `json:"slotNumber,omitempty"`
|
||||||
}
|
}
|
||||||
var enc ExecutableData
|
var enc ExecutableData
|
||||||
enc.ParentHash = e.ParentHash
|
enc.ParentHash = e.ParentHash
|
||||||
|
|
@ -59,7 +59,7 @@ func (e ExecutableData) MarshalJSON() ([]byte, error) {
|
||||||
enc.Withdrawals = e.Withdrawals
|
enc.Withdrawals = e.Withdrawals
|
||||||
enc.BlobGasUsed = (*hexutil.Uint64)(e.BlobGasUsed)
|
enc.BlobGasUsed = (*hexutil.Uint64)(e.BlobGasUsed)
|
||||||
enc.ExcessBlobGas = (*hexutil.Uint64)(e.ExcessBlobGas)
|
enc.ExcessBlobGas = (*hexutil.Uint64)(e.ExcessBlobGas)
|
||||||
enc.ExecutionWitness = e.ExecutionWitness
|
enc.SlotNumber = (*hexutil.Uint64)(e.SlotNumber)
|
||||||
return json.Marshal(&enc)
|
return json.Marshal(&enc)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -83,7 +83,7 @@ func (e *ExecutableData) UnmarshalJSON(input []byte) error {
|
||||||
Withdrawals []*types.Withdrawal `json:"withdrawals"`
|
Withdrawals []*types.Withdrawal `json:"withdrawals"`
|
||||||
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
|
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
|
||||||
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
|
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
|
||||||
ExecutionWitness *types.ExecutionWitness `json:"executionWitness,omitempty"`
|
SlotNumber *hexutil.Uint64 `json:"slotNumber,omitempty"`
|
||||||
}
|
}
|
||||||
var dec ExecutableData
|
var dec ExecutableData
|
||||||
if err := json.Unmarshal(input, &dec); err != nil {
|
if err := json.Unmarshal(input, &dec); err != nil {
|
||||||
|
|
@ -157,8 +157,8 @@ func (e *ExecutableData) UnmarshalJSON(input []byte) error {
|
||||||
if dec.ExcessBlobGas != nil {
|
if dec.ExcessBlobGas != nil {
|
||||||
e.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas)
|
e.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas)
|
||||||
}
|
}
|
||||||
if dec.ExecutionWitness != nil {
|
if dec.SlotNumber != nil {
|
||||||
e.ExecutionWitness = dec.ExecutionWitness
|
e.SlotNumber = (*uint64)(dec.SlotNumber)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -50,6 +50,13 @@ var (
|
||||||
// ExecutionPayloadV3 has the syntax of ExecutionPayloadV2 and appends the new
|
// ExecutionPayloadV3 has the syntax of ExecutionPayloadV2 and appends the new
|
||||||
// fields: blobGasUsed and excessBlobGas.
|
// fields: blobGasUsed and excessBlobGas.
|
||||||
PayloadV3 PayloadVersion = 0x3
|
PayloadV3 PayloadVersion = 0x3
|
||||||
|
|
||||||
|
// PayloadV4 is the identifier of ExecutionPayloadV4 introduced in amsterdam fork.
|
||||||
|
//
|
||||||
|
// https://github.com/ethereum/execution-apis/blob/main/src/engine/amsterdam.md#executionpayloadv4
|
||||||
|
// ExecutionPayloadV4 has the syntax of ExecutionPayloadV3 and appends the new
|
||||||
|
// field slotNumber.
|
||||||
|
PayloadV4 PayloadVersion = 0x4
|
||||||
)
|
)
|
||||||
|
|
||||||
//go:generate go run github.com/fjl/gencodec -type PayloadAttributes -field-override payloadAttributesMarshaling -out gen_blockparams.go
|
//go:generate go run github.com/fjl/gencodec -type PayloadAttributes -field-override payloadAttributesMarshaling -out gen_blockparams.go
|
||||||
|
|
@ -62,11 +69,13 @@ type PayloadAttributes struct {
|
||||||
SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
|
SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
|
||||||
Withdrawals []*types.Withdrawal `json:"withdrawals"`
|
Withdrawals []*types.Withdrawal `json:"withdrawals"`
|
||||||
BeaconRoot *common.Hash `json:"parentBeaconBlockRoot"`
|
BeaconRoot *common.Hash `json:"parentBeaconBlockRoot"`
|
||||||
|
SlotNumber *uint64 `json:"slotNumber"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// JSON type overrides for PayloadAttributes.
|
// JSON type overrides for PayloadAttributes.
|
||||||
type payloadAttributesMarshaling struct {
|
type payloadAttributesMarshaling struct {
|
||||||
Timestamp hexutil.Uint64
|
Timestamp hexutil.Uint64
|
||||||
|
SlotNumber *hexutil.Uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
//go:generate go run github.com/fjl/gencodec -type ExecutableData -field-override executableDataMarshaling -out gen_ed.go
|
//go:generate go run github.com/fjl/gencodec -type ExecutableData -field-override executableDataMarshaling -out gen_ed.go
|
||||||
|
|
@ -90,7 +99,7 @@ type ExecutableData struct {
|
||||||
Withdrawals []*types.Withdrawal `json:"withdrawals"`
|
Withdrawals []*types.Withdrawal `json:"withdrawals"`
|
||||||
BlobGasUsed *uint64 `json:"blobGasUsed"`
|
BlobGasUsed *uint64 `json:"blobGasUsed"`
|
||||||
ExcessBlobGas *uint64 `json:"excessBlobGas"`
|
ExcessBlobGas *uint64 `json:"excessBlobGas"`
|
||||||
ExecutionWitness *types.ExecutionWitness `json:"executionWitness,omitempty"`
|
SlotNumber *uint64 `json:"slotNumber,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// JSON type overrides for executableData.
|
// JSON type overrides for executableData.
|
||||||
|
|
@ -105,6 +114,7 @@ type executableDataMarshaling struct {
|
||||||
Transactions []hexutil.Bytes
|
Transactions []hexutil.Bytes
|
||||||
BlobGasUsed *hexutil.Uint64
|
BlobGasUsed *hexutil.Uint64
|
||||||
ExcessBlobGas *hexutil.Uint64
|
ExcessBlobGas *hexutil.Uint64
|
||||||
|
SlotNumber *hexutil.Uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
// StatelessPayloadStatusV1 is the result of a stateless payload execution.
|
// StatelessPayloadStatusV1 is the result of a stateless payload execution.
|
||||||
|
|
@ -214,7 +224,7 @@ func encodeTransactions(txs []*types.Transaction) [][]byte {
|
||||||
return enc
|
return enc
|
||||||
}
|
}
|
||||||
|
|
||||||
func decodeTransactions(enc [][]byte) ([]*types.Transaction, error) {
|
func DecodeTransactions(enc [][]byte) ([]*types.Transaction, error) {
|
||||||
var txs = make([]*types.Transaction, len(enc))
|
var txs = make([]*types.Transaction, len(enc))
|
||||||
for i, encTx := range enc {
|
for i, encTx := range enc {
|
||||||
var tx types.Transaction
|
var tx types.Transaction
|
||||||
|
|
@ -252,7 +262,7 @@ func ExecutableDataToBlock(data ExecutableData, versionedHashes []common.Hash, b
|
||||||
// for stateless execution, so it skips checking if the executable data hashes to
|
// for stateless execution, so it skips checking if the executable data hashes to
|
||||||
// the requested hash (stateless has to *compute* the root hash, it's not given).
|
// the requested hash (stateless has to *compute* the root hash, it's not given).
|
||||||
func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, requests [][]byte) (*types.Block, error) {
|
func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, requests [][]byte) (*types.Block, error) {
|
||||||
txs, err := decodeTransactions(data.Transactions)
|
txs, err := DecodeTransactions(data.Transactions)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
@ -314,10 +324,10 @@ func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.H
|
||||||
BlobGasUsed: data.BlobGasUsed,
|
BlobGasUsed: data.BlobGasUsed,
|
||||||
ParentBeaconRoot: beaconRoot,
|
ParentBeaconRoot: beaconRoot,
|
||||||
RequestsHash: requestsHash,
|
RequestsHash: requestsHash,
|
||||||
|
SlotNumber: data.SlotNumber,
|
||||||
}
|
}
|
||||||
return types.NewBlockWithHeader(header).
|
return types.NewBlockWithHeader(header).
|
||||||
WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: data.Withdrawals}).
|
WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: data.Withdrawals}),
|
||||||
WithWitness(data.ExecutionWitness),
|
|
||||||
nil
|
nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -342,7 +352,7 @@ func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.
|
||||||
Withdrawals: block.Withdrawals(),
|
Withdrawals: block.Withdrawals(),
|
||||||
BlobGasUsed: block.BlobGasUsed(),
|
BlobGasUsed: block.BlobGasUsed(),
|
||||||
ExcessBlobGas: block.ExcessBlobGas(),
|
ExcessBlobGas: block.ExcessBlobGas(),
|
||||||
ExecutionWitness: block.ExecutionWitness(),
|
SlotNumber: block.SlotNumber(),
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add blobs.
|
// Add blobs.
|
||||||
|
|
|
||||||
|
|
@ -69,7 +69,10 @@ func newCanonicalStore[T any](db ethdb.Iteratee, keyPrefix []byte) (*canonicalSt
|
||||||
|
|
||||||
// databaseKey returns the database key belonging to the given period.
|
// databaseKey returns the database key belonging to the given period.
|
||||||
func (cs *canonicalStore[T]) databaseKey(period uint64) []byte {
|
func (cs *canonicalStore[T]) databaseKey(period uint64) []byte {
|
||||||
return binary.BigEndian.AppendUint64(append([]byte{}, cs.keyPrefix...), period)
|
key := make([]byte, len(cs.keyPrefix)+8)
|
||||||
|
copy(key, cs.keyPrefix)
|
||||||
|
binary.BigEndian.PutUint64(key[len(cs.keyPrefix):], period)
|
||||||
|
return key
|
||||||
}
|
}
|
||||||
|
|
||||||
// add adds the given item to the database. It also ensures that the range remains
|
// add adds the given item to the database. It also ensures that the range remains
|
||||||
|
|
|
||||||
|
|
@ -438,14 +438,11 @@ func (s *serverWithLimits) fail(desc string) {
|
||||||
// failLocked calculates the dynamic failure delay and applies it.
|
// failLocked calculates the dynamic failure delay and applies it.
|
||||||
func (s *serverWithLimits) failLocked(desc string) {
|
func (s *serverWithLimits) failLocked(desc string) {
|
||||||
log.Debug("Server error", "description", desc)
|
log.Debug("Server error", "description", desc)
|
||||||
s.failureDelay *= 2
|
|
||||||
now := s.clock.Now()
|
now := s.clock.Now()
|
||||||
if now > s.failureDelayEnd {
|
if now > s.failureDelayEnd {
|
||||||
s.failureDelay *= math.Pow(2, -float64(now-s.failureDelayEnd)/float64(maxFailureDelay))
|
s.failureDelay *= math.Pow(2, -float64(now-s.failureDelayEnd)/float64(maxFailureDelay))
|
||||||
}
|
}
|
||||||
if s.failureDelay < float64(minFailureDelay) {
|
s.failureDelay = max(min(s.failureDelay*2, float64(maxFailureDelay)), float64(minFailureDelay))
|
||||||
s.failureDelay = float64(minFailureDelay)
|
|
||||||
}
|
|
||||||
s.failureDelayEnd = now + mclock.AbsTime(s.failureDelay)
|
s.failureDelayEnd = now + mclock.AbsTime(s.failureDelay)
|
||||||
s.delay(time.Duration(s.failureDelay))
|
s.delay(time.Duration(s.failureDelay))
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -105,6 +105,7 @@ func (s *HeadSync) Process(requester request.Requester, events []request.Event)
|
||||||
delete(s.serverHeads, event.Server)
|
delete(s.serverHeads, event.Server)
|
||||||
delete(s.unvalidatedOptimistic, event.Server)
|
delete(s.unvalidatedOptimistic, event.Server)
|
||||||
delete(s.unvalidatedFinality, event.Server)
|
delete(s.unvalidatedFinality, event.Server)
|
||||||
|
delete(s.reqFinalityEpoch, event.Server)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -62,7 +62,6 @@ const (
|
||||||
ssNeedParent // cp header slot %32 != 0, need parent to check epoch boundary
|
ssNeedParent // cp header slot %32 != 0, need parent to check epoch boundary
|
||||||
ssParentRequested // cp parent header requested
|
ssParentRequested // cp parent header requested
|
||||||
ssPrintStatus // has all necessary info, print log message if init still not successful
|
ssPrintStatus // has all necessary info, print log message if init still not successful
|
||||||
ssDone // log message printed, no more action required
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type serverState struct {
|
type serverState struct {
|
||||||
|
|
@ -180,7 +179,8 @@ func (s *CheckpointInit) Process(requester request.Requester, events []request.E
|
||||||
default:
|
default:
|
||||||
log.Error("blsync: checkpoint not available, but reported as finalized; specified checkpoint hash might be too old", "server", server.Name())
|
log.Error("blsync: checkpoint not available, but reported as finalized; specified checkpoint hash might be too old", "server", server.Name())
|
||||||
}
|
}
|
||||||
s.serverState[server] = serverState{state: ssDone}
|
s.serverState[server] = serverState{state: ssDefault}
|
||||||
|
requester.Fail(server, "checkpoint init failed")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -1 +1 @@
|
||||||
0x1bbf958008172591b6cbdb3d8d52e26998258e83d4bdb9eec10969d84519a6bd
|
0xbb7a7f3c40d8ea0b450f91587db65d0f1c079669277e01a0426c8911702a863a
|
||||||
|
|
@ -1 +1 @@
|
||||||
0x2fe39a39b6f7cbd549e0f74d259de6db486005a65bd3bd92840dd6ce21d6f4c8
|
0x2af778d703186526a1b6304b423f338f11556206f618643c3f7fa0d7b1ef5c9b
|
||||||
|
|
@ -1 +1 @@
|
||||||
0x86686b2b366e24134e0e3969a9c5f3759f92e5d2b04785b42e22cc7d468c2107
|
0x48a89c9ea7ba19de2931797974cf8722344ab231c0edada278b108ef74125478
|
||||||
|
|
@ -38,7 +38,7 @@ import (
|
||||||
// across signing different data structures.
|
// across signing different data structures.
|
||||||
const syncCommitteeDomain = 7
|
const syncCommitteeDomain = 7
|
||||||
|
|
||||||
var knownForks = []string{"GENESIS", "ALTAIR", "BELLATRIX", "CAPELLA", "DENEB"}
|
var knownForks = []string{"GENESIS", "ALTAIR", "BELLATRIX", "CAPELLA", "DENEB", "ELECTRA", "FULU"}
|
||||||
|
|
||||||
// ClientConfig contains beacon light client configuration.
|
// ClientConfig contains beacon light client configuration.
|
||||||
type ClientConfig struct {
|
type ClientConfig struct {
|
||||||
|
|
@ -103,6 +103,9 @@ func (c *ChainConfig) LoadForks(file []byte) error {
|
||||||
epochs["GENESIS"] = 0
|
epochs["GENESIS"] = 0
|
||||||
|
|
||||||
for key, value := range config {
|
for key, value := range config {
|
||||||
|
if value == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
if strings.HasSuffix(key, "_FORK_VERSION") {
|
if strings.HasSuffix(key, "_FORK_VERSION") {
|
||||||
name := key[:len(key)-len("_FORK_VERSION")]
|
name := key[:len(key)-len("_FORK_VERSION")]
|
||||||
switch version := value.(type) {
|
switch version := value.(type) {
|
||||||
|
|
|
||||||
|
|
@ -15,6 +15,9 @@ ALTAIR_FORK_EPOCH: 1
|
||||||
EIP7928_FORK_VERSION: 0xb0000038
|
EIP7928_FORK_VERSION: 0xb0000038
|
||||||
EIP7928_FORK_EPOCH: 18446744073709551615
|
EIP7928_FORK_EPOCH: 18446744073709551615
|
||||||
|
|
||||||
|
EIP7XXX_FORK_VERSION:
|
||||||
|
EIP7XXX_FORK_EPOCH:
|
||||||
|
|
||||||
BLOB_SCHEDULE: []
|
BLOB_SCHEDULE: []
|
||||||
`
|
`
|
||||||
c := &ChainConfig{}
|
c := &ChainConfig{}
|
||||||
|
|
|
||||||
|
|
@ -40,36 +40,39 @@ var (
|
||||||
GenesisTime: 1606824023,
|
GenesisTime: 1606824023,
|
||||||
Checkpoint: common.HexToHash(checkpointMainnet),
|
Checkpoint: common.HexToHash(checkpointMainnet),
|
||||||
}).
|
}).
|
||||||
AddFork("GENESIS", 0, []byte{0, 0, 0, 0}).
|
AddFork("GENESIS", 0, common.FromHex("0x00000000")).
|
||||||
AddFork("ALTAIR", 74240, []byte{1, 0, 0, 0}).
|
AddFork("ALTAIR", 74240, common.FromHex("0x01000000")).
|
||||||
AddFork("BELLATRIX", 144896, []byte{2, 0, 0, 0}).
|
AddFork("BELLATRIX", 144896, common.FromHex("0x02000000")).
|
||||||
AddFork("CAPELLA", 194048, []byte{3, 0, 0, 0}).
|
AddFork("CAPELLA", 194048, common.FromHex("0x03000000")).
|
||||||
AddFork("DENEB", 269568, []byte{4, 0, 0, 0}).
|
AddFork("DENEB", 269568, common.FromHex("0x04000000")).
|
||||||
AddFork("ELECTRA", 364032, []byte{5, 0, 0, 0})
|
AddFork("ELECTRA", 364032, common.FromHex("0x05000000")).
|
||||||
|
AddFork("FULU", 411392, common.FromHex("0x06000000"))
|
||||||
|
|
||||||
SepoliaLightConfig = (&ChainConfig{
|
SepoliaLightConfig = (&ChainConfig{
|
||||||
GenesisValidatorsRoot: common.HexToHash("0xd8ea171f3c94aea21ebc42a1ed61052acf3f9209c00e4efbaaddac09ed9b8078"),
|
GenesisValidatorsRoot: common.HexToHash("0xd8ea171f3c94aea21ebc42a1ed61052acf3f9209c00e4efbaaddac09ed9b8078"),
|
||||||
GenesisTime: 1655733600,
|
GenesisTime: 1655733600,
|
||||||
Checkpoint: common.HexToHash(checkpointSepolia),
|
Checkpoint: common.HexToHash(checkpointSepolia),
|
||||||
}).
|
}).
|
||||||
AddFork("GENESIS", 0, []byte{144, 0, 0, 105}).
|
AddFork("GENESIS", 0, common.FromHex("0x90000069")).
|
||||||
AddFork("ALTAIR", 50, []byte{144, 0, 0, 112}).
|
AddFork("ALTAIR", 50, common.FromHex("0x90000070")).
|
||||||
AddFork("BELLATRIX", 100, []byte{144, 0, 0, 113}).
|
AddFork("BELLATRIX", 100, common.FromHex("0x90000071")).
|
||||||
AddFork("CAPELLA", 56832, []byte{144, 0, 0, 114}).
|
AddFork("CAPELLA", 56832, common.FromHex("0x90000072")).
|
||||||
AddFork("DENEB", 132608, []byte{144, 0, 0, 115}).
|
AddFork("DENEB", 132608, common.FromHex("0x90000073")).
|
||||||
AddFork("ELECTRA", 222464, []byte{144, 0, 0, 116})
|
AddFork("ELECTRA", 222464, common.FromHex("0x90000074")).
|
||||||
|
AddFork("FULU", 272640, common.FromHex("0x90000075"))
|
||||||
|
|
||||||
HoleskyLightConfig = (&ChainConfig{
|
HoleskyLightConfig = (&ChainConfig{
|
||||||
GenesisValidatorsRoot: common.HexToHash("0x9143aa7c615a7f7115e2b6aac319c03529df8242ae705fba9df39b79c59fa8b1"),
|
GenesisValidatorsRoot: common.HexToHash("0x9143aa7c615a7f7115e2b6aac319c03529df8242ae705fba9df39b79c59fa8b1"),
|
||||||
GenesisTime: 1695902400,
|
GenesisTime: 1695902400,
|
||||||
Checkpoint: common.HexToHash(checkpointHolesky),
|
Checkpoint: common.HexToHash(checkpointHolesky),
|
||||||
}).
|
}).
|
||||||
AddFork("GENESIS", 0, []byte{1, 1, 112, 0}).
|
AddFork("GENESIS", 0, common.FromHex("0x01017000")).
|
||||||
AddFork("ALTAIR", 0, []byte{2, 1, 112, 0}).
|
AddFork("ALTAIR", 0, common.FromHex("0x02017000")).
|
||||||
AddFork("BELLATRIX", 0, []byte{3, 1, 112, 0}).
|
AddFork("BELLATRIX", 0, common.FromHex("0x03017000")).
|
||||||
AddFork("CAPELLA", 256, []byte{4, 1, 112, 0}).
|
AddFork("CAPELLA", 256, common.FromHex("0x04017000")).
|
||||||
AddFork("DENEB", 29696, []byte{5, 1, 112, 0}).
|
AddFork("DENEB", 29696, common.FromHex("0x05017000")).
|
||||||
AddFork("ELECTRA", 115968, []byte{6, 1, 112, 0})
|
AddFork("ELECTRA", 115968, common.FromHex("0x06017000")).
|
||||||
|
AddFork("FULU", 165120, common.FromHex("0x07017000"))
|
||||||
|
|
||||||
HoodiLightConfig = (&ChainConfig{
|
HoodiLightConfig = (&ChainConfig{
|
||||||
GenesisValidatorsRoot: common.HexToHash("0x212f13fc4df078b6cb7db228f1c8307566dcecf900867401a92023d7ba99cb5f"),
|
GenesisValidatorsRoot: common.HexToHash("0x212f13fc4df078b6cb7db228f1c8307566dcecf900867401a92023d7ba99cb5f"),
|
||||||
|
|
@ -82,5 +85,5 @@ var (
|
||||||
AddFork("CAPELLA", 0, common.FromHex("0x40000910")).
|
AddFork("CAPELLA", 0, common.FromHex("0x40000910")).
|
||||||
AddFork("DENEB", 0, common.FromHex("0x50000910")).
|
AddFork("DENEB", 0, common.FromHex("0x50000910")).
|
||||||
AddFork("ELECTRA", 2048, common.FromHex("0x60000910")).
|
AddFork("ELECTRA", 2048, common.FromHex("0x60000910")).
|
||||||
AddFork("FULU", 18446744073709551615, common.FromHex("0x70000910"))
|
AddFork("FULU", 50688, common.FromHex("0x70000910"))
|
||||||
)
|
)
|
||||||
|
|
|
||||||
|
|
@ -52,7 +52,7 @@ func BlockFromJSON(forkName string, data []byte) (*BeaconBlock, error) {
|
||||||
obj = new(capella.BeaconBlock)
|
obj = new(capella.BeaconBlock)
|
||||||
case "deneb":
|
case "deneb":
|
||||||
obj = new(deneb.BeaconBlock)
|
obj = new(deneb.BeaconBlock)
|
||||||
case "electra":
|
case "electra", "fulu":
|
||||||
obj = new(electra.BeaconBlock)
|
obj = new(electra.BeaconBlock)
|
||||||
default:
|
default:
|
||||||
return nil, fmt.Errorf("unsupported fork: %s", forkName)
|
return nil, fmt.Errorf("unsupported fork: %s", forkName)
|
||||||
|
|
|
||||||
|
|
@ -45,7 +45,7 @@ func ExecutionHeaderFromJSON(forkName string, data []byte) (*ExecutionHeader, er
|
||||||
switch forkName {
|
switch forkName {
|
||||||
case "capella":
|
case "capella":
|
||||||
obj = new(capella.ExecutionPayloadHeader)
|
obj = new(capella.ExecutionPayloadHeader)
|
||||||
case "deneb", "electra": // note: the payload type was not changed in electra
|
case "deneb", "electra", "fulu": // note: the payload type was not changed in electra/fulu
|
||||||
obj = new(deneb.ExecutionPayloadHeader)
|
obj = new(deneb.ExecutionPayloadHeader)
|
||||||
default:
|
default:
|
||||||
return nil, fmt.Errorf("unsupported fork: %s", forkName)
|
return nil, fmt.Errorf("unsupported fork: %s", forkName)
|
||||||
|
|
|
||||||
|
|
@ -5,81 +5,102 @@
|
||||||
# https://github.com/ethereum/execution-spec-tests/releases/download/v5.1.0
|
# https://github.com/ethereum/execution-spec-tests/releases/download/v5.1.0
|
||||||
a3192784375acec7eaec492799d5c5d0c47a2909a3cc40178898e4ecd20cc416 fixtures_develop.tar.gz
|
a3192784375acec7eaec492799d5c5d0c47a2909a3cc40178898e4ecd20cc416 fixtures_develop.tar.gz
|
||||||
|
|
||||||
# version:golang 1.25.1
|
# version:golang 1.25.9
|
||||||
# https://go.dev/dl/
|
# https://go.dev/dl/
|
||||||
d010c109cee94d80efe681eab46bdea491ac906bf46583c32e9f0dbb0bd1a594 go1.25.1.src.tar.gz
|
0ec9ef8ebcea097aac37decae9f09a7218b451cd96be7d6ed513d8e4bcf909cf go1.25.9.src.tar.gz
|
||||||
1d622468f767a1b9fe1e1e67bd6ce6744d04e0c68712adc689748bbeccb126bb go1.25.1.darwin-amd64.tar.gz
|
b9ede6378a8f8d3d22bf52e68beb69ef7abdb65929ab2456020383002da15846 go1.25.9.aix-ppc64.tar.gz
|
||||||
68deebb214f39d542e518ebb0598a406ab1b5a22bba8ec9ade9f55fb4dd94a6c go1.25.1.darwin-arm64.tar.gz
|
92cb78fba4796e218c1accb0ea0a214ef2094c382049a244ad6505505d015fbe go1.25.9.darwin-amd64.tar.gz
|
||||||
d03cdcbc9bd8baf5cf028de390478e9e2b3e4d0afe5a6582dedc19bfe6a263b2 go1.25.1.linux-386.tar.gz
|
9528be7329b9770631a6bd09ca2f3a73ed7332bec01d87435e75e92d8f130363 go1.25.9.darwin-arm64.tar.gz
|
||||||
7716a0d940a0f6ae8e1f3b3f4f36299dc53e31b16840dbd171254312c41ca12e go1.25.1.linux-amd64.tar.gz
|
918e44a471c5524caa52f74185064240d5eb343aa8023d604776511fc7adffa6 go1.25.9.dragonfly-amd64.tar.gz
|
||||||
65a3e34fb2126f55b34e1edfc709121660e1be2dee6bdf405fc399a63a95a87d go1.25.1.linux-arm64.tar.gz
|
2d67dbdfd09c6fcaa0e64485367ef43b8837ea200c663d6417183237bcddf83d go1.25.9.freebsd-386.tar.gz
|
||||||
eb949be683e82a99e9861dafd7057e31ea40b161eae6c4cd18fdc0e8c4ae6225 go1.25.1.linux-armv6l.tar.gz
|
9152d0c0badbfeb0c0e148e47c12bec28099d8cf2db60958810c879e0b679d07 go1.25.9.freebsd-amd64.tar.gz
|
||||||
be13d5479b8c75438f2efcaa8c191fba3af684b3228abc9c99c7aa8502f34424 go1.25.1.windows-386.zip
|
437dca59604ad4a806a6a88e3d7ec1cd98ac9b402a3671629f4e553dd8b9888f go1.25.9.freebsd-arm.tar.gz
|
||||||
4a974de310e7ee1d523d2fcedb114ba5fa75408c98eb3652023e55ccf3fa7cab go1.25.1.windows-amd64.zip
|
4c0fe53977412036fc8081e8d0992bbaabe4d3e1926137271ba11c2f5753300f go1.25.9.freebsd-arm64.tar.gz
|
||||||
45ab4290adbd6ee9e7f18f0d57eaa9008fdbef590882778ed93eac3c8cca06c5 go1.25.1.aix-ppc64.tar.gz
|
d6087cdd1c084bd186132f29e0d032852a745f3c7619003d0fd5612c1fa58c8a go1.25.9.freebsd-riscv64.tar.gz
|
||||||
2e3c1549bed3124763774d648f291ac42611232f48320ebbd23517c909c09b81 go1.25.1.dragonfly-amd64.tar.gz
|
f82e49037e195cb62beae6a6ad83497157b2af5a01bad2f1dcb65df41080aabb go1.25.9.illumos-amd64.tar.gz
|
||||||
dc0198dd4ec520e13f26798def8750544edf6448d8e9c43fd2a814e4885932af go1.25.1.freebsd-386.tar.gz
|
1e14a73bc2b19e370e0d4c57ba87aabfe8aef1e435e14d246742d48a13254f36 go1.25.9.linux-386.tar.gz
|
||||||
c4f1a7e7b258406e6f3b677ecdbd97bbb23ff9c0d44be4eb238a07d360f69ac8 go1.25.1.freebsd-amd64.tar.gz
|
00859d7bd6defe8bf84d9db9e57b9a4467b2887c18cd93ae7460e713db774bc1 go1.25.9.linux-amd64.tar.gz
|
||||||
7772fc5ff71ed39297ec0c1599fc54e399642c9b848eac989601040923b0de9c go1.25.1.freebsd-arm.tar.gz
|
ec342e7389b7f489564ed5463c63b16cf8040023dabc7861256677165a8c0e2b go1.25.9.linux-arm64.tar.gz
|
||||||
5bb011d5d5b6218b12189f07aa0be618ab2002662fff1ca40afba7389735c207 go1.25.1.freebsd-arm64.tar.gz
|
7d4f0d266d871301e08ef4ac31c56e66048688893b2848392e5c600276351ee8 go1.25.9.linux-armv6l.tar.gz
|
||||||
ccac716240cb049bebfafcb7eebc3758512178a4c51fc26da9cc032035d850c8 go1.25.1.freebsd-riscv64.tar.gz
|
f3460d901a14496bc609636e4accf9110ee1869d41c64af7e29cd567cffcf49b go1.25.9.linux-loong64.tar.gz
|
||||||
cc53910ffb9fcfdd988a9fa25b5423bae1cfa01b19616be646700e1f5453b466 go1.25.1.illumos-amd64.tar.gz
|
1da96ea449382ff96c09c55cee74815324e01d687d5ac6d2ade58244b8574306 go1.25.9.linux-mips.tar.gz
|
||||||
efe809f923bcedab44bf7be2b3af8d182b512b1bf9c07d302e0c45d26c8f56f3 go1.25.1.linux-loong64.tar.gz
|
311a7f5f01f9a4bd51288b575eb619dc8e28e1fbc0cd78256a428b3ca668ff01 go1.25.9.linux-mips64.tar.gz
|
||||||
c0de33679f6ed68991dc42dc4a602e74a666e3e166c1748ee1b5d1a7ea2ffbb2 go1.25.1.linux-mips.tar.gz
|
0b4edaf9e2ba3f0a079547effda70ec6a4b51a6ca3271a1147652c87ebcf3735 go1.25.9.linux-mips64le.tar.gz
|
||||||
c270f7b0c0bdfbcd54fef4481227c40d41bb518f9ae38ee930870f04a0a6a589 go1.25.1.linux-mips64.tar.gz
|
42667340df264896f20b12261429d954e736e9772ab83ba289e68c30cf6f9628 go1.25.9.linux-mipsle.tar.gz
|
||||||
80be871ba9c944f34d1868cdf5047e1cf2e1289fe08cdb90e2453d2f0d6965ae go1.25.1.linux-mips64le.tar.gz
|
b9cbb3a4894b5aca6966c23452608435e8535278ef019b18d8898fbbfab67e74 go1.25.9.linux-ppc64.tar.gz
|
||||||
9f09defa9bb22ebf2cde76162f40958564e57ce5c2b3649bc063bebcbc9294c1 go1.25.1.linux-mipsle.tar.gz
|
b0c41c7da1fc8d39020d65296a0dc54167afd9f76d67064e22c31ce3d839a739 go1.25.9.linux-ppc64le.tar.gz
|
||||||
2c76b7d278c1d43ad19d478ad3f0f05e7b782b64b90870701b314fa48b5f43c6 go1.25.1.linux-ppc64.tar.gz
|
2a630be8f854177c13e5fa75f7812c721369ecb9bd6e4c0fb1bd1c708d08b37c go1.25.9.linux-riscv64.tar.gz
|
||||||
8b0c8d3ee5b1b5c28b6bd63dc4438792012e01d03b4bf7a61d985c87edab7d1f go1.25.1.linux-ppc64le.tar.gz
|
0cf55136ac7eaccfc36d849054f849510ea289c2d959ffbed7b3866b4f484d17 go1.25.9.linux-s390x.tar.gz
|
||||||
22fe934a9d0c9c57275716c55b92d46ebd887cec3177c9140705efa9f84ba1e2 go1.25.1.linux-riscv64.tar.gz
|
eaf8167ff10a6a3e5dd304ef5f2e020b3a7379e76fa1011dc49c895800bf367c go1.25.9.netbsd-386.tar.gz
|
||||||
9cfe517ba423f59f3738ca5c3d907c103253cffbbcc2987142f79c5de8c1bf93 go1.25.1.linux-s390x.tar.gz
|
3cc6a861e62e23feae660984e0f2f14a2efb5d1f655900afee1d51af98919ae4 go1.25.9.netbsd-amd64.tar.gz
|
||||||
6af8a08353e76205d5b743dd7a3f0126684f96f62be0a31b75daf9837e512c46 go1.25.1.netbsd-386.tar.gz
|
c2c44dca10e882c30553f4aa2ab8f6722b670fb12882378c8f461a9105d40188 go1.25.9.netbsd-arm.tar.gz
|
||||||
e5d534ff362edb1bd8c8e10892b6a027c4c1482454245d1529167676498684c7 go1.25.1.netbsd-amd64.tar.gz
|
f301b71a8ec448053a5d2597df2e178120204bc9a33266c81600dd5d020a61b4 go1.25.9.netbsd-arm64.tar.gz
|
||||||
88bcf39254fdcea6a199c1c27d787831b652427ce60851ae9e41a3d7eb477f45 go1.25.1.netbsd-arm.tar.gz
|
c4543b7fdef9707b4896810c69b4160a43ecec210af45c300f3abd78aa0c9e72 go1.25.9.openbsd-386.tar.gz
|
||||||
d7c2eabe1d04ee47bcaea2816fdd90dbd25d90d4dfa756faa9786c788e4f3a4e go1.25.1.netbsd-arm64.tar.gz
|
37275325e314f5ab7cf8ae65c4efc7cbfdaf20b41c6849549739b57a3ac97544 go1.25.9.openbsd-amd64.tar.gz
|
||||||
14a2845977eb4dde11d929858c437a043467c427db87899935e90cee04a38d72 go1.25.1.openbsd-386.tar.gz
|
f9c05b6b315e979ecdd47354dd287c01708d6a88dc6ae7af74c84df8fa00df94 go1.25.9.openbsd-arm.tar.gz
|
||||||
d27ac54b38a13a09c81e67c82ac70d387037341c85c3399291c73e13e83fdd8c go1.25.1.openbsd-amd64.tar.gz
|
4e999f42cf959ff95ca84af1ea1db3771000f5e57e157904bc2ffc72c75e29a2 go1.25.9.openbsd-arm64.tar.gz
|
||||||
0f4ab5f02500afa4befd51fed1e8b45e4d07ca050f641cc3acc76eaa4027b2c3 go1.25.1.openbsd-arm.tar.gz
|
0c7fa6c7c2b1cc13ad32fa94fc31273b4adf39c1e0f0e5dcedac158ff526af3f go1.25.9.openbsd-ppc64.tar.gz
|
||||||
d46c3bd156843656f7f3cb0dec27ea51cd926ec3f7b80744bf8156e67c1c812f go1.25.1.openbsd-arm64.tar.gz
|
347b33953a4b6e8df17719296f360f60878fe48a2d482ceb3637a3dfd4950065 go1.25.9.openbsd-riscv64.tar.gz
|
||||||
c550514c67f22e409be10e40eace761e2e43069f4ef086ae6e60aac736c2b679 go1.25.1.openbsd-ppc64.tar.gz
|
889f77d567c06832e0d332fe2458653dc66d43cded7ddbca6f72ce0ca60029cc go1.25.9.plan9-386.tar.gz
|
||||||
8a09a8714a2556eb13fc1f10b7ce2553fcea4971e3330fc3be0efd24aab45734 go1.25.1.openbsd-riscv64.tar.gz
|
978b1f931fadec2f2516237d2649ee845d93c8eaf47dd196cfd8d26c7b2706a1 go1.25.9.plan9-amd64.tar.gz
|
||||||
b0e1fefaf0c7abd71f139a54eee9767944aff5f0bc9d69c968234804884e552f go1.25.1.plan9-386.tar.gz
|
30b9565e5ad0a212fe00990ead700c751b416eb2ef8d7c91a204945a7ff83a48 go1.25.9.plan9-arm.tar.gz
|
||||||
e94732c94f149690aa0ab11c26090577211b4a988137cb2c03ec0b54e750402e go1.25.1.plan9-amd64.tar.gz
|
9e9125ff84ab3c3522ec758cab9540a17e9cba12bfcc34b6bf556cb89b522591 go1.25.9.solaris-amd64.tar.gz
|
||||||
7eb80e9de1e817d9089a54e8c7c5c8d8ed9e5fb4d4a012fc0f18fc422a484f0c go1.25.1.plan9-arm.tar.gz
|
bf40515f5f4d834fa9ead31ff75581e61a38ac27bf49840b95c5c998d321c0f6 go1.25.9.windows-386.zip
|
||||||
1261dfad7c4953c0ab90381bc1242dc54e394db7485c59349428d532b2273343 go1.25.1.solaris-amd64.tar.gz
|
a7a710e225467b34e9e09fb432b829c86c9b2da5821ee5418f7eb2e8ae1a22cc go1.25.9.windows-amd64.zip
|
||||||
04bc3c078e9e904c4d58d6ac2532a5bdd402bd36a9ff0b5949b3c5e6006a05ee go1.25.1.windows-arm64.zip
|
33cd73cf1b3ceee655ef71bc96e94006c02ae3c617fdd67ac9be3dfae3957449 go1.25.9.windows-arm64.zip
|
||||||
|
|
||||||
# version:golangci 2.4.0
|
# version:golangci 2.10.1
|
||||||
# https://github.com/golangci/golangci-lint/releases/
|
# https://github.com/golangci/golangci-lint/releases/
|
||||||
# https://github.com/golangci/golangci-lint/releases/download/v2.4.0/
|
# https://github.com/golangci/golangci-lint/releases/download/v2.10.1
|
||||||
7904ce63f79db44934939cf7a063086ea0ea98e9b19eba0a9d52ccdd0d21951c golangci-lint-2.4.0-darwin-amd64.tar.gz
|
66fb0da81b8033b477f97eea420d4b46b230ca172b8bb87c6610109f3772b6b6 golangci-lint-2.10.1-darwin-amd64.tar.gz
|
||||||
cd4dd53fa09b6646baff5fd22b8c64d91db02c21c7496df27992d75d34feec59 golangci-lint-2.4.0-darwin-arm64.tar.gz
|
03bfadf67e52b441b7ec21305e501c717df93c959836d66c7f97312654acb297 golangci-lint-2.10.1-darwin-arm64.tar.gz
|
||||||
d58f426ebe14cc257e81562b4bf37a488ffb4ffbbb3ec73041eb3b38bb25c0e1 golangci-lint-2.4.0-freebsd-386.tar.gz
|
c9a44658ccc8f7b8dbbd4ae6020ba91c1a5d3987f4d91ced0f7d2bea013e57ca golangci-lint-2.10.1-freebsd-386.tar.gz
|
||||||
6ec4a6177fc6c0dd541fbcb3a7612845266d020d35cc6fa92959220cdf64ca39 golangci-lint-2.4.0-freebsd-amd64.tar.gz
|
a513c5cb4e0f5bd5767001af9d5e97e7868cfc2d9c46739a4df93e713cfb24af golangci-lint-2.10.1-freebsd-amd64.tar.gz
|
||||||
4d473e3e71c01feaa915a0604fb35758b41284fb976cdeac3f842118d9ee7e17 golangci-lint-2.4.0-freebsd-armv6.tar.gz
|
2ef38eefc4b5cee2febacb75a30579526e5656c16338a921d80e59a8e87d4425 golangci-lint-2.10.1-freebsd-arm64.tar.gz
|
||||||
58727746c6530801a3f9a702a5945556a5eb7e88809222536dd9f9d54cafaeff golangci-lint-2.4.0-freebsd-armv7.tar.gz
|
8fea6766318b4829e766bbe325f10191d75297dcc44ae35bf374816037878e38 golangci-lint-2.10.1-freebsd-armv6.tar.gz
|
||||||
fbf28c662760e24c32f82f8d16dffdb4a82de7726a52ba1fad94f890c22997ea golangci-lint-2.4.0-illumos-amd64.tar.gz
|
30b629870574d6254f3e8804e5a74b34f98e1263c9d55465830d739c88b862ed golangci-lint-2.10.1-freebsd-armv7.tar.gz
|
||||||
a15a000a8981ef665e971e0f67e2acda9066a9e37a59344393b7351d8fb49c81 golangci-lint-2.4.0-linux-386.tar.gz
|
c0db839f866ce80b1b6c96167aa101cfe50d9c936f42d942a3c1cbdc1801af68 golangci-lint-2.10.1-illumos-amd64.tar.gz
|
||||||
fae792524c04424c0ac369f5b8076f04b45cf29fc945a370e55d369a8dc11840 golangci-lint-2.4.0-linux-amd64.tar.gz
|
280eb56636e9175f671cd7b755d7d67f628ae2ed00a164d1e443c43c112034e5 golangci-lint-2.10.1-linux-386.deb
|
||||||
70ac11f55b80ec78fd3a879249cc9255121b8dfd7f7ed4fc46ed137f4abf17e7 golangci-lint-2.4.0-linux-arm64.tar.gz
|
065a7d99da61dc7dfbfef2e2d7053dd3fa6672598f2747117aa4bb5f45e7df7f golangci-lint-2.10.1-linux-386.rpm
|
||||||
4acdc40e5cebe99e4e7ced358a05b2e71789f409b41cb4f39bbb86ccfa14b1dc golangci-lint-2.4.0-linux-armv6.tar.gz
|
a55918c03bb413b2662287653ab2ae2fef4e37428b247dad6348724adde9d770 golangci-lint-2.10.1-linux-386.tar.gz
|
||||||
2a68749568fa22b4a97cb88dbea655595563c795076536aa6c087f7968784bf3 golangci-lint-2.4.0-linux-armv7.tar.gz
|
8aa9b3aa14f39745eeb7fc7ff50bcac683e785397d1e4bc9afd2184b12c4ce86 golangci-lint-2.10.1-linux-amd64.deb
|
||||||
9e3369afb023711036dcb0b4f45c9fe2792af962fa1df050c9f6ac101a6c5d73 golangci-lint-2.4.0-linux-loong64.tar.gz
|
62a111688e9e305032334a2cbc84f4d971b64bb3bffc99d3f80081d57fb25e32 golangci-lint-2.10.1-linux-amd64.rpm
|
||||||
bb9143d6329be2c4dbfffef9564078e7da7d88e7dde6c829b6263d98e072229e golangci-lint-2.4.0-linux-mips64.tar.gz
|
dfa775874cf0561b404a02a8f4481fc69b28091da95aa697259820d429b09c99 golangci-lint-2.10.1-linux-amd64.tar.gz
|
||||||
5ad1765b40d56cd04d4afd805b3ba6f4bfd9b36181da93c31e9b17e483d8608d golangci-lint-2.4.0-linux-mips64le.tar.gz
|
b3f36937e8ea1660739dc0f5c892ea59c9c21ed4e75a91a25957c561f7f79a55 golangci-lint-2.10.1-linux-arm64.deb
|
||||||
918936fb9c0d5ba96bef03cf4348b03938634cfcced49be1e9bb29cb5094fa73 golangci-lint-2.4.0-linux-ppc64le.tar.gz
|
36d50314d53683b1f1a2a6cedfb5a9468451b481c64ab9e97a8e843ea088074d golangci-lint-2.10.1-linux-arm64.rpm
|
||||||
f7474c638e1fb67ebbdc654b55ca0125377ea0bc88e8fee8d964a4f24eacf828 golangci-lint-2.4.0-linux-riscv64.tar.gz
|
6652b42ae02915eb2f9cb2a2e0cac99514c8eded8388d88ae3e06e1a52c00de8 golangci-lint-2.10.1-linux-arm64.tar.gz
|
||||||
b617a9543997c8bfceaffa88a75d4e595030c6add69fba800c1e4d8f5fe253dd golangci-lint-2.4.0-linux-s390x.tar.gz
|
a32d8d318e803496812dd3461f250e52ccc7f53c47b95ce404a9cf55778ceb6a golangci-lint-2.10.1-linux-armv6.deb
|
||||||
7db027b03a9ba328f795215b04f594036837bc7dd0dd7cd16776b02a6167981c golangci-lint-2.4.0-netbsd-386.tar.gz
|
41d065f4c8ea165a1531abea644988ee2e973e4f0b49f9725ed3b979dac45112 golangci-lint-2.10.1-linux-armv6.rpm
|
||||||
52d8f9393f4313df0a62b752c37775e3af0b818e43e8dd28954351542d7c60bc golangci-lint-2.4.0-netbsd-amd64.tar.gz
|
59159a4df03aabbde69d15c7b7b3df143363cbb41f4bd4b200caffb8e34fb734 golangci-lint-2.10.1-linux-armv6.tar.gz
|
||||||
5c0086027fb5a4af3829e530c8115db4b35d11afe1914322eef528eb8cd38c69 golangci-lint-2.4.0-netbsd-arm64.tar.gz
|
b2e8ec0e050a1e2251dfe1561434999d202f5a3f9fa47ce94378b0fd1662ea5a golangci-lint-2.10.1-linux-armv7.deb
|
||||||
6b779d6ed1aed87cefe195cc11759902b97a76551b593312c6833f2635a3488f golangci-lint-2.4.0-netbsd-armv6.tar.gz
|
28c9331429a497da27e9c77846063bd0e8275e878ffedb4eb9e9f21d24771cc0 golangci-lint-2.10.1-linux-armv7.rpm
|
||||||
f00d1f4b7ec3468a0f9fffd0d9ea036248b029b7621cbc9a59c449ef94356d09 golangci-lint-2.4.0-netbsd-armv7.tar.gz
|
818f33e95b273e3769284b25563b51ef6a294e9e25acf140fda5830c075a1a59 golangci-lint-2.10.1-linux-armv7.tar.gz
|
||||||
3ce671b0b42b58e35066493aab75a7e2826c9e079988f1ba5d814a4029faaf87 golangci-lint-2.4.0-windows-386.zip
|
6b6b85ed4b7c27f51097dd681523000409dde835e86e6e314e87be4bb013e2ab golangci-lint-2.10.1-linux-loong64.deb
|
||||||
003112f7a56746feaabf20b744054bf9acdf900c9e77176383623c4b1d76aaa9 golangci-lint-2.4.0-windows-amd64.zip
|
94050a0cf06169e2ae44afb307dcaafa7d7c3b38c0c23b5652cf9cb60f0c337f golangci-lint-2.10.1-linux-loong64.rpm
|
||||||
dc0c2092af5d47fc2cd31a1dfe7b4c7e765fab22de98bd21ef2ffcc53ad9f54f golangci-lint-2.4.0-windows-arm64.zip
|
25820300fccb8c961c1cdcb1f77928040c079e04c43a3a5ceb34b1cb4a1c5c8d golangci-lint-2.10.1-linux-loong64.tar.gz
|
||||||
0263d23e20a260cb1592d35e12a388f99efe2c51b3611fdc66fbd9db1fce664d golangci-lint-2.4.0-windows-armv6.zip
|
98bf39d10139fdcaa37f94950e9bbb8888660ae468847ae0bf1cb5bf67c1f68b golangci-lint-2.10.1-linux-mips64.deb
|
||||||
9403c03bf648e6313036e0273149d44bad1b9ad53889b6d00e4ccb842ba3c058 golangci-lint-2.4.0-windows-armv7.zip
|
df3ce5f03808dcceaa8b683d1d06e95c885f09b59dc8e15deb840fbe2b3e3299 golangci-lint-2.10.1-linux-mips64.rpm
|
||||||
|
972508dda523067e6e6a1c8e6609d63bc7c4153819c11b947d439235cf17bac2 golangci-lint-2.10.1-linux-mips64.tar.gz
|
||||||
|
1d37f2919e183b5bf8b1777ed8c4b163d3b491d0158355a7999d647655cbbeb6 golangci-lint-2.10.1-linux-mips64le.deb
|
||||||
|
e341d031002cd09a416329ed40f674231051a38544b8f94deb2d1708ce1f4a6f golangci-lint-2.10.1-linux-mips64le.rpm
|
||||||
|
393560122b9cb5538df0c357d30eb27b6ee563533fbb9b138c8db4fd264002af golangci-lint-2.10.1-linux-mips64le.tar.gz
|
||||||
|
21ca46b6a96442e8957677a3ca059c6b93674a68a01b1c71f4e5df0ea2e96d19 golangci-lint-2.10.1-linux-ppc64le.deb
|
||||||
|
57fe0cbca0a9bbdf1547c5e8aa7d278e6896b438d72a541bae6bc62c38b43d1e golangci-lint-2.10.1-linux-ppc64le.rpm
|
||||||
|
e2883db9fa51584e5e203c64456f29993550a7faadc84e3faccdb48f0669992e golangci-lint-2.10.1-linux-ppc64le.tar.gz
|
||||||
|
aa6da0e98ab0ba3bb7582e112174c349907d5edfeff90a551dca3c6eecf92fc0 golangci-lint-2.10.1-linux-riscv64.deb
|
||||||
|
3c68d76cd884a7aad206223a980b9c20bb9ea74b560fa27ed02baf2389189234 golangci-lint-2.10.1-linux-riscv64.rpm
|
||||||
|
3bca11bfac4197205639cbd4676a5415054e629ac6c12ea10fcbe33ef852d9c3 golangci-lint-2.10.1-linux-riscv64.tar.gz
|
||||||
|
0c6aed2ce49db2586adbac72c80d871f06feb1caf4c0763a5ca98fec809a8f0b golangci-lint-2.10.1-linux-s390x.deb
|
||||||
|
16c285adfe1061d69dd8e503be69f87c7202857c6f4add74ac02e3571158fbec golangci-lint-2.10.1-linux-s390x.rpm
|
||||||
|
21011ad368eb04f024201b832095c6b5f96d0888de194cca5bfe4d9307d6364b golangci-lint-2.10.1-linux-s390x.tar.gz
|
||||||
|
7b5191e77a70485918712e31ed55159956323e4911bab1b67569c9d86e1b75eb golangci-lint-2.10.1-netbsd-386.tar.gz
|
||||||
|
07801fd38d293ebad10826f8285525a39ea91ce5ddad77d05bfa90bda9c884a9 golangci-lint-2.10.1-netbsd-amd64.tar.gz
|
||||||
|
7e7219d71c1bf33b98c328c93dc0560706dd896a1c43c44696e5222fc9d7446e golangci-lint-2.10.1-netbsd-arm64.tar.gz
|
||||||
|
92fbc90b9eec0e572269b0f5492a2895c426b086a68372fde49b7e4d4020863e golangci-lint-2.10.1-netbsd-armv6.tar.gz
|
||||||
|
f67b3ae1f47caeefa507a4ebb0c8336958a19011fe48766443212030f75d004b golangci-lint-2.10.1-netbsd-armv7.tar.gz
|
||||||
|
a40bc091c10cea84eaee1a90b84b65f5e8652113b0a600bb099e4e4d9d7caddb golangci-lint-2.10.1-windows-386.zip
|
||||||
|
c60c87695e79db8e320f0e5be885059859de52bb5ee5f11be5577828570bc2a3 golangci-lint-2.10.1-windows-amd64.zip
|
||||||
|
636ab790c8dcea8034aa34aba6031ca3893d68f7eda000460ab534341fadbab1 golangci-lint-2.10.1-windows-arm64.zip
|
||||||
|
|
||||||
# This is the builder on PPA that will build Go itself (inception-y), don't modify!
|
# This is the builder on PPA that will build Go itself (inception-y), don't modify!
|
||||||
#
|
#
|
||||||
|
|
|
||||||
35
build/ci.go
35
build/ci.go
|
|
@ -107,6 +107,22 @@ var (
|
||||||
Tags: "ziren",
|
Tags: "ziren",
|
||||||
Env: map[string]string{"GOMIPS": "softfloat", "CGO_ENABLED": "0"},
|
Env: map[string]string{"GOMIPS": "softfloat", "CGO_ENABLED": "0"},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
Name: "womir",
|
||||||
|
GOOS: "wasip1",
|
||||||
|
GOARCH: "wasm",
|
||||||
|
Tags: "womir",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "wasm-js",
|
||||||
|
GOOS: "js",
|
||||||
|
GOARCH: "wasm",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "wasm-wasi",
|
||||||
|
GOOS: "wasip1",
|
||||||
|
GOARCH: "wasm",
|
||||||
|
},
|
||||||
{
|
{
|
||||||
Name: "example",
|
Name: "example",
|
||||||
Tags: "example",
|
Tags: "example",
|
||||||
|
|
@ -156,8 +172,6 @@ var (
|
||||||
"focal", // 20.04, EOL: 04/2030
|
"focal", // 20.04, EOL: 04/2030
|
||||||
"jammy", // 22.04, EOL: 04/2032
|
"jammy", // 22.04, EOL: 04/2032
|
||||||
"noble", // 24.04, EOL: 04/2034
|
"noble", // 24.04, EOL: 04/2034
|
||||||
"oracular", // 24.10, EOL: 07/2025
|
|
||||||
"plucky", // 25.04, EOL: 01/2026
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// This is where the tests should be unpacked.
|
// This is where the tests should be unpacked.
|
||||||
|
|
@ -295,7 +309,7 @@ func doInstallKeeper(cmdline []string) {
|
||||||
args := slices.Clone(gobuild.Args)
|
args := slices.Clone(gobuild.Args)
|
||||||
args = append(args, "-o", executablePath(outputName))
|
args = append(args, "-o", executablePath(outputName))
|
||||||
args = append(args, ".")
|
args = append(args, ".")
|
||||||
build.MustRun(&exec.Cmd{Path: gobuild.Path, Args: args, Env: gobuild.Env})
|
build.MustRun(&exec.Cmd{Path: gobuild.Path, Args: args, Env: gobuild.Env, Dir: gobuild.Dir})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -331,6 +345,10 @@ func buildFlags(env build.Environment, staticLinking bool, buildTags []string) (
|
||||||
}
|
}
|
||||||
ld = append(ld, "-extldflags", "'"+strings.Join(extld, " ")+"'")
|
ld = append(ld, "-extldflags", "'"+strings.Join(extld, " ")+"'")
|
||||||
}
|
}
|
||||||
|
// TODO(gballet): revisit after the input api has been defined
|
||||||
|
if runtime.GOARCH == "wasm" {
|
||||||
|
ld = append(ld, "-gcflags=all=-d=softfloat")
|
||||||
|
}
|
||||||
if len(ld) > 0 {
|
if len(ld) > 0 {
|
||||||
flags = append(flags, "-ldflags", strings.Join(ld, " "))
|
flags = append(flags, "-ldflags", strings.Join(ld, " "))
|
||||||
}
|
}
|
||||||
|
|
@ -446,9 +464,14 @@ func doCheckGenerate() {
|
||||||
)
|
)
|
||||||
pathList := []string{filepath.Join(protocPath, "bin"), protocGenGoPath, os.Getenv("PATH")}
|
pathList := []string{filepath.Join(protocPath, "bin"), protocGenGoPath, os.Getenv("PATH")}
|
||||||
|
|
||||||
|
excludes := []string{"tests/testdata", "build/cache", ".git"}
|
||||||
|
for i := range excludes {
|
||||||
|
excludes[i] = filepath.FromSlash(excludes[i])
|
||||||
|
}
|
||||||
|
|
||||||
for _, mod := range goModules {
|
for _, mod := range goModules {
|
||||||
// Compute the origin hashes of all the files
|
// Compute the origin hashes of all the files
|
||||||
hashes, err := build.HashFolder(mod, []string{"tests/testdata", "build/cache", ".git"})
|
hashes, err := build.HashFolder(mod, excludes)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal("Error computing hashes", "err", err)
|
log.Fatal("Error computing hashes", "err", err)
|
||||||
}
|
}
|
||||||
|
|
@ -458,7 +481,7 @@ func doCheckGenerate() {
|
||||||
c.Dir = mod
|
c.Dir = mod
|
||||||
build.MustRun(c)
|
build.MustRun(c)
|
||||||
// Check if generate file hashes have changed
|
// Check if generate file hashes have changed
|
||||||
generated, err := build.HashFolder(mod, []string{"tests/testdata", "build/cache", ".git"})
|
generated, err := build.HashFolder(mod, excludes)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("Error re-computing hashes: %v", err)
|
log.Fatalf("Error re-computing hashes: %v", err)
|
||||||
}
|
}
|
||||||
|
|
@ -1177,7 +1200,7 @@ func doWindowsInstaller(cmdline []string) {
|
||||||
var (
|
var (
|
||||||
arch = flag.String("arch", runtime.GOARCH, "Architecture for cross build packaging")
|
arch = flag.String("arch", runtime.GOARCH, "Architecture for cross build packaging")
|
||||||
signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. WINDOWS_SIGNING_KEY)`)
|
signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. WINDOWS_SIGNING_KEY)`)
|
||||||
signify = flag.String("signify key", "", `Environment variable holding the signify signing key (e.g. WINDOWS_SIGNIFY_KEY)`)
|
signify = flag.String("signify", "", `Environment variable holding the signify signing key (e.g. WINDOWS_SIGNIFY_KEY)`)
|
||||||
upload = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`)
|
upload = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`)
|
||||||
workdir = flag.String("workdir", "", `Output directory for packages (uses temp dir if unset)`)
|
workdir = flag.String("workdir", "", `Output directory for packages (uses temp dir if unset)`)
|
||||||
)
|
)
|
||||||
|
|
|
||||||
32
circle.yml
32
circle.yml
|
|
@ -1,32 +0,0 @@
|
||||||
machine:
|
|
||||||
services:
|
|
||||||
- docker
|
|
||||||
|
|
||||||
dependencies:
|
|
||||||
cache_directories:
|
|
||||||
- "~/.ethash" # Cache the ethash DAG generated by hive for consecutive builds
|
|
||||||
- "~/.docker" # Cache all docker images manually to avoid lengthy rebuilds
|
|
||||||
override:
|
|
||||||
# Restore all previously cached docker images
|
|
||||||
- mkdir -p ~/.docker
|
|
||||||
- for img in `ls ~/.docker`; do docker load -i ~/.docker/$img; done
|
|
||||||
|
|
||||||
# Pull in and hive, restore cached ethash DAGs and do a dry run
|
|
||||||
- go get -u github.com/karalabe/hive
|
|
||||||
- (cd ~/.go_workspace/src/github.com/karalabe/hive && mkdir -p workspace/ethash/ ~/.ethash)
|
|
||||||
- (cd ~/.go_workspace/src/github.com/karalabe/hive && cp -r ~/.ethash/. workspace/ethash/)
|
|
||||||
- (cd ~/.go_workspace/src/github.com/karalabe/hive && hive --docker-noshell --client=NONE --test=. --sim=. --loglevel=6)
|
|
||||||
|
|
||||||
# Cache all the docker images and the ethash DAGs
|
|
||||||
- for img in `docker images | grep -v "^<none>" | tail -n +2 | awk '{print $1}'`; do docker save $img > ~/.docker/`echo $img | tr '/' ':'`.tar; done
|
|
||||||
- cp -r ~/.go_workspace/src/github.com/karalabe/hive/workspace/ethash/. ~/.ethash
|
|
||||||
|
|
||||||
test:
|
|
||||||
override:
|
|
||||||
# Build Geth and move into a known folder
|
|
||||||
- make geth
|
|
||||||
- cp ./build/bin/geth $HOME/geth
|
|
||||||
|
|
||||||
# Run hive and move all generated logs into the public artifacts folder
|
|
||||||
- (cd ~/.go_workspace/src/github.com/karalabe/hive && hive --docker-noshell --client=go-ethereum:local --override=$HOME/geth --test=. --sim=.)
|
|
||||||
- cp -r ~/.go_workspace/src/github.com/karalabe/hive/workspace/logs/* $CIRCLE_ARTIFACTS
|
|
||||||
|
|
@ -51,6 +51,12 @@ type Chain struct {
|
||||||
state map[common.Address]state.DumpAccount // state of head block
|
state map[common.Address]state.DumpAccount // state of head block
|
||||||
senders map[common.Address]*senderInfo
|
senders map[common.Address]*senderInfo
|
||||||
config *params.ChainConfig
|
config *params.ChainConfig
|
||||||
|
|
||||||
|
txInfo txInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
type txInfo struct {
|
||||||
|
LargeReceiptBlock *uint64 `json:"tx-largereceipt"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewChain takes the given chain.rlp file, and decodes and returns
|
// NewChain takes the given chain.rlp file, and decodes and returns
|
||||||
|
|
@ -74,12 +80,20 @@ func NewChain(dir string) (*Chain, error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var txInfo txInfo
|
||||||
|
err = common.LoadJSON(filepath.Join(dir, "txinfo.json"), &txInfo)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
return &Chain{
|
return &Chain{
|
||||||
genesis: gen,
|
genesis: gen,
|
||||||
blocks: blocks,
|
blocks: blocks,
|
||||||
state: state,
|
state: state,
|
||||||
senders: accounts,
|
senders: accounts,
|
||||||
config: gen.Config,
|
config: gen.Config,
|
||||||
|
txInfo: txInfo,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -66,9 +66,10 @@ func (s *Suite) dialAs(key *ecdsa.PrivateKey) (*Conn, error) {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
conn.caps = []p2p.Cap{
|
conn.caps = []p2p.Cap{
|
||||||
|
{Name: "eth", Version: 70},
|
||||||
{Name: "eth", Version: 69},
|
{Name: "eth", Version: 69},
|
||||||
}
|
}
|
||||||
conn.ourHighestProtoVersion = 69
|
conn.ourHighestProtoVersion = 70
|
||||||
return &conn, nil
|
return &conn, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -155,7 +156,7 @@ func (c *Conn) ReadEth() (any, error) {
|
||||||
var msg any
|
var msg any
|
||||||
switch int(code) {
|
switch int(code) {
|
||||||
case eth.StatusMsg:
|
case eth.StatusMsg:
|
||||||
msg = new(eth.StatusPacket69)
|
msg = new(eth.StatusPacket)
|
||||||
case eth.GetBlockHeadersMsg:
|
case eth.GetBlockHeadersMsg:
|
||||||
msg = new(eth.GetBlockHeadersPacket)
|
msg = new(eth.GetBlockHeadersPacket)
|
||||||
case eth.BlockHeadersMsg:
|
case eth.BlockHeadersMsg:
|
||||||
|
|
@ -164,10 +165,6 @@ func (c *Conn) ReadEth() (any, error) {
|
||||||
msg = new(eth.GetBlockBodiesPacket)
|
msg = new(eth.GetBlockBodiesPacket)
|
||||||
case eth.BlockBodiesMsg:
|
case eth.BlockBodiesMsg:
|
||||||
msg = new(eth.BlockBodiesPacket)
|
msg = new(eth.BlockBodiesPacket)
|
||||||
case eth.NewBlockMsg:
|
|
||||||
msg = new(eth.NewBlockPacket)
|
|
||||||
case eth.NewBlockHashesMsg:
|
|
||||||
msg = new(eth.NewBlockHashesPacket)
|
|
||||||
case eth.TransactionsMsg:
|
case eth.TransactionsMsg:
|
||||||
msg = new(eth.TransactionsPacket)
|
msg = new(eth.TransactionsPacket)
|
||||||
case eth.NewPooledTransactionHashesMsg:
|
case eth.NewPooledTransactionHashesMsg:
|
||||||
|
|
@ -229,7 +226,7 @@ func (c *Conn) ReadSnap() (any, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// dialAndPeer creates a peer connection and runs the handshake.
|
// dialAndPeer creates a peer connection and runs the handshake.
|
||||||
func (s *Suite) dialAndPeer(status *eth.StatusPacket69) (*Conn, error) {
|
func (s *Suite) dialAndPeer(status *eth.StatusPacket) (*Conn, error) {
|
||||||
c, err := s.dial()
|
c, err := s.dial()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
|
@ -242,7 +239,7 @@ func (s *Suite) dialAndPeer(status *eth.StatusPacket69) (*Conn, error) {
|
||||||
|
|
||||||
// peer performs both the protocol handshake and the status message
|
// peer performs both the protocol handshake and the status message
|
||||||
// exchange with the node in order to peer with it.
|
// exchange with the node in order to peer with it.
|
||||||
func (c *Conn) peer(chain *Chain, status *eth.StatusPacket69) error {
|
func (c *Conn) peer(chain *Chain, status *eth.StatusPacket) error {
|
||||||
if err := c.handshake(); err != nil {
|
if err := c.handshake(); err != nil {
|
||||||
return fmt.Errorf("handshake failed: %v", err)
|
return fmt.Errorf("handshake failed: %v", err)
|
||||||
}
|
}
|
||||||
|
|
@ -315,7 +312,7 @@ func (c *Conn) negotiateEthProtocol(caps []p2p.Cap) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// statusExchange performs a `Status` message exchange with the given node.
|
// statusExchange performs a `Status` message exchange with the given node.
|
||||||
func (c *Conn) statusExchange(chain *Chain, status *eth.StatusPacket69) error {
|
func (c *Conn) statusExchange(chain *Chain, status *eth.StatusPacket) error {
|
||||||
loop:
|
loop:
|
||||||
for {
|
for {
|
||||||
code, data, err := c.Read()
|
code, data, err := c.Read()
|
||||||
|
|
@ -324,7 +321,7 @@ loop:
|
||||||
}
|
}
|
||||||
switch code {
|
switch code {
|
||||||
case eth.StatusMsg + protoOffset(ethProto):
|
case eth.StatusMsg + protoOffset(ethProto):
|
||||||
msg := new(eth.StatusPacket69)
|
msg := new(eth.StatusPacket)
|
||||||
if err := rlp.DecodeBytes(data, &msg); err != nil {
|
if err := rlp.DecodeBytes(data, &msg); err != nil {
|
||||||
return fmt.Errorf("error decoding status packet: %w", err)
|
return fmt.Errorf("error decoding status packet: %w", err)
|
||||||
}
|
}
|
||||||
|
|
@ -339,10 +336,12 @@ loop:
|
||||||
if have, want := msg.ForkID, chain.ForkID(); !reflect.DeepEqual(have, want) {
|
if have, want := msg.ForkID, chain.ForkID(); !reflect.DeepEqual(have, want) {
|
||||||
return fmt.Errorf("wrong fork ID in status: have %v, want %v", have, want)
|
return fmt.Errorf("wrong fork ID in status: have %v, want %v", have, want)
|
||||||
}
|
}
|
||||||
if have, want := msg.ProtocolVersion, c.ourHighestProtoVersion; have != uint32(want) {
|
for _, cap := range c.caps {
|
||||||
return fmt.Errorf("wrong protocol version: have %v, want %v", have, want)
|
if cap.Name == "eth" && cap.Version == uint(msg.ProtocolVersion) {
|
||||||
}
|
|
||||||
break loop
|
break loop
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return fmt.Errorf("wrong protocol version: have %v, want %v", msg.ProtocolVersion, c.caps)
|
||||||
case discMsg:
|
case discMsg:
|
||||||
var msg []p2p.DiscReason
|
var msg []p2p.DiscReason
|
||||||
if rlp.DecodeBytes(data, &msg); len(msg) == 0 {
|
if rlp.DecodeBytes(data, &msg); len(msg) == 0 {
|
||||||
|
|
@ -363,7 +362,7 @@ loop:
|
||||||
}
|
}
|
||||||
if status == nil {
|
if status == nil {
|
||||||
// default status message
|
// default status message
|
||||||
status = ð.StatusPacket69{
|
status = ð.StatusPacket{
|
||||||
ProtocolVersion: uint32(c.negotiatedProtoVersion),
|
ProtocolVersion: uint32(c.negotiatedProtoVersion),
|
||||||
NetworkID: chain.config.ChainID.Uint64(),
|
NetworkID: chain.config.ChainID.Uint64(),
|
||||||
Genesis: chain.blocks[0].Hash(),
|
Genesis: chain.blocks[0].Hash(),
|
||||||
|
|
|
||||||
|
|
@ -86,9 +86,3 @@ func protoOffset(proto Proto) uint64 {
|
||||||
panic("unhandled protocol")
|
panic("unhandled protocol")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// msgTypePtr is the constraint for protocol message types.
|
|
||||||
type msgTypePtr[U any] interface {
|
|
||||||
*U
|
|
||||||
Kind() byte
|
|
||||||
}
|
|
||||||
|
|
|
||||||
|
|
@ -30,6 +30,7 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/eth/protocols/snap"
|
"github.com/ethereum/go-ethereum/eth/protocols/snap"
|
||||||
"github.com/ethereum/go-ethereum/internal/utesting"
|
"github.com/ethereum/go-ethereum/internal/utesting"
|
||||||
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"github.com/ethereum/go-ethereum/trie"
|
"github.com/ethereum/go-ethereum/trie"
|
||||||
"github.com/ethereum/go-ethereum/trie/trienode"
|
"github.com/ethereum/go-ethereum/trie/trienode"
|
||||||
)
|
)
|
||||||
|
|
@ -86,9 +87,9 @@ func (s *Suite) TestSnapGetAccountRange(t *utesting.T) {
|
||||||
root: root,
|
root: root,
|
||||||
startingHash: zero,
|
startingHash: zero,
|
||||||
limitHash: ffHash,
|
limitHash: ffHash,
|
||||||
expAccounts: 67,
|
expAccounts: 68,
|
||||||
expFirst: firstKey,
|
expFirst: firstKey,
|
||||||
expLast: common.HexToHash("0x622e662246601dd04f996289ce8b85e86db7bb15bb17f86487ec9d543ddb6f9a"),
|
expLast: common.HexToHash("0x59312f89c13e9e24c1cb8b103aa39a9b2800348d97a92c2c9e2a78fa02b70025"),
|
||||||
desc: "In this test, we request the entire state range, but limit the response to 4000 bytes.",
|
desc: "In this test, we request the entire state range, but limit the response to 4000 bytes.",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|
@ -96,9 +97,9 @@ func (s *Suite) TestSnapGetAccountRange(t *utesting.T) {
|
||||||
root: root,
|
root: root,
|
||||||
startingHash: zero,
|
startingHash: zero,
|
||||||
limitHash: ffHash,
|
limitHash: ffHash,
|
||||||
expAccounts: 49,
|
expAccounts: 50,
|
||||||
expFirst: firstKey,
|
expFirst: firstKey,
|
||||||
expLast: common.HexToHash("0x445cb5c1278fdce2f9cbdb681bdd76c52f8e50e41dbd9e220242a69ba99ac099"),
|
expLast: common.HexToHash("0x4615e5f5df5b25349a00ad313c6cd0436b6c08ee5826e33a018661997f85ebaa"),
|
||||||
desc: "In this test, we request the entire state range, but limit the response to 3000 bytes.",
|
desc: "In this test, we request the entire state range, but limit the response to 3000 bytes.",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|
@ -106,9 +107,9 @@ func (s *Suite) TestSnapGetAccountRange(t *utesting.T) {
|
||||||
root: root,
|
root: root,
|
||||||
startingHash: zero,
|
startingHash: zero,
|
||||||
limitHash: ffHash,
|
limitHash: ffHash,
|
||||||
expAccounts: 34,
|
expAccounts: 35,
|
||||||
expFirst: firstKey,
|
expFirst: firstKey,
|
||||||
expLast: common.HexToHash("0x2ef46ebd2073cecde499c2e8df028ad79a26d57bfaa812c4c6f7eb4c9617b913"),
|
expLast: common.HexToHash("0x2de4bdbddcfbb9c3e195dae6b45f9c38daff897e926764bf34887fb0db5c3284"),
|
||||||
desc: "In this test, we request the entire state range, but limit the response to 2000 bytes.",
|
desc: "In this test, we request the entire state range, but limit the response to 2000 bytes.",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|
@ -177,9 +178,9 @@ The server should return the first available account.`,
|
||||||
root: root,
|
root: root,
|
||||||
startingHash: firstKey,
|
startingHash: firstKey,
|
||||||
limitHash: ffHash,
|
limitHash: ffHash,
|
||||||
expAccounts: 67,
|
expAccounts: 68,
|
||||||
expFirst: firstKey,
|
expFirst: firstKey,
|
||||||
expLast: common.HexToHash("0x622e662246601dd04f996289ce8b85e86db7bb15bb17f86487ec9d543ddb6f9a"),
|
expLast: common.HexToHash("0x59312f89c13e9e24c1cb8b103aa39a9b2800348d97a92c2c9e2a78fa02b70025"),
|
||||||
desc: `In this test, startingHash is exactly the first available account key.
|
desc: `In this test, startingHash is exactly the first available account key.
|
||||||
The server should return the first available account of the state as the first item.`,
|
The server should return the first available account of the state as the first item.`,
|
||||||
},
|
},
|
||||||
|
|
@ -188,9 +189,9 @@ The server should return the first available account of the state as the first i
|
||||||
root: root,
|
root: root,
|
||||||
startingHash: hashAdd(firstKey, 1),
|
startingHash: hashAdd(firstKey, 1),
|
||||||
limitHash: ffHash,
|
limitHash: ffHash,
|
||||||
expAccounts: 67,
|
expAccounts: 68,
|
||||||
expFirst: secondKey,
|
expFirst: secondKey,
|
||||||
expLast: common.HexToHash("0x66192e4c757fba1cdc776e6737008f42d50370d3cd801db3624274283bf7cd63"),
|
expLast: common.HexToHash("0x59a7c8818f1c16b298a054020dc7c3f403a970d1d1db33f9478b1c36e3a2e509"),
|
||||||
desc: `In this test, startingHash is after the first available key.
|
desc: `In this test, startingHash is after the first available key.
|
||||||
The server should return the second account of the state as the first item.`,
|
The server should return the second account of the state as the first item.`,
|
||||||
},
|
},
|
||||||
|
|
@ -226,9 +227,9 @@ server to return no data because genesis is older than 127 blocks.`,
|
||||||
root: s.chain.RootAt(int(s.chain.Head().Number().Uint64()) - 127),
|
root: s.chain.RootAt(int(s.chain.Head().Number().Uint64()) - 127),
|
||||||
startingHash: zero,
|
startingHash: zero,
|
||||||
limitHash: ffHash,
|
limitHash: ffHash,
|
||||||
expAccounts: 66,
|
expAccounts: 68,
|
||||||
expFirst: firstKey,
|
expFirst: firstKey,
|
||||||
expLast: common.HexToHash("0x729953a43ed6c913df957172680a17e5735143ad767bda8f58ac84ec62fbec5e"),
|
expLast: common.HexToHash("0x683b6c03cc32afe5db8cb96050f711fdaff8f8ff44c7587a9a848f921d02815e"),
|
||||||
desc: `This test requests data at a state root that is 127 blocks old.
|
desc: `This test requests data at a state root that is 127 blocks old.
|
||||||
We expect the server to have this state available.`,
|
We expect the server to have this state available.`,
|
||||||
},
|
},
|
||||||
|
|
@ -657,8 +658,8 @@ The server should reject the request.`,
|
||||||
// It's a bit unfortunate these are hard-coded, but the result depends on
|
// It's a bit unfortunate these are hard-coded, but the result depends on
|
||||||
// a lot of aspects of the state trie and can't be guessed in a simple
|
// a lot of aspects of the state trie and can't be guessed in a simple
|
||||||
// way. So you'll have to update this when the test chain is changed.
|
// way. So you'll have to update this when the test chain is changed.
|
||||||
common.HexToHash("0x5bdc0d6057b35642a16d27223ea5454e5a17a400e28f7328971a5f2a87773b76"),
|
common.HexToHash("0x4bdecec09691ad38113eebee2df94fadefdff5841c0f182bae1be3c8a6d60bf3"),
|
||||||
common.HexToHash("0x0a76c9812ca90ffed8ee4d191e683f93386b6e50cfe3679c0760d27510aa7fc5"),
|
common.HexToHash("0x4178696465d4514ff5924ef8c28ce64d41a669634b63184c2c093e252d6b4bc4"),
|
||||||
empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
|
empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
|
||||||
empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
|
empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
|
||||||
empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
|
empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
|
||||||
|
|
@ -678,8 +679,8 @@ The server should reject the request.`,
|
||||||
// be updated when the test chain is changed.
|
// be updated when the test chain is changed.
|
||||||
expHashes: []common.Hash{
|
expHashes: []common.Hash{
|
||||||
empty,
|
empty,
|
||||||
common.HexToHash("0x0a76c9812ca90ffed8ee4d191e683f93386b6e50cfe3679c0760d27510aa7fc5"),
|
common.HexToHash("0x4178696465d4514ff5924ef8c28ce64d41a669634b63184c2c093e252d6b4bc4"),
|
||||||
common.HexToHash("0x5bdc0d6057b35642a16d27223ea5454e5a17a400e28f7328971a5f2a87773b76"),
|
common.HexToHash("0x4bdecec09691ad38113eebee2df94fadefdff5841c0f182bae1be3c8a6d60bf3"),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
|
||||||
|
|
@ -937,10 +938,14 @@ func (s *Suite) snapGetTrieNodes(t *utesting.T, tc *trieNodesTest) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// write0 request
|
// write0 request
|
||||||
|
paths, err := rlp.EncodeToRawList(tc.paths)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
req := &snap.GetTrieNodesPacket{
|
req := &snap.GetTrieNodesPacket{
|
||||||
ID: uint64(rand.Int63()),
|
ID: uint64(rand.Int63()),
|
||||||
Root: tc.root,
|
Root: tc.root,
|
||||||
Paths: tc.paths,
|
Paths: paths,
|
||||||
Bytes: tc.nBytes,
|
Bytes: tc.nBytes,
|
||||||
}
|
}
|
||||||
msg, err := conn.snapRequest(snap.GetTrieNodesMsg, req)
|
msg, err := conn.snapRequest(snap.GetTrieNodesMsg, req)
|
||||||
|
|
|
||||||
|
|
@ -34,6 +34,8 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/internal/utesting"
|
"github.com/ethereum/go-ethereum/internal/utesting"
|
||||||
"github.com/ethereum/go-ethereum/p2p"
|
"github.com/ethereum/go-ethereum/p2p"
|
||||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||||
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
|
"github.com/ethereum/go-ethereum/trie"
|
||||||
"github.com/holiman/uint256"
|
"github.com/holiman/uint256"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -82,6 +84,7 @@ func (s *Suite) EthTests() []utesting.Test {
|
||||||
// get history
|
// get history
|
||||||
{Name: "GetBlockBodies", Fn: s.TestGetBlockBodies},
|
{Name: "GetBlockBodies", Fn: s.TestGetBlockBodies},
|
||||||
{Name: "GetReceipts", Fn: s.TestGetReceipts},
|
{Name: "GetReceipts", Fn: s.TestGetReceipts},
|
||||||
|
{Name: "GetLargeReceipts", Fn: s.TestGetLargeReceipts},
|
||||||
// test transactions
|
// test transactions
|
||||||
{Name: "LargeTxRequest", Fn: s.TestLargeTxRequest, Slow: true},
|
{Name: "LargeTxRequest", Fn: s.TestLargeTxRequest, Slow: true},
|
||||||
{Name: "Transaction", Fn: s.TestTransaction},
|
{Name: "Transaction", Fn: s.TestTransaction},
|
||||||
|
|
@ -151,7 +154,11 @@ func (s *Suite) TestGetBlockHeaders(t *utesting.T) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("failed to get headers for given request: %v", err)
|
t.Fatalf("failed to get headers for given request: %v", err)
|
||||||
}
|
}
|
||||||
if !headersMatch(expected, headers.BlockHeadersRequest) {
|
received, err := headers.List.Items()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("invalid headers received: %v", err)
|
||||||
|
}
|
||||||
|
if !headersMatch(expected, received) {
|
||||||
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected, headers)
|
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected, headers)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -237,7 +244,7 @@ concurrently, with different request IDs.`)
|
||||||
|
|
||||||
// Wait for responses.
|
// Wait for responses.
|
||||||
// Note they can arrive in either order.
|
// Note they can arrive in either order.
|
||||||
resp, err := collectResponses(conn, 2, func(msg *eth.BlockHeadersPacket) uint64 {
|
resp, err := collectHeaderResponses(conn, 2, func(msg *eth.BlockHeadersPacket) uint64 {
|
||||||
if msg.RequestId != 111 && msg.RequestId != 222 {
|
if msg.RequestId != 111 && msg.RequestId != 222 {
|
||||||
t.Fatalf("response with unknown request ID: %v", msg.RequestId)
|
t.Fatalf("response with unknown request ID: %v", msg.RequestId)
|
||||||
}
|
}
|
||||||
|
|
@ -248,17 +255,11 @@ concurrently, with different request IDs.`)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check if headers match.
|
// Check if headers match.
|
||||||
resp1 := resp[111]
|
if err := s.checkHeadersAgainstChain(req1, resp[111]); err != nil {
|
||||||
if expected, err := s.chain.GetHeaders(req1); err != nil {
|
t.Fatal(err)
|
||||||
t.Fatalf("failed to get expected headers for request 1: %v", err)
|
|
||||||
} else if !headersMatch(expected, resp1.BlockHeadersRequest) {
|
|
||||||
t.Fatalf("header mismatch for request ID %v: \nexpected %v \ngot %v", 111, expected, resp1)
|
|
||||||
}
|
}
|
||||||
resp2 := resp[222]
|
if err := s.checkHeadersAgainstChain(req2, resp[222]); err != nil {
|
||||||
if expected, err := s.chain.GetHeaders(req2); err != nil {
|
t.Fatal(err)
|
||||||
t.Fatalf("failed to get expected headers for request 2: %v", err)
|
|
||||||
} else if !headersMatch(expected, resp2.BlockHeadersRequest) {
|
|
||||||
t.Fatalf("header mismatch for request ID %v: \nexpected %v \ngot %v", 222, expected, resp2)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -303,8 +304,8 @@ same request ID. The node should handle the request by responding to both reques
|
||||||
|
|
||||||
// Wait for the responses. They can arrive in either order, and we can't tell them
|
// Wait for the responses. They can arrive in either order, and we can't tell them
|
||||||
// apart by their request ID, so use the number of headers instead.
|
// apart by their request ID, so use the number of headers instead.
|
||||||
resp, err := collectResponses(conn, 2, func(msg *eth.BlockHeadersPacket) uint64 {
|
resp, err := collectHeaderResponses(conn, 2, func(msg *eth.BlockHeadersPacket) uint64 {
|
||||||
id := uint64(len(msg.BlockHeadersRequest))
|
id := uint64(msg.List.Len())
|
||||||
if id != 2 && id != 3 {
|
if id != 2 && id != 3 {
|
||||||
t.Fatalf("invalid number of headers in response: %d", id)
|
t.Fatalf("invalid number of headers in response: %d", id)
|
||||||
}
|
}
|
||||||
|
|
@ -315,26 +316,35 @@ same request ID. The node should handle the request by responding to both reques
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check if headers match.
|
// Check if headers match.
|
||||||
resp1 := resp[2]
|
if err := s.checkHeadersAgainstChain(request1, resp[2]); err != nil {
|
||||||
if expected, err := s.chain.GetHeaders(request1); err != nil {
|
t.Fatal(err)
|
||||||
t.Fatalf("failed to get expected headers for request 1: %v", err)
|
|
||||||
} else if !headersMatch(expected, resp1.BlockHeadersRequest) {
|
|
||||||
t.Fatalf("headers mismatch: \nexpected %v \ngot %v", expected, resp1)
|
|
||||||
}
|
}
|
||||||
resp2 := resp[3]
|
if err := s.checkHeadersAgainstChain(request2, resp[3]); err != nil {
|
||||||
if expected, err := s.chain.GetHeaders(request2); err != nil {
|
t.Fatal(err)
|
||||||
t.Fatalf("failed to get expected headers for request 2: %v", err)
|
|
||||||
} else if !headersMatch(expected, resp2.BlockHeadersRequest) {
|
|
||||||
t.Fatalf("headers mismatch: \nexpected %v \ngot %v", expected, resp2)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *Suite) checkHeadersAgainstChain(req *eth.GetBlockHeadersPacket, resp *eth.BlockHeadersPacket) error {
|
||||||
|
received2, err := resp.List.Items()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("invalid headers in response with request ID %v (%d items): %v", resp.RequestId, resp.List.Len(), err)
|
||||||
|
}
|
||||||
|
if expected, err := s.chain.GetHeaders(req); err != nil {
|
||||||
|
return fmt.Errorf("test chain failed to get expected headers for request: %v", err)
|
||||||
|
} else if !headersMatch(expected, received2) {
|
||||||
|
return fmt.Errorf("header mismatch for request ID %v (%d items): \nexpected %v \ngot %v", resp.RequestId, resp.List.Len(), expected, resp)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// collectResponses waits for n messages of type T on the given connection.
|
// collectResponses waits for n messages of type T on the given connection.
|
||||||
// The messsages are collected according to the 'identity' function.
|
// The messsages are collected according to the 'identity' function.
|
||||||
func collectResponses[T any, P msgTypePtr[T]](conn *Conn, n int, identity func(P) uint64) (map[uint64]P, error) {
|
//
|
||||||
resp := make(map[uint64]P, n)
|
// This function is written in a generic way to handle
|
||||||
|
func collectHeaderResponses(conn *Conn, n int, identity func(*eth.BlockHeadersPacket) uint64) (map[uint64]*eth.BlockHeadersPacket, error) {
|
||||||
|
resp := make(map[uint64]*eth.BlockHeadersPacket, n)
|
||||||
for range n {
|
for range n {
|
||||||
r := new(T)
|
r := new(eth.BlockHeadersPacket)
|
||||||
if err := conn.ReadMsg(ethProto, eth.BlockHeadersMsg, r); err != nil {
|
if err := conn.ReadMsg(ethProto, eth.BlockHeadersMsg, r); err != nil {
|
||||||
return resp, fmt.Errorf("read error: %v", err)
|
return resp, fmt.Errorf("read error: %v", err)
|
||||||
}
|
}
|
||||||
|
|
@ -373,10 +383,8 @@ and expects a response.`)
|
||||||
if got, want := headers.RequestId, req.RequestId; got != want {
|
if got, want := headers.RequestId, req.RequestId; got != want {
|
||||||
t.Fatalf("unexpected request id")
|
t.Fatalf("unexpected request id")
|
||||||
}
|
}
|
||||||
if expected, err := s.chain.GetHeaders(req); err != nil {
|
if err := s.checkHeadersAgainstChain(req, headers); err != nil {
|
||||||
t.Fatalf("failed to get expected block headers: %v", err)
|
t.Fatal(err)
|
||||||
} else if !headersMatch(expected, headers.BlockHeadersRequest) {
|
|
||||||
t.Fatalf("header mismatch: \nexpected %v \ngot %v", expected, headers)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -407,9 +415,8 @@ func (s *Suite) TestGetBlockBodies(t *utesting.T) {
|
||||||
if got, want := resp.RequestId, req.RequestId; got != want {
|
if got, want := resp.RequestId, req.RequestId; got != want {
|
||||||
t.Fatalf("unexpected request id in respond", got, want)
|
t.Fatalf("unexpected request id in respond", got, want)
|
||||||
}
|
}
|
||||||
bodies := resp.BlockBodiesResponse
|
if resp.List.Len() != len(req.GetBlockBodiesRequest) {
|
||||||
if len(bodies) != len(req.GetBlockBodiesRequest) {
|
t.Fatalf("wrong bodies in response: expected %d bodies, got %d", len(req.GetBlockBodiesRequest), resp.List.Len())
|
||||||
t.Fatalf("wrong bodies in response: expected %d bodies, got %d", len(req.GetBlockBodiesRequest), len(bodies))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -424,6 +431,9 @@ func (s *Suite) TestGetReceipts(t *utesting.T) {
|
||||||
// Find some blocks containing receipts.
|
// Find some blocks containing receipts.
|
||||||
var hashes = make([]common.Hash, 0, 3)
|
var hashes = make([]common.Hash, 0, 3)
|
||||||
for i := range s.chain.Len() {
|
for i := range s.chain.Len() {
|
||||||
|
if s.chain.txInfo.LargeReceiptBlock != nil && uint64(i) == *s.chain.txInfo.LargeReceiptBlock {
|
||||||
|
continue
|
||||||
|
}
|
||||||
block := s.chain.GetBlock(i)
|
block := s.chain.GetBlock(i)
|
||||||
if len(block.Transactions()) > 0 {
|
if len(block.Transactions()) > 0 {
|
||||||
hashes = append(hashes, block.Hash())
|
hashes = append(hashes, block.Hash())
|
||||||
|
|
@ -432,9 +442,9 @@ func (s *Suite) TestGetReceipts(t *utesting.T) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if conn.negotiatedProtoVersion < eth.ETH70 {
|
||||||
// Create block bodies request.
|
// Create block bodies request.
|
||||||
req := ð.GetReceiptsPacket{
|
req := ð.GetReceiptsPacket69{
|
||||||
RequestId: 66,
|
RequestId: 66,
|
||||||
GetReceiptsRequest: (eth.GetReceiptsRequest)(hashes),
|
GetReceiptsRequest: (eth.GetReceiptsRequest)(hashes),
|
||||||
}
|
}
|
||||||
|
|
@ -442,15 +452,111 @@ func (s *Suite) TestGetReceipts(t *utesting.T) {
|
||||||
t.Fatalf("could not write to connection: %v", err)
|
t.Fatalf("could not write to connection: %v", err)
|
||||||
}
|
}
|
||||||
// Wait for response.
|
// Wait for response.
|
||||||
resp := new(eth.ReceiptsPacket[*eth.ReceiptList69])
|
resp := new(eth.ReceiptsPacket69)
|
||||||
if err := conn.ReadMsg(ethProto, eth.ReceiptsMsg, &resp); err != nil {
|
if err := conn.ReadMsg(ethProto, eth.ReceiptsMsg, &resp); err != nil {
|
||||||
t.Fatalf("error reading block bodies msg: %v", err)
|
t.Fatalf("error reading block receipts msg: %v", err)
|
||||||
}
|
}
|
||||||
if got, want := resp.RequestId, req.RequestId; got != want {
|
if got, want := resp.RequestId, req.RequestId; got != want {
|
||||||
t.Fatalf("unexpected request id in respond", got, want)
|
t.Fatalf("unexpected request id in respond", got, want)
|
||||||
}
|
}
|
||||||
if len(resp.List) != len(req.GetReceiptsRequest) {
|
if resp.List.Len() != len(req.GetReceiptsRequest) {
|
||||||
t.Fatalf("wrong bodies in response: expected %d bodies, got %d", len(req.GetReceiptsRequest), len(resp.List))
|
t.Fatalf("wrong receipts in response: expected %d receipts, got %d", len(req.GetReceiptsRequest), resp.List.Len())
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Create block bodies request.
|
||||||
|
req := ð.GetReceiptsPacket70{
|
||||||
|
RequestId: 66,
|
||||||
|
FirstBlockReceiptIndex: 0,
|
||||||
|
GetReceiptsRequest: (eth.GetReceiptsRequest)(hashes),
|
||||||
|
}
|
||||||
|
if err := conn.Write(ethProto, eth.GetReceiptsMsg, req); err != nil {
|
||||||
|
t.Fatalf("could not write to connection: %v", err)
|
||||||
|
}
|
||||||
|
// Wait for response.
|
||||||
|
resp := new(eth.ReceiptsPacket70)
|
||||||
|
if err := conn.ReadMsg(ethProto, eth.ReceiptsMsg, &resp); err != nil {
|
||||||
|
t.Fatalf("error reading block receipts msg: %v", err)
|
||||||
|
}
|
||||||
|
if got, want := resp.RequestId, req.RequestId; got != want {
|
||||||
|
t.Fatalf("unexpected request id in respond", got, want)
|
||||||
|
}
|
||||||
|
if resp.List.Len() != len(req.GetReceiptsRequest) {
|
||||||
|
t.Fatalf("wrong receipts in response: expected %d receipts, got %d", len(req.GetReceiptsRequest), resp.List.Len())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Suite) TestGetLargeReceipts(t *utesting.T) {
|
||||||
|
t.Log(`This test sends GetReceipts requests to the node for large receipt (>10MiB) in the test chain.
|
||||||
|
This test is meaningful only if the client supports protocol version ETH70 or higher
|
||||||
|
and LargeReceiptBlock is configured in txInfo.json.`)
|
||||||
|
conn, err := s.dialAndPeer(nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("peering failed: %v", err)
|
||||||
|
}
|
||||||
|
defer conn.Close()
|
||||||
|
|
||||||
|
if conn.negotiatedProtoVersion < eth.ETH70 || s.chain.txInfo.LargeReceiptBlock == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find block with large receipt.
|
||||||
|
// Place the large receipt block hash in the middle of the query
|
||||||
|
start := max(int(*s.chain.txInfo.LargeReceiptBlock)-2, 0)
|
||||||
|
end := min(*s.chain.txInfo.LargeReceiptBlock+2, uint64(len(s.chain.blocks)))
|
||||||
|
|
||||||
|
var blocks []common.Hash
|
||||||
|
var receiptHashes []common.Hash
|
||||||
|
var receipts []*eth.ReceiptList
|
||||||
|
|
||||||
|
for i := uint64(start); i < end; i++ {
|
||||||
|
block := s.chain.GetBlock(int(i))
|
||||||
|
blocks = append(blocks, block.Hash())
|
||||||
|
receiptHashes = append(receiptHashes, block.Header().ReceiptHash)
|
||||||
|
receipts = append(receipts, ð.ReceiptList{})
|
||||||
|
}
|
||||||
|
|
||||||
|
incomplete := false
|
||||||
|
lastBlock := 0
|
||||||
|
|
||||||
|
for incomplete || lastBlock != len(blocks)-1 {
|
||||||
|
// Create get receipt request.
|
||||||
|
req := ð.GetReceiptsPacket70{
|
||||||
|
RequestId: 66,
|
||||||
|
FirstBlockReceiptIndex: uint64(receipts[lastBlock].Derivable().Len()),
|
||||||
|
GetReceiptsRequest: blocks[lastBlock:],
|
||||||
|
}
|
||||||
|
if err := conn.Write(ethProto, eth.GetReceiptsMsg, req); err != nil {
|
||||||
|
t.Fatalf("could not write to connection: %v", err)
|
||||||
|
}
|
||||||
|
// Wait for response.
|
||||||
|
resp := new(eth.ReceiptsPacket70)
|
||||||
|
if err := conn.ReadMsg(ethProto, eth.ReceiptsMsg, &resp); err != nil {
|
||||||
|
t.Fatalf("error reading block receipts msg: %v", err)
|
||||||
|
}
|
||||||
|
if got, want := resp.RequestId, req.RequestId; got != want {
|
||||||
|
t.Fatalf("unexpected request id in respond, want: %d, got: %d", got, want)
|
||||||
|
}
|
||||||
|
|
||||||
|
receiptLists, _ := resp.List.Items()
|
||||||
|
for i, rc := range receiptLists {
|
||||||
|
receipts[lastBlock+i].Append(rc)
|
||||||
|
}
|
||||||
|
lastBlock += len(receiptLists) - 1
|
||||||
|
|
||||||
|
incomplete = resp.LastBlockIncomplete
|
||||||
|
}
|
||||||
|
|
||||||
|
hasher := trie.NewStackTrie(nil)
|
||||||
|
hashes := make([]common.Hash, len(receipts))
|
||||||
|
for i := range receipts {
|
||||||
|
hashes[i] = types.DeriveSha(receipts[i].Derivable(), hasher)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, hash := range hashes {
|
||||||
|
if receiptHashes[i] != hash {
|
||||||
|
t.Fatalf("wrong receipt root: want %x, got %x", receiptHashes[i], hash)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -804,7 +910,11 @@ on another peer connection using GetPooledTransactions.`)
|
||||||
if got, want := msg.RequestId, req.RequestId; got != want {
|
if got, want := msg.RequestId, req.RequestId; got != want {
|
||||||
t.Fatalf("unexpected request id in response: got %d, want %d", got, want)
|
t.Fatalf("unexpected request id in response: got %d, want %d", got, want)
|
||||||
}
|
}
|
||||||
for _, got := range msg.PooledTransactionsResponse {
|
responseTxs, err := msg.List.Items()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("invalid transactions in response: %v", err)
|
||||||
|
}
|
||||||
|
for _, got := range responseTxs {
|
||||||
if _, exists := set[got.Hash()]; !exists {
|
if _, exists := set[got.Hash()]; !exists {
|
||||||
t.Fatalf("unexpected tx received: %v", got.Hash())
|
t.Fatalf("unexpected tx received: %v", got.Hash())
|
||||||
}
|
}
|
||||||
|
|
@ -976,7 +1086,9 @@ func (s *Suite) TestBlobViolations(t *utesting.T) {
|
||||||
if err := conn.ReadMsg(ethProto, eth.GetPooledTransactionsMsg, req); err != nil {
|
if err := conn.ReadMsg(ethProto, eth.GetPooledTransactionsMsg, req); err != nil {
|
||||||
t.Fatalf("reading pooled tx request failed: %v", err)
|
t.Fatalf("reading pooled tx request failed: %v", err)
|
||||||
}
|
}
|
||||||
resp := eth.PooledTransactionsPacket{RequestId: req.RequestId, PooledTransactionsResponse: test.resp}
|
|
||||||
|
encTxs, _ := rlp.EncodeToRawList(test.resp)
|
||||||
|
resp := eth.PooledTransactionsPacket{RequestId: req.RequestId, List: encTxs}
|
||||||
if err := conn.Write(ethProto, eth.PooledTransactionsMsg, resp); err != nil {
|
if err := conn.Write(ethProto, eth.PooledTransactionsMsg, resp); err != nil {
|
||||||
t.Fatalf("writing pooled tx response failed: %v", err)
|
t.Fatalf("writing pooled tx response failed: %v", err)
|
||||||
}
|
}
|
||||||
|
|
@ -1104,7 +1216,8 @@ func (s *Suite) testBadBlobTx(t *utesting.T, tx *types.Transaction, badTx *types
|
||||||
// the good peer is connected, and has announced the tx.
|
// the good peer is connected, and has announced the tx.
|
||||||
// proceed to send the incorrect one from the bad peer.
|
// proceed to send the incorrect one from the bad peer.
|
||||||
|
|
||||||
resp := eth.PooledTransactionsPacket{RequestId: req.RequestId, PooledTransactionsResponse: eth.PooledTransactionsResponse(types.Transactions{badTx})}
|
encTxs, _ := rlp.EncodeToRawList([]*types.Transaction{badTx})
|
||||||
|
resp := eth.PooledTransactionsPacket{RequestId: req.RequestId, List: encTxs}
|
||||||
if err := conn.Write(ethProto, eth.PooledTransactionsMsg, resp); err != nil {
|
if err := conn.Write(ethProto, eth.PooledTransactionsMsg, resp); err != nil {
|
||||||
errc <- fmt.Errorf("writing pooled tx response failed: %v", err)
|
errc <- fmt.Errorf("writing pooled tx response failed: %v", err)
|
||||||
return
|
return
|
||||||
|
|
@ -1164,7 +1277,8 @@ func (s *Suite) testBadBlobTx(t *utesting.T, tx *types.Transaction, badTx *types
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
resp := eth.PooledTransactionsPacket{RequestId: req.RequestId, PooledTransactionsResponse: eth.PooledTransactionsResponse(types.Transactions{tx})}
|
encTxs, _ := rlp.EncodeToRawList([]*types.Transaction{tx})
|
||||||
|
resp := eth.PooledTransactionsPacket{RequestId: req.RequestId, List: encTxs}
|
||||||
if err := conn.Write(ethProto, eth.PooledTransactionsMsg, resp); err != nil {
|
if err := conn.Write(ethProto, eth.PooledTransactionsMsg, resp); err != nil {
|
||||||
errc <- fmt.Errorf("writing pooled tx response failed: %v", err)
|
errc <- fmt.Errorf("writing pooled tx response failed: %v", err)
|
||||||
return
|
return
|
||||||
|
|
|
||||||
BIN
cmd/devp2p/internal/ethtest/testdata/chain.rlp
vendored
BIN
cmd/devp2p/internal/ethtest/testdata/chain.rlp
vendored
Binary file not shown.
|
|
@ -37,7 +37,7 @@
|
||||||
"nonce": "0x0",
|
"nonce": "0x0",
|
||||||
"timestamp": "0x0",
|
"timestamp": "0x0",
|
||||||
"extraData": "0x68697665636861696e",
|
"extraData": "0x68697665636861696e",
|
||||||
"gasLimit": "0x23f3e20",
|
"gasLimit": "0x11e1a300",
|
||||||
"difficulty": "0x20000",
|
"difficulty": "0x20000",
|
||||||
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||||
"coinbase": "0x0000000000000000000000000000000000000000",
|
"coinbase": "0x0000000000000000000000000000000000000000",
|
||||||
|
|
@ -119,6 +119,10 @@
|
||||||
"balance": "0x1",
|
"balance": "0x1",
|
||||||
"nonce": "0x1"
|
"nonce": "0x1"
|
||||||
},
|
},
|
||||||
|
"8dcd17433742f4c0ca53122ab541d0ba67fc27ff": {
|
||||||
|
"code": "0x6202e6306000a0",
|
||||||
|
"balance": "0x0"
|
||||||
|
},
|
||||||
"c7b99a164efd027a93f147376cc7da7c67c6bbe0": {
|
"c7b99a164efd027a93f147376cc7da7c67c6bbe0": {
|
||||||
"balance": "0xc097ce7bc90715b34b9f1000000000"
|
"balance": "0xc097ce7bc90715b34b9f1000000000"
|
||||||
},
|
},
|
||||||
|
|
|
||||||
|
|
@ -1,24 +1,24 @@
|
||||||
{
|
{
|
||||||
"parentHash": "0x65151b101682b54cd08ba226f640c14c86176865ff9bfc57e0147dadaeac34bb",
|
"parentHash": "0x7e80093a491eba0e5b2c1895837902f64f514100221801318fe391e1e09c96a6",
|
||||||
"sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
|
"sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
|
||||||
"miner": "0x0000000000000000000000000000000000000000",
|
"miner": "0x0000000000000000000000000000000000000000",
|
||||||
"stateRoot": "0xce423ebc60fc7764a43f09f1fe3ae61eef25e3eb8d09b1108f7e7eb77dfff5e6",
|
"stateRoot": "0x8fcfb02cfca007773bd55bc1c3e50a3c8612a59c87ce057e5957e8bf17c1728b",
|
||||||
"transactionsRoot": "0x7ec1ae3989efa75d7bcc766e5e2443afa8a89a5fda42ebba90050e7e702980f7",
|
"transactionsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
|
||||||
"receiptsRoot": "0xfe160832b1ca85f38c6674cb0aae3a24693bc49be56e2ecdf3698b71a794de86",
|
"receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
|
||||||
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||||
"difficulty": "0x0",
|
"difficulty": "0x0",
|
||||||
"number": "0x258",
|
"number": "0x258",
|
||||||
"gasLimit": "0x23f3e20",
|
"gasLimit": "0x11e1a300",
|
||||||
"gasUsed": "0x19d36",
|
"gasUsed": "0x0",
|
||||||
"timestamp": "0x1770",
|
"timestamp": "0x1770",
|
||||||
"extraData": "0x",
|
"extraData": "0x",
|
||||||
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||||
"nonce": "0x0000000000000000",
|
"nonce": "0x0000000000000000",
|
||||||
"baseFeePerGas": "0x7",
|
"baseFeePerGas": "0x7",
|
||||||
"withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
|
"withdrawalsRoot": "0x92abfda39de7df7d705c5a8f30386802ad59d31e782a06d5c5b0f9a260056cf0",
|
||||||
"blobGasUsed": "0x0",
|
"blobGasUsed": "0x0",
|
||||||
"excessBlobGas": "0x0",
|
"excessBlobGas": "0x0",
|
||||||
"parentBeaconBlockRoot": "0xf5003fc8f92358e790a114bce93ce1d9c283c85e1787f8d7d56714d3489b49e6",
|
"parentBeaconBlockRoot": "0xf5003fc8f92358e790a114bce93ce1d9c283c85e1787f8d7d56714d3489b49e6",
|
||||||
"requestsHash": "0xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
"requestsHash": "0xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||||
"hash": "0xce8d86ba17a2ec303155f0e264c58a4b8f94ce3436274cf1924f91acdb7502d0"
|
"hash": "0x44e3809c9a3cda717f00aea3a9da336d149612c8d5657fbc0028176ef8d94d2a"
|
||||||
}
|
}
|
||||||
|
|
@ -4,9 +4,9 @@
|
||||||
"method": "engine_forkchoiceUpdatedV3",
|
"method": "engine_forkchoiceUpdatedV3",
|
||||||
"params": [
|
"params": [
|
||||||
{
|
{
|
||||||
"headBlockHash": "0xce8d86ba17a2ec303155f0e264c58a4b8f94ce3436274cf1924f91acdb7502d0",
|
"headBlockHash": "0x44e3809c9a3cda717f00aea3a9da336d149612c8d5657fbc0028176ef8d94d2a",
|
||||||
"safeBlockHash": "0xce8d86ba17a2ec303155f0e264c58a4b8f94ce3436274cf1924f91acdb7502d0",
|
"safeBlockHash": "0x44e3809c9a3cda717f00aea3a9da336d149612c8d5657fbc0028176ef8d94d2a",
|
||||||
"finalizedBlockHash": "0xce8d86ba17a2ec303155f0e264c58a4b8f94ce3436274cf1924f91acdb7502d0"
|
"finalizedBlockHash": "0x44e3809c9a3cda717f00aea3a9da336d149612c8d5657fbc0028176ef8d94d2a"
|
||||||
},
|
},
|
||||||
null
|
null
|
||||||
]
|
]
|
||||||
|
|
|
||||||
4210
cmd/devp2p/internal/ethtest/testdata/headstate.json
vendored
4210
cmd/devp2p/internal/ethtest/testdata/headstate.json
vendored
File diff suppressed because it is too large
Load diff
10313
cmd/devp2p/internal/ethtest/testdata/newpayload.json
vendored
10313
cmd/devp2p/internal/ethtest/testdata/newpayload.json
vendored
File diff suppressed because it is too large
Load diff
2943
cmd/devp2p/internal/ethtest/testdata/txinfo.json
vendored
2943
cmd/devp2p/internal/ethtest/testdata/txinfo.json
vendored
File diff suppressed because it is too large
Load diff
|
|
@ -26,6 +26,7 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/eth/protocols/eth"
|
"github.com/ethereum/go-ethereum/eth/protocols/eth"
|
||||||
"github.com/ethereum/go-ethereum/internal/utesting"
|
"github.com/ethereum/go-ethereum/internal/utesting"
|
||||||
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
)
|
)
|
||||||
|
|
||||||
// sendTxs sends the given transactions to the node and
|
// sendTxs sends the given transactions to the node and
|
||||||
|
|
@ -51,7 +52,8 @@ func (s *Suite) sendTxs(t *utesting.T, txs []*types.Transaction) error {
|
||||||
return fmt.Errorf("peering failed: %v", err)
|
return fmt.Errorf("peering failed: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = sendConn.Write(ethProto, eth.TransactionsMsg, eth.TransactionsPacket(txs)); err != nil {
|
encTxs, _ := rlp.EncodeToRawList(txs)
|
||||||
|
if err = sendConn.Write(ethProto, eth.TransactionsMsg, eth.TransactionsPacket{RawList: encTxs}); err != nil {
|
||||||
return fmt.Errorf("failed to write message to connection: %v", err)
|
return fmt.Errorf("failed to write message to connection: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -68,7 +70,8 @@ func (s *Suite) sendTxs(t *utesting.T, txs []*types.Transaction) error {
|
||||||
}
|
}
|
||||||
switch msg := msg.(type) {
|
switch msg := msg.(type) {
|
||||||
case *eth.TransactionsPacket:
|
case *eth.TransactionsPacket:
|
||||||
for _, tx := range *msg {
|
txs, _ := msg.Items()
|
||||||
|
for _, tx := range txs {
|
||||||
got[tx.Hash()] = true
|
got[tx.Hash()] = true
|
||||||
}
|
}
|
||||||
case *eth.NewPooledTransactionHashesPacket:
|
case *eth.NewPooledTransactionHashesPacket:
|
||||||
|
|
@ -80,9 +83,10 @@ func (s *Suite) sendTxs(t *utesting.T, txs []*types.Transaction) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Logf("invalid GetBlockHeaders request: %v", err)
|
t.Logf("invalid GetBlockHeaders request: %v", err)
|
||||||
}
|
}
|
||||||
|
encHeaders, _ := rlp.EncodeToRawList(headers)
|
||||||
recvConn.Write(ethProto, eth.BlockHeadersMsg, ð.BlockHeadersPacket{
|
recvConn.Write(ethProto, eth.BlockHeadersMsg, ð.BlockHeadersPacket{
|
||||||
RequestId: msg.RequestId,
|
RequestId: msg.RequestId,
|
||||||
BlockHeadersRequest: headers,
|
List: encHeaders,
|
||||||
})
|
})
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf("unexpected eth wire msg: %s", pretty.Sdump(msg))
|
return fmt.Errorf("unexpected eth wire msg: %s", pretty.Sdump(msg))
|
||||||
|
|
@ -167,9 +171,10 @@ func (s *Suite) sendInvalidTxs(t *utesting.T, txs []*types.Transaction) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Logf("invalid GetBlockHeaders request: %v", err)
|
t.Logf("invalid GetBlockHeaders request: %v", err)
|
||||||
}
|
}
|
||||||
|
encHeaders, _ := rlp.EncodeToRawList(headers)
|
||||||
recvConn.Write(ethProto, eth.BlockHeadersMsg, ð.BlockHeadersPacket{
|
recvConn.Write(ethProto, eth.BlockHeadersMsg, ð.BlockHeadersPacket{
|
||||||
RequestId: msg.RequestId,
|
RequestId: msg.RequestId,
|
||||||
BlockHeadersRequest: headers,
|
List: encHeaders,
|
||||||
})
|
})
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf("unexpected eth message: %v", pretty.Sdump(msg))
|
return fmt.Errorf("unexpected eth message: %v", pretty.Sdump(msg))
|
||||||
|
|
|
||||||
|
|
@ -52,7 +52,7 @@ func (s *Suite) AllTests() []utesting.Test {
|
||||||
{Name: "Ping", Fn: s.TestPing},
|
{Name: "Ping", Fn: s.TestPing},
|
||||||
{Name: "PingLargeRequestID", Fn: s.TestPingLargeRequestID},
|
{Name: "PingLargeRequestID", Fn: s.TestPingLargeRequestID},
|
||||||
{Name: "PingMultiIP", Fn: s.TestPingMultiIP},
|
{Name: "PingMultiIP", Fn: s.TestPingMultiIP},
|
||||||
{Name: "PingHandshakeInterrupted", Fn: s.TestPingHandshakeInterrupted},
|
{Name: "HandshakeResend", Fn: s.TestHandshakeResend},
|
||||||
{Name: "TalkRequest", Fn: s.TestTalkRequest},
|
{Name: "TalkRequest", Fn: s.TestTalkRequest},
|
||||||
{Name: "FindnodeZeroDistance", Fn: s.TestFindnodeZeroDistance},
|
{Name: "FindnodeZeroDistance", Fn: s.TestFindnodeZeroDistance},
|
||||||
{Name: "FindnodeResults", Fn: s.TestFindnodeResults},
|
{Name: "FindnodeResults", Fn: s.TestFindnodeResults},
|
||||||
|
|
@ -158,22 +158,20 @@ the attempt from a different IP.`)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestPingHandshakeInterrupted starts a handshake, but doesn't finish it and sends a second ordinary message
|
// TestHandshakeResend starts a handshake, but doesn't finish it and sends a second ordinary message
|
||||||
// packet instead of a handshake message packet. The remote node should respond with
|
// packet instead of a handshake message packet. The remote node should repeat the previous WHOAREYOU
|
||||||
// another WHOAREYOU challenge for the second packet.
|
// challenge for the first PING.
|
||||||
func (s *Suite) TestPingHandshakeInterrupted(t *utesting.T) {
|
func (s *Suite) TestHandshakeResend(t *utesting.T) {
|
||||||
t.Log(`TestPingHandshakeInterrupted starts a handshake, but doesn't finish it and sends a second ordinary message
|
|
||||||
packet instead of a handshake message packet. The remote node should respond with
|
|
||||||
another WHOAREYOU challenge for the second packet.`)
|
|
||||||
|
|
||||||
conn, l1 := s.listen1(t)
|
conn, l1 := s.listen1(t)
|
||||||
defer conn.close()
|
defer conn.close()
|
||||||
|
|
||||||
// First PING triggers challenge.
|
// First PING triggers challenge.
|
||||||
ping := &v5wire.Ping{ReqID: conn.nextReqID()}
|
ping := &v5wire.Ping{ReqID: conn.nextReqID()}
|
||||||
conn.write(l1, ping, nil)
|
conn.write(l1, ping, nil)
|
||||||
|
var challenge1 *v5wire.Whoareyou
|
||||||
switch resp := conn.read(l1).(type) {
|
switch resp := conn.read(l1).(type) {
|
||||||
case *v5wire.Whoareyou:
|
case *v5wire.Whoareyou:
|
||||||
|
challenge1 = resp
|
||||||
t.Logf("got WHOAREYOU for PING")
|
t.Logf("got WHOAREYOU for PING")
|
||||||
default:
|
default:
|
||||||
t.Fatal("expected WHOAREYOU, got", resp)
|
t.Fatal("expected WHOAREYOU, got", resp)
|
||||||
|
|
@ -181,9 +179,16 @@ another WHOAREYOU challenge for the second packet.`)
|
||||||
|
|
||||||
// Send second PING.
|
// Send second PING.
|
||||||
ping2 := &v5wire.Ping{ReqID: conn.nextReqID()}
|
ping2 := &v5wire.Ping{ReqID: conn.nextReqID()}
|
||||||
switch resp := conn.reqresp(l1, ping2).(type) {
|
conn.write(l1, ping2, nil)
|
||||||
case *v5wire.Pong:
|
switch resp := conn.read(l1).(type) {
|
||||||
checkPong(t, resp, ping2, l1)
|
case *v5wire.Whoareyou:
|
||||||
|
if resp.Nonce != challenge1.Nonce {
|
||||||
|
t.Fatalf("wrong nonce %x in WHOAREYOU (want %x)", resp.Nonce[:], challenge1.Nonce[:])
|
||||||
|
}
|
||||||
|
if !bytes.Equal(resp.ChallengeData, challenge1.ChallengeData) {
|
||||||
|
t.Fatalf("wrong ChallengeData in resent WHOAREYOU (want %x)", resp.ChallengeData, challenge1.ChallengeData)
|
||||||
|
}
|
||||||
|
resp.Node = conn.remote
|
||||||
default:
|
default:
|
||||||
t.Fatal("expected WHOAREYOU, got", resp)
|
t.Fatal("expected WHOAREYOU, got", resp)
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -218,11 +218,15 @@ func (tc *conn) read(c net.PacketConn) v5wire.Packet {
|
||||||
if err := c.SetReadDeadline(time.Now().Add(waitTime)); err != nil {
|
if err := c.SetReadDeadline(time.Now().Add(waitTime)); err != nil {
|
||||||
return &readError{err}
|
return &readError{err}
|
||||||
}
|
}
|
||||||
n, fromAddr, err := c.ReadFrom(buf)
|
n, _, err := c.ReadFrom(buf)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return &readError{err}
|
return &readError{err}
|
||||||
}
|
}
|
||||||
_, _, p, err := tc.codec.Decode(buf[:n], fromAddr.String())
|
// Always use tc.remoteAddr for session lookup. The actual source address of
|
||||||
|
// the packet may differ from tc.remoteAddr when the remote node is reachable
|
||||||
|
// via multiple networks (e.g. Docker bridge vs. overlay), but the codec's
|
||||||
|
// session cache is keyed by the address used during Encode.
|
||||||
|
_, _, p, err := tc.codec.Decode(buf[:n], tc.remoteAddr.String())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return &readError{err}
|
return &readError{err}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
118
cmd/era/main.go
118
cmd/era/main.go
|
|
@ -30,6 +30,8 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/internal/era"
|
"github.com/ethereum/go-ethereum/internal/era"
|
||||||
|
"github.com/ethereum/go-ethereum/internal/era/execdb"
|
||||||
|
"github.com/ethereum/go-ethereum/internal/era/onedb"
|
||||||
"github.com/ethereum/go-ethereum/internal/ethapi"
|
"github.com/ethereum/go-ethereum/internal/ethapi"
|
||||||
"github.com/ethereum/go-ethereum/internal/flags"
|
"github.com/ethereum/go-ethereum/internal/flags"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
|
|
@ -53,7 +55,7 @@ var (
|
||||||
eraSizeFlag = &cli.IntFlag{
|
eraSizeFlag = &cli.IntFlag{
|
||||||
Name: "size",
|
Name: "size",
|
||||||
Usage: "number of blocks per era",
|
Usage: "number of blocks per era",
|
||||||
Value: era.MaxEra1Size,
|
Value: era.MaxSize,
|
||||||
}
|
}
|
||||||
txsFlag = &cli.BoolFlag{
|
txsFlag = &cli.BoolFlag{
|
||||||
Name: "txs",
|
Name: "txs",
|
||||||
|
|
@ -131,7 +133,7 @@ func block(ctx *cli.Context) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// info prints some high-level information about the era1 file.
|
// info prints some high-level information about the era file.
|
||||||
func info(ctx *cli.Context) error {
|
func info(ctx *cli.Context) error {
|
||||||
epoch, err := strconv.ParseUint(ctx.Args().First(), 10, 64)
|
epoch, err := strconv.ParseUint(ctx.Args().First(), 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
@ -142,33 +144,34 @@ func info(ctx *cli.Context) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer e.Close()
|
defer e.Close()
|
||||||
acc, err := e.Accumulator()
|
var (
|
||||||
if err != nil {
|
accHex string
|
||||||
return fmt.Errorf("error reading accumulator: %w", err)
|
tdStr string
|
||||||
|
)
|
||||||
|
if acc, err := e.Accumulator(); err == nil {
|
||||||
|
accHex = acc.Hex()
|
||||||
}
|
}
|
||||||
td, err := e.InitialTD()
|
if td, err := e.InitialTD(); err == nil {
|
||||||
if err != nil {
|
tdStr = td.String()
|
||||||
return fmt.Errorf("error reading total difficulty: %w", err)
|
|
||||||
}
|
}
|
||||||
info := struct {
|
info := struct {
|
||||||
Accumulator common.Hash `json:"accumulator"`
|
Accumulator string `json:"accumulator,omitempty"`
|
||||||
TotalDifficulty *big.Int `json:"totalDifficulty"`
|
TotalDifficulty string `json:"totalDifficulty,omitempty"`
|
||||||
StartBlock uint64 `json:"startBlock"`
|
StartBlock uint64 `json:"startBlock"`
|
||||||
Count uint64 `json:"count"`
|
Count uint64 `json:"count"`
|
||||||
}{
|
}{
|
||||||
acc, td, e.Start(), e.Count(),
|
accHex, tdStr, e.Start(), e.Count(),
|
||||||
}
|
}
|
||||||
b, _ := json.MarshalIndent(info, "", " ")
|
b, _ := json.MarshalIndent(info, "", " ")
|
||||||
fmt.Println(string(b))
|
fmt.Println(string(b))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// open opens an era1 file at a certain epoch.
|
// open opens an era file at a certain epoch.
|
||||||
func open(ctx *cli.Context, epoch uint64) (*era.Era, error) {
|
func open(ctx *cli.Context, epoch uint64) (era.Era, error) {
|
||||||
var (
|
dir := ctx.String(dirFlag.Name)
|
||||||
dir = ctx.String(dirFlag.Name)
|
network := ctx.String(networkFlag.Name)
|
||||||
network = ctx.String(networkFlag.Name)
|
|
||||||
)
|
|
||||||
entries, err := era.ReadDir(dir, network)
|
entries, err := era.ReadDir(dir, network)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("error reading era dir: %w", err)
|
return nil, fmt.Errorf("error reading era dir: %w", err)
|
||||||
|
|
@ -176,7 +179,28 @@ func open(ctx *cli.Context, epoch uint64) (*era.Era, error) {
|
||||||
if epoch >= uint64(len(entries)) {
|
if epoch >= uint64(len(entries)) {
|
||||||
return nil, fmt.Errorf("epoch out-of-bounds: last %d, want %d", len(entries)-1, epoch)
|
return nil, fmt.Errorf("epoch out-of-bounds: last %d, want %d", len(entries)-1, epoch)
|
||||||
}
|
}
|
||||||
return era.Open(filepath.Join(dir, entries[epoch]))
|
path := filepath.Join(dir, entries[epoch])
|
||||||
|
return openByPath(path)
|
||||||
|
}
|
||||||
|
|
||||||
|
// openByPath tries to open a single file as either eraE or era1 based on extension,
|
||||||
|
// falling back to the other reader if needed.
|
||||||
|
func openByPath(path string) (era.Era, error) {
|
||||||
|
switch strings.ToLower(filepath.Ext(path)) {
|
||||||
|
case ".erae":
|
||||||
|
if e, err := execdb.Open(path); err != nil {
|
||||||
|
return nil, err
|
||||||
|
} else {
|
||||||
|
return e, nil
|
||||||
|
}
|
||||||
|
case ".era1":
|
||||||
|
if e, err := onedb.Open(path); err != nil {
|
||||||
|
return nil, err
|
||||||
|
} else {
|
||||||
|
return e, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("unsupported or unreadable era file: %s", path)
|
||||||
}
|
}
|
||||||
|
|
||||||
// verify checks each era1 file in a directory to ensure it is well-formed and
|
// verify checks each era1 file in a directory to ensure it is well-formed and
|
||||||
|
|
@ -203,18 +227,58 @@ func verify(ctx *cli.Context) error {
|
||||||
return fmt.Errorf("error reading %s: %w", dir, err)
|
return fmt.Errorf("error reading %s: %w", dir, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(entries) != len(roots) {
|
// Build the verification list respecting the rule:
|
||||||
return errors.New("number of era1 files should match the number of accumulator hashes")
|
// era1: must have accumulator, always verify
|
||||||
|
// erae: verify only if accumulator exists (pre-merge)
|
||||||
|
|
||||||
|
// Build list of files to verify.
|
||||||
|
verify := make([]string, 0, len(entries))
|
||||||
|
|
||||||
|
for _, name := range entries {
|
||||||
|
path := filepath.Join(dir, name)
|
||||||
|
ext := strings.ToLower(filepath.Ext(name))
|
||||||
|
|
||||||
|
switch ext {
|
||||||
|
case ".era1":
|
||||||
|
e, err := onedb.Open(path)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error opening era1 file %s: %w", name, err)
|
||||||
|
}
|
||||||
|
_, accErr := e.Accumulator()
|
||||||
|
e.Close()
|
||||||
|
if accErr != nil {
|
||||||
|
return fmt.Errorf("era1 file %s missing accumulator: %w", name, accErr)
|
||||||
|
}
|
||||||
|
verify = append(verify, path)
|
||||||
|
|
||||||
|
case ".erae":
|
||||||
|
e, err := execdb.Open(path)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error opening erae file %s: %w", name, err)
|
||||||
|
}
|
||||||
|
_, accErr := e.Accumulator()
|
||||||
|
e.Close()
|
||||||
|
if accErr == nil {
|
||||||
|
verify = append(verify, path) // pre-merge only
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("unsupported era file: %s", name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(verify) != len(roots) {
|
||||||
|
return fmt.Errorf("mismatch between eras to verify (%d) and provided roots (%d)", len(verify), len(roots))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Verify each epoch matches the expected root.
|
// Verify each epoch matches the expected root.
|
||||||
for i, want := range roots {
|
for i, want := range roots {
|
||||||
// Wrap in function so defers don't stack.
|
// Wrap in function so defers don't stack.
|
||||||
err := func() error {
|
err := func() error {
|
||||||
name := entries[i]
|
path := verify[i]
|
||||||
e, err := era.Open(filepath.Join(dir, name))
|
name := filepath.Base(path)
|
||||||
|
e, err := openByPath(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error opening era1 file %s: %w", name, err)
|
return fmt.Errorf("error opening era file %s: %w", name, err)
|
||||||
}
|
}
|
||||||
defer e.Close()
|
defer e.Close()
|
||||||
// Read accumulator and check against expected.
|
// Read accumulator and check against expected.
|
||||||
|
|
@ -243,7 +307,7 @@ func verify(ctx *cli.Context) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// checkAccumulator verifies the accumulator matches the data in the Era.
|
// checkAccumulator verifies the accumulator matches the data in the Era.
|
||||||
func checkAccumulator(e *era.Era) error {
|
func checkAccumulator(e era.Era) error {
|
||||||
var (
|
var (
|
||||||
err error
|
err error
|
||||||
want common.Hash
|
want common.Hash
|
||||||
|
|
@ -257,7 +321,7 @@ func checkAccumulator(e *era.Era) error {
|
||||||
if td, err = e.InitialTD(); err != nil {
|
if td, err = e.InitialTD(); err != nil {
|
||||||
return fmt.Errorf("error reading total difficulty: %w", err)
|
return fmt.Errorf("error reading total difficulty: %w", err)
|
||||||
}
|
}
|
||||||
it, err := era.NewIterator(e)
|
it, err := e.Iterator()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error making era iterator: %w", err)
|
return fmt.Errorf("error making era iterator: %w", err)
|
||||||
}
|
}
|
||||||
|
|
@ -290,10 +354,14 @@ func checkAccumulator(e *era.Era) error {
|
||||||
if rr != block.ReceiptHash() {
|
if rr != block.ReceiptHash() {
|
||||||
return fmt.Errorf("receipt root in block %d mismatch: want %s, got %s", block.NumberU64(), block.ReceiptHash(), rr)
|
return fmt.Errorf("receipt root in block %d mismatch: want %s, got %s", block.NumberU64(), block.ReceiptHash(), rr)
|
||||||
}
|
}
|
||||||
|
// Only include pre-merge blocks in accumulator calculation.
|
||||||
|
// Post-merge blocks have difficulty == 0.
|
||||||
|
if block.Difficulty().Sign() > 0 {
|
||||||
hashes = append(hashes, block.Hash())
|
hashes = append(hashes, block.Hash())
|
||||||
td.Add(td, block.Difficulty())
|
td.Add(td, block.Difficulty())
|
||||||
tds = append(tds, new(big.Int).Set(td))
|
tds = append(tds, new(big.Int).Set(td))
|
||||||
}
|
}
|
||||||
|
}
|
||||||
if it.Error() != nil {
|
if it.Error() != nil {
|
||||||
return fmt.Errorf("error reading block %d: %w", it.Number(), it.Error())
|
return fmt.Errorf("error reading block %d: %w", it.Number(), it.Error())
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -17,16 +17,18 @@
|
||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bufio"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"maps"
|
"maps"
|
||||||
"os"
|
"os"
|
||||||
"regexp"
|
"regexp"
|
||||||
"slices"
|
"slices"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core"
|
"github.com/ethereum/go-ethereum/core"
|
||||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||||
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/tests"
|
"github.com/ethereum/go-ethereum/tests"
|
||||||
"github.com/urfave/cli/v2"
|
"github.com/urfave/cli/v2"
|
||||||
)
|
)
|
||||||
|
|
@ -34,21 +36,22 @@ import (
|
||||||
var blockTestCommand = &cli.Command{
|
var blockTestCommand = &cli.Command{
|
||||||
Action: blockTestCmd,
|
Action: blockTestCmd,
|
||||||
Name: "blocktest",
|
Name: "blocktest",
|
||||||
Usage: "Executes the given blockchain tests",
|
Usage: "Executes the given blockchain tests. Filenames can be fed via standard input (batch mode) or as an argument (one-off execution).",
|
||||||
ArgsUsage: "<path>",
|
ArgsUsage: "<path>",
|
||||||
Flags: slices.Concat([]cli.Flag{
|
Flags: slices.Concat([]cli.Flag{
|
||||||
DumpFlag,
|
DumpFlag,
|
||||||
HumanReadableFlag,
|
HumanReadableFlag,
|
||||||
RunFlag,
|
RunFlag,
|
||||||
WitnessCrossCheckFlag,
|
WitnessCrossCheckFlag,
|
||||||
|
FuzzFlag,
|
||||||
}, traceFlags),
|
}, traceFlags),
|
||||||
}
|
}
|
||||||
|
|
||||||
func blockTestCmd(ctx *cli.Context) error {
|
func blockTestCmd(ctx *cli.Context) error {
|
||||||
path := ctx.Args().First()
|
path := ctx.Args().First()
|
||||||
if len(path) == 0 {
|
|
||||||
return errors.New("path argument required")
|
// If path is provided, run the tests at that path.
|
||||||
}
|
if len(path) != 0 {
|
||||||
var (
|
var (
|
||||||
collected = collectFiles(path)
|
collected = collectFiles(path)
|
||||||
results []testResult
|
results []testResult
|
||||||
|
|
@ -62,6 +65,24 @@ func blockTestCmd(ctx *cli.Context) error {
|
||||||
}
|
}
|
||||||
report(ctx, results)
|
report(ctx, results)
|
||||||
return nil
|
return nil
|
||||||
|
}
|
||||||
|
// Otherwise, read filenames from stdin and execute back-to-back.
|
||||||
|
scanner := bufio.NewScanner(os.Stdin)
|
||||||
|
for scanner.Scan() {
|
||||||
|
fname := scanner.Text()
|
||||||
|
if len(fname) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
results, err := runBlockTest(ctx, fname)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// During fuzzing, we report the result after every block
|
||||||
|
if !ctx.IsSet(FuzzFlag.Name) {
|
||||||
|
report(ctx, results)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func runBlockTest(ctx *cli.Context, fname string) ([]testResult, error) {
|
func runBlockTest(ctx *cli.Context, fname string) ([]testResult, error) {
|
||||||
|
|
@ -79,6 +100,11 @@ func runBlockTest(ctx *cli.Context, fname string) ([]testResult, error) {
|
||||||
}
|
}
|
||||||
tracer := tracerFromFlags(ctx)
|
tracer := tracerFromFlags(ctx)
|
||||||
|
|
||||||
|
// Suppress INFO logs during fuzzing
|
||||||
|
if ctx.IsSet(FuzzFlag.Name) {
|
||||||
|
log.SetDefault(log.NewLogger(log.DiscardHandler()))
|
||||||
|
}
|
||||||
|
|
||||||
// Pull out keys to sort and ensure tests are run in order.
|
// Pull out keys to sort and ensure tests are run in order.
|
||||||
keys := slices.Sorted(maps.Keys(tests))
|
keys := slices.Sorted(maps.Keys(tests))
|
||||||
|
|
||||||
|
|
@ -88,16 +114,35 @@ func runBlockTest(ctx *cli.Context, fname string) ([]testResult, error) {
|
||||||
if !re.MatchString(name) {
|
if !re.MatchString(name) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
test := tests[name]
|
||||||
result := &testResult{Name: name, Pass: true}
|
result := &testResult{Name: name, Pass: true}
|
||||||
if err := tests[name].Run(false, rawdb.PathScheme, ctx.Bool(WitnessCrossCheckFlag.Name), tracer, func(res error, chain *core.BlockChain) {
|
var finalRoot *common.Hash
|
||||||
|
if err := test.Run(false, rawdb.PathScheme, ctx.Bool(WitnessCrossCheckFlag.Name), tracer, func(res error, chain *core.BlockChain) {
|
||||||
if ctx.Bool(DumpFlag.Name) {
|
if ctx.Bool(DumpFlag.Name) {
|
||||||
if s, _ := chain.State(); s != nil {
|
if s, _ := chain.State(); s != nil {
|
||||||
result.State = dump(s)
|
result.State = dump(s)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
// Capture final state root for end marker
|
||||||
|
if chain != nil {
|
||||||
|
root := chain.CurrentBlock().Root
|
||||||
|
finalRoot = &root
|
||||||
|
}
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
result.Pass, result.Error = false, err.Error()
|
result.Pass, result.Error = false, err.Error()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Always assign fork (regardless of pass/fail or tracer)
|
||||||
|
result.Fork = test.Network()
|
||||||
|
// Assign root if test succeeded
|
||||||
|
if result.Pass && finalRoot != nil {
|
||||||
|
result.Root = finalRoot
|
||||||
|
}
|
||||||
|
|
||||||
|
// When fuzzing, write results after every block
|
||||||
|
if ctx.IsSet(FuzzFlag.Name) {
|
||||||
|
report(ctx, []testResult{*result})
|
||||||
|
}
|
||||||
results = append(results, *result)
|
results = append(results, *result)
|
||||||
}
|
}
|
||||||
return results, nil
|
return results, nil
|
||||||
|
|
|
||||||
|
|
@ -56,6 +56,7 @@ type header struct {
|
||||||
BlobGasUsed *uint64 `json:"blobGasUsed" rlp:"optional"`
|
BlobGasUsed *uint64 `json:"blobGasUsed" rlp:"optional"`
|
||||||
ExcessBlobGas *uint64 `json:"excessBlobGas" rlp:"optional"`
|
ExcessBlobGas *uint64 `json:"excessBlobGas" rlp:"optional"`
|
||||||
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
|
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
|
||||||
|
SlotNumber *uint64 `json:"slotNumber" rlp:"optional"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type headerMarshaling struct {
|
type headerMarshaling struct {
|
||||||
|
|
@ -68,6 +69,7 @@ type headerMarshaling struct {
|
||||||
BaseFee *math.HexOrDecimal256
|
BaseFee *math.HexOrDecimal256
|
||||||
BlobGasUsed *math.HexOrDecimal64
|
BlobGasUsed *math.HexOrDecimal64
|
||||||
ExcessBlobGas *math.HexOrDecimal64
|
ExcessBlobGas *math.HexOrDecimal64
|
||||||
|
SlotNumber *math.HexOrDecimal64
|
||||||
}
|
}
|
||||||
|
|
||||||
type bbInput struct {
|
type bbInput struct {
|
||||||
|
|
@ -136,6 +138,7 @@ func (i *bbInput) ToBlock() *types.Block {
|
||||||
BlobGasUsed: i.Header.BlobGasUsed,
|
BlobGasUsed: i.Header.BlobGasUsed,
|
||||||
ExcessBlobGas: i.Header.ExcessBlobGas,
|
ExcessBlobGas: i.Header.ExcessBlobGas,
|
||||||
ParentBeaconRoot: i.Header.ParentBeaconBlockRoot,
|
ParentBeaconRoot: i.Header.ParentBeaconBlockRoot,
|
||||||
|
SlotNumber: i.Header.SlotNumber,
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fill optional values.
|
// Fill optional values.
|
||||||
|
|
|
||||||
|
|
@ -18,6 +18,7 @@ package t8ntool
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
stdmath "math"
|
||||||
"math/big"
|
"math/big"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
|
@ -32,6 +33,7 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/core/tracing"
|
"github.com/ethereum/go-ethereum/core/tracing"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/core/vm"
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
|
"github.com/ethereum/go-ethereum/crypto/keccak"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
|
|
@ -39,12 +41,12 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/trie"
|
"github.com/ethereum/go-ethereum/trie"
|
||||||
"github.com/ethereum/go-ethereum/triedb"
|
"github.com/ethereum/go-ethereum/triedb"
|
||||||
"github.com/holiman/uint256"
|
"github.com/holiman/uint256"
|
||||||
"golang.org/x/crypto/sha3"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type Prestate struct {
|
type Prestate struct {
|
||||||
Env stEnv `json:"env"`
|
Env stEnv `json:"env"`
|
||||||
Pre types.GenesisAlloc `json:"pre"`
|
Pre types.GenesisAlloc `json:"pre"`
|
||||||
|
TreeLeaves map[common.Hash]hexutil.Bytes `json:"vkt,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
//go:generate go run github.com/fjl/gencodec -type ExecutionResult -field-override executionResultMarshaling -out gen_execresult.go
|
//go:generate go run github.com/fjl/gencodec -type ExecutionResult -field-override executionResultMarshaling -out gen_execresult.go
|
||||||
|
|
@ -100,6 +102,7 @@ type stEnv struct {
|
||||||
ParentExcessBlobGas *uint64 `json:"parentExcessBlobGas,omitempty"`
|
ParentExcessBlobGas *uint64 `json:"parentExcessBlobGas,omitempty"`
|
||||||
ParentBlobGasUsed *uint64 `json:"parentBlobGasUsed,omitempty"`
|
ParentBlobGasUsed *uint64 `json:"parentBlobGasUsed,omitempty"`
|
||||||
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"`
|
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"`
|
||||||
|
SlotNumber *uint64 `json:"slotNumber"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type stEnvMarshaling struct {
|
type stEnvMarshaling struct {
|
||||||
|
|
@ -118,6 +121,7 @@ type stEnvMarshaling struct {
|
||||||
ExcessBlobGas *math.HexOrDecimal64
|
ExcessBlobGas *math.HexOrDecimal64
|
||||||
ParentExcessBlobGas *math.HexOrDecimal64
|
ParentExcessBlobGas *math.HexOrDecimal64
|
||||||
ParentBlobGasUsed *math.HexOrDecimal64
|
ParentBlobGasUsed *math.HexOrDecimal64
|
||||||
|
SlotNumber *math.HexOrDecimal64
|
||||||
}
|
}
|
||||||
|
|
||||||
type rejectedTx struct {
|
type rejectedTx struct {
|
||||||
|
|
@ -142,17 +146,16 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
||||||
return h
|
return h
|
||||||
}
|
}
|
||||||
var (
|
var (
|
||||||
statedb = MakePreState(rawdb.NewMemoryDatabase(), pre.Pre)
|
isEIP4762 = chainConfig.IsVerkle(big.NewInt(int64(pre.Env.Number)), pre.Env.Timestamp)
|
||||||
|
statedb = MakePreState(rawdb.NewMemoryDatabase(), pre.Pre, isEIP4762)
|
||||||
signer = types.MakeSigner(chainConfig, new(big.Int).SetUint64(pre.Env.Number), pre.Env.Timestamp)
|
signer = types.MakeSigner(chainConfig, new(big.Int).SetUint64(pre.Env.Number), pre.Env.Timestamp)
|
||||||
gaspool = new(core.GasPool)
|
gaspool = core.NewGasPool(pre.Env.GasLimit)
|
||||||
blockHash = common.Hash{0x13, 0x37}
|
blockHash = common.Hash{0x13, 0x37}
|
||||||
rejectedTxs []*rejectedTx
|
rejectedTxs []*rejectedTx
|
||||||
includedTxs types.Transactions
|
includedTxs types.Transactions
|
||||||
gasUsed = uint64(0)
|
|
||||||
blobGasUsed = uint64(0)
|
blobGasUsed = uint64(0)
|
||||||
receipts = make(types.Receipts, 0)
|
receipts = make(types.Receipts, 0)
|
||||||
)
|
)
|
||||||
gaspool.AddGas(pre.Env.GasLimit)
|
|
||||||
vmContext := vm.BlockContext{
|
vmContext := vm.BlockContext{
|
||||||
CanTransfer: core.CanTransfer,
|
CanTransfer: core.CanTransfer,
|
||||||
Transfer: core.Transfer,
|
Transfer: core.Transfer,
|
||||||
|
|
@ -192,6 +195,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
||||||
ExcessBlobGas: pre.Env.ParentExcessBlobGas,
|
ExcessBlobGas: pre.Env.ParentExcessBlobGas,
|
||||||
BlobGasUsed: pre.Env.ParentBlobGasUsed,
|
BlobGasUsed: pre.Env.ParentBlobGasUsed,
|
||||||
BaseFee: pre.Env.ParentBaseFee,
|
BaseFee: pre.Env.ParentBaseFee,
|
||||||
|
SlotNumber: pre.Env.SlotNumber,
|
||||||
}
|
}
|
||||||
header := &types.Header{
|
header := &types.Header{
|
||||||
Time: pre.Env.Timestamp,
|
Time: pre.Env.Timestamp,
|
||||||
|
|
@ -252,16 +256,19 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
||||||
statedb.SetTxContext(tx.Hash(), len(receipts))
|
statedb.SetTxContext(tx.Hash(), len(receipts))
|
||||||
var (
|
var (
|
||||||
snapshot = statedb.Snapshot()
|
snapshot = statedb.Snapshot()
|
||||||
prevGas = gaspool.Gas()
|
gp = gaspool.Snapshot()
|
||||||
)
|
)
|
||||||
receipt, err := core.ApplyTransactionWithEVM(msg, gaspool, statedb, vmContext.BlockNumber, blockHash, pre.Env.Timestamp, tx, &gasUsed, evm)
|
receipt, err := core.ApplyTransactionWithEVM(msg, gaspool, statedb, vmContext.BlockNumber, blockHash, pre.Env.Timestamp, tx, evm)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
statedb.RevertToSnapshot(snapshot)
|
statedb.RevertToSnapshot(snapshot)
|
||||||
log.Info("rejected tx", "index", i, "hash", tx.Hash(), "from", msg.From, "error", err)
|
log.Info("rejected tx", "index", i, "hash", tx.Hash(), "from", msg.From, "error", err)
|
||||||
rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()})
|
rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()})
|
||||||
gaspool.SetGas(prevGas)
|
gaspool.Set(gp)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
if receipt.Logs == nil {
|
||||||
|
receipt.Logs = []*types.Log{}
|
||||||
|
}
|
||||||
includedTxs = append(includedTxs, tx)
|
includedTxs = append(includedTxs, tx)
|
||||||
if hashError != nil {
|
if hashError != nil {
|
||||||
return nil, nil, nil, NewError(ErrorMissingBlockhash, hashError)
|
return nil, nil, nil, NewError(ErrorMissingBlockhash, hashError)
|
||||||
|
|
@ -301,6 +308,10 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
||||||
// Amount is in gwei, turn into wei
|
// Amount is in gwei, turn into wei
|
||||||
amount := new(big.Int).Mul(new(big.Int).SetUint64(w.Amount), big.NewInt(params.GWei))
|
amount := new(big.Int).Mul(new(big.Int).SetUint64(w.Amount), big.NewInt(params.GWei))
|
||||||
statedb.AddBalance(w.Address, uint256.MustFromBig(amount), tracing.BalanceIncreaseWithdrawal)
|
statedb.AddBalance(w.Address, uint256.MustFromBig(amount), tracing.BalanceIncreaseWithdrawal)
|
||||||
|
|
||||||
|
if isEIP4762 {
|
||||||
|
statedb.AccessEvents().AddAccount(w.Address, true, stdmath.MaxUint64)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Gather the execution-layer triggered requests.
|
// Gather the execution-layer triggered requests.
|
||||||
|
|
@ -339,7 +350,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
||||||
Receipts: receipts,
|
Receipts: receipts,
|
||||||
Rejected: rejectedTxs,
|
Rejected: rejectedTxs,
|
||||||
Difficulty: (*math.HexOrDecimal256)(vmContext.Difficulty),
|
Difficulty: (*math.HexOrDecimal256)(vmContext.Difficulty),
|
||||||
GasUsed: (math.HexOrDecimal64)(gasUsed),
|
GasUsed: (math.HexOrDecimal64)(gaspool.Used()),
|
||||||
BaseFee: (*math.HexOrDecimal256)(vmContext.BaseFee),
|
BaseFee: (*math.HexOrDecimal256)(vmContext.BaseFee),
|
||||||
}
|
}
|
||||||
if pre.Env.Withdrawals != nil {
|
if pre.Env.Withdrawals != nil {
|
||||||
|
|
@ -354,15 +365,10 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
||||||
// Set requestsHash on block.
|
// Set requestsHash on block.
|
||||||
h := types.CalcRequestsHash(requests)
|
h := types.CalcRequestsHash(requests)
|
||||||
execRs.RequestsHash = &h
|
execRs.RequestsHash = &h
|
||||||
for i := range requests {
|
|
||||||
// remove prefix
|
|
||||||
requests[i] = requests[i][1:]
|
|
||||||
}
|
|
||||||
execRs.Requests = requests
|
execRs.Requests = requests
|
||||||
}
|
}
|
||||||
|
|
||||||
// Re-create statedb instance with new root upon the updated database
|
// Re-create statedb instance with new root for MPT mode
|
||||||
// for accessing latest states.
|
|
||||||
statedb, err = state.New(root, statedb.Database())
|
statedb, err = state.New(root, statedb.Database())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not reopen state: %v", err))
|
return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not reopen state: %v", err))
|
||||||
|
|
@ -371,12 +377,17 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
||||||
return statedb, execRs, body, nil
|
return statedb, execRs, body, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB {
|
func MakePreState(db ethdb.Database, accounts types.GenesisAlloc, isBintrie bool) *state.StateDB {
|
||||||
tdb := triedb.NewDatabase(db, &triedb.Config{Preimages: true})
|
tdb := triedb.NewDatabase(db, &triedb.Config{Preimages: true, IsVerkle: isBintrie})
|
||||||
sdb := state.NewDatabase(tdb, nil)
|
sdb := state.NewDatabase(tdb, nil)
|
||||||
statedb, err := state.New(types.EmptyRootHash, sdb)
|
|
||||||
|
root := types.EmptyRootHash
|
||||||
|
if isBintrie {
|
||||||
|
root = types.EmptyBinaryHash
|
||||||
|
}
|
||||||
|
statedb, err := state.New(root, sdb)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(fmt.Errorf("failed to create initial state: %v", err))
|
panic(fmt.Errorf("failed to create initial statedb: %v", err))
|
||||||
}
|
}
|
||||||
for addr, a := range accounts {
|
for addr, a := range accounts {
|
||||||
statedb.SetCode(addr, a.Code, tracing.CodeChangeUnspecified)
|
statedb.SetCode(addr, a.Code, tracing.CodeChangeUnspecified)
|
||||||
|
|
@ -387,10 +398,15 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Commit and re-open to start with a clean state.
|
// Commit and re-open to start with a clean state.
|
||||||
root, err := statedb.Commit(0, false, false)
|
root, err = statedb.Commit(0, false, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(fmt.Errorf("failed to commit initial state: %v", err))
|
panic(fmt.Errorf("failed to commit initial state: %v", err))
|
||||||
}
|
}
|
||||||
|
// If bintrie mode started, check if conversion happened
|
||||||
|
if isBintrie {
|
||||||
|
return statedb
|
||||||
|
}
|
||||||
|
// For MPT mode, reopen the state with the committed root
|
||||||
statedb, err = state.New(root, sdb)
|
statedb, err = state.New(root, sdb)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(fmt.Errorf("failed to reopen state after commit: %v", err))
|
panic(fmt.Errorf("failed to reopen state after commit: %v", err))
|
||||||
|
|
@ -398,8 +414,8 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB
|
||||||
return statedb
|
return statedb
|
||||||
}
|
}
|
||||||
|
|
||||||
func rlpHash(x interface{}) (h common.Hash) {
|
func rlpHash(x any) (h common.Hash) {
|
||||||
hw := sha3.NewLegacyKeccak256()
|
hw := keccak.NewLegacyKeccak256()
|
||||||
rlp.Encode(hw, x)
|
rlp.Encode(hw, x)
|
||||||
hw.Sum(h[:0])
|
hw.Sum(h[:0])
|
||||||
return h
|
return h
|
||||||
|
|
|
||||||
|
|
@ -56,27 +56,35 @@ func (l *fileWritingTracer) Write(p []byte) (n int, err error) {
|
||||||
return n, nil
|
return n, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// newFileWriter creates a set of hooks which wraps inner hooks (typically a logger),
|
// newFileWriter creates a tracer which wraps inner hooks (typically a logger),
|
||||||
// and writes the output to a file, one file per transaction.
|
// and writes the output to a file, one file per transaction.
|
||||||
func newFileWriter(baseDir string, innerFn func(out io.Writer) *tracing.Hooks) *tracing.Hooks {
|
func newFileWriter(baseDir string, innerFn func(out io.Writer) *tracing.Hooks) *tracers.Tracer {
|
||||||
t := &fileWritingTracer{
|
t := &fileWritingTracer{
|
||||||
baseDir: baseDir,
|
baseDir: baseDir,
|
||||||
suffix: "jsonl",
|
suffix: "jsonl",
|
||||||
}
|
}
|
||||||
t.inner = innerFn(t) // instantiate the inner tracer
|
t.inner = innerFn(t) // instantiate the inner tracer
|
||||||
return t.hooks()
|
return &tracers.Tracer{
|
||||||
|
Hooks: t.hooks(),
|
||||||
|
GetResult: func() (json.RawMessage, error) { return json.RawMessage("{}"), nil },
|
||||||
|
Stop: func(err error) {},
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// newResultWriter creates a set of hooks wraps and invokes an underlying tracer,
|
// newResultWriter creates a tracer that wraps and invokes an underlying tracer,
|
||||||
// and writes the result (getResult-output) to file, one per transaction.
|
// and writes the result (getResult-output) to file, one per transaction.
|
||||||
func newResultWriter(baseDir string, tracer *tracers.Tracer) *tracing.Hooks {
|
func newResultWriter(baseDir string, tracer *tracers.Tracer) *tracers.Tracer {
|
||||||
t := &fileWritingTracer{
|
t := &fileWritingTracer{
|
||||||
baseDir: baseDir,
|
baseDir: baseDir,
|
||||||
getResult: tracer.GetResult,
|
getResult: tracer.GetResult,
|
||||||
inner: tracer.Hooks,
|
inner: tracer.Hooks,
|
||||||
suffix: "json",
|
suffix: "json",
|
||||||
}
|
}
|
||||||
return t.hooks()
|
return &tracers.Tracer{
|
||||||
|
Hooks: t.hooks(),
|
||||||
|
GetResult: func() (json.RawMessage, error) { return json.RawMessage("{}"), nil },
|
||||||
|
Stop: func(err error) {},
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// OnTxStart creates a new output-file specific for this transaction, and invokes
|
// OnTxStart creates a new output-file specific for this transaction, and invokes
|
||||||
|
|
|
||||||
|
|
@ -88,6 +88,14 @@ var (
|
||||||
"\t<file> - into the file <file> ",
|
"\t<file> - into the file <file> ",
|
||||||
Value: "block.json",
|
Value: "block.json",
|
||||||
}
|
}
|
||||||
|
OutputBTFlag = &cli.StringFlag{
|
||||||
|
Name: "output.vkt",
|
||||||
|
Usage: "Determines where to put the `BT` of the post-state.\n" +
|
||||||
|
"\t`stdout` - into the stdout output\n" +
|
||||||
|
"\t`stderr` - into the stderr output\n" +
|
||||||
|
"\t<file> - into the file <file> ",
|
||||||
|
Value: "vkt.json",
|
||||||
|
}
|
||||||
InputAllocFlag = &cli.StringFlag{
|
InputAllocFlag = &cli.StringFlag{
|
||||||
Name: "input.alloc",
|
Name: "input.alloc",
|
||||||
Usage: "`stdin` or file name of where to find the prestate alloc to use.",
|
Usage: "`stdin` or file name of where to find the prestate alloc to use.",
|
||||||
|
|
@ -123,6 +131,11 @@ var (
|
||||||
Usage: "`stdin` or file name of where to find the transactions list in RLP form.",
|
Usage: "`stdin` or file name of where to find the transactions list in RLP form.",
|
||||||
Value: "txs.rlp",
|
Value: "txs.rlp",
|
||||||
}
|
}
|
||||||
|
// TODO(@CPerezz): rename `Name` of the file in a follow-up PR (relays on EEST -> https://github.com/ethereum/execution-spec-tests/tree/verkle/main)
|
||||||
|
InputBTFlag = &cli.StringFlag{
|
||||||
|
Name: "input.vkt",
|
||||||
|
Usage: "`stdin` or file name of where to find the prestate BT.",
|
||||||
|
}
|
||||||
SealCliqueFlag = &cli.StringFlag{
|
SealCliqueFlag = &cli.StringFlag{
|
||||||
Name: "seal.clique",
|
Name: "seal.clique",
|
||||||
Usage: "Seal block with Clique. `stdin` or file name of where to find the Clique sealing data.",
|
Usage: "Seal block with Clique. `stdin` or file name of where to find the Clique sealing data.",
|
||||||
|
|
@ -149,6 +162,11 @@ var (
|
||||||
strings.Join(vm.ActivateableEips(), ", ")),
|
strings.Join(vm.ActivateableEips(), ", ")),
|
||||||
Value: "GrayGlacier",
|
Value: "GrayGlacier",
|
||||||
}
|
}
|
||||||
|
OpcodeCountFlag = &cli.StringFlag{
|
||||||
|
Name: "opcode.count",
|
||||||
|
Usage: "If set, opcode execution counts will be written to this file (relative to output.basedir).",
|
||||||
|
Value: "",
|
||||||
|
}
|
||||||
VerbosityFlag = &cli.IntFlag{
|
VerbosityFlag = &cli.IntFlag{
|
||||||
Name: "verbosity",
|
Name: "verbosity",
|
||||||
Usage: "sets the verbosity level",
|
Usage: "sets the verbosity level",
|
||||||
|
|
|
||||||
|
|
@ -38,6 +38,7 @@ func (h header) MarshalJSON() ([]byte, error) {
|
||||||
BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed" rlp:"optional"`
|
BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed" rlp:"optional"`
|
||||||
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas" rlp:"optional"`
|
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas" rlp:"optional"`
|
||||||
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
|
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
|
||||||
|
SlotNumber *math.HexOrDecimal64 `json:"slotNumber" rlp:"optional"`
|
||||||
}
|
}
|
||||||
var enc header
|
var enc header
|
||||||
enc.ParentHash = h.ParentHash
|
enc.ParentHash = h.ParentHash
|
||||||
|
|
@ -60,6 +61,7 @@ func (h header) MarshalJSON() ([]byte, error) {
|
||||||
enc.BlobGasUsed = (*math.HexOrDecimal64)(h.BlobGasUsed)
|
enc.BlobGasUsed = (*math.HexOrDecimal64)(h.BlobGasUsed)
|
||||||
enc.ExcessBlobGas = (*math.HexOrDecimal64)(h.ExcessBlobGas)
|
enc.ExcessBlobGas = (*math.HexOrDecimal64)(h.ExcessBlobGas)
|
||||||
enc.ParentBeaconBlockRoot = h.ParentBeaconBlockRoot
|
enc.ParentBeaconBlockRoot = h.ParentBeaconBlockRoot
|
||||||
|
enc.SlotNumber = (*math.HexOrDecimal64)(h.SlotNumber)
|
||||||
return json.Marshal(&enc)
|
return json.Marshal(&enc)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -86,6 +88,7 @@ func (h *header) UnmarshalJSON(input []byte) error {
|
||||||
BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed" rlp:"optional"`
|
BlobGasUsed *math.HexOrDecimal64 `json:"blobGasUsed" rlp:"optional"`
|
||||||
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas" rlp:"optional"`
|
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas" rlp:"optional"`
|
||||||
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
|
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot" rlp:"optional"`
|
||||||
|
SlotNumber *math.HexOrDecimal64 `json:"slotNumber" rlp:"optional"`
|
||||||
}
|
}
|
||||||
var dec header
|
var dec header
|
||||||
if err := json.Unmarshal(input, &dec); err != nil {
|
if err := json.Unmarshal(input, &dec); err != nil {
|
||||||
|
|
@ -155,5 +158,8 @@ func (h *header) UnmarshalJSON(input []byte) error {
|
||||||
if dec.ParentBeaconBlockRoot != nil {
|
if dec.ParentBeaconBlockRoot != nil {
|
||||||
h.ParentBeaconBlockRoot = dec.ParentBeaconBlockRoot
|
h.ParentBeaconBlockRoot = dec.ParentBeaconBlockRoot
|
||||||
}
|
}
|
||||||
|
if dec.SlotNumber != nil {
|
||||||
|
h.SlotNumber = (*uint64)(dec.SlotNumber)
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -37,6 +37,7 @@ func (s stEnv) MarshalJSON() ([]byte, error) {
|
||||||
ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"`
|
ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"`
|
||||||
ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"`
|
ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"`
|
||||||
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"`
|
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"`
|
||||||
|
SlotNumber *math.HexOrDecimal64 `json:"slotNumber"`
|
||||||
}
|
}
|
||||||
var enc stEnv
|
var enc stEnv
|
||||||
enc.Coinbase = common.UnprefixedAddress(s.Coinbase)
|
enc.Coinbase = common.UnprefixedAddress(s.Coinbase)
|
||||||
|
|
@ -59,6 +60,7 @@ func (s stEnv) MarshalJSON() ([]byte, error) {
|
||||||
enc.ParentExcessBlobGas = (*math.HexOrDecimal64)(s.ParentExcessBlobGas)
|
enc.ParentExcessBlobGas = (*math.HexOrDecimal64)(s.ParentExcessBlobGas)
|
||||||
enc.ParentBlobGasUsed = (*math.HexOrDecimal64)(s.ParentBlobGasUsed)
|
enc.ParentBlobGasUsed = (*math.HexOrDecimal64)(s.ParentBlobGasUsed)
|
||||||
enc.ParentBeaconBlockRoot = s.ParentBeaconBlockRoot
|
enc.ParentBeaconBlockRoot = s.ParentBeaconBlockRoot
|
||||||
|
enc.SlotNumber = (*math.HexOrDecimal64)(s.SlotNumber)
|
||||||
return json.Marshal(&enc)
|
return json.Marshal(&enc)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -85,6 +87,7 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
|
||||||
ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"`
|
ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"`
|
||||||
ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"`
|
ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"`
|
||||||
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"`
|
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"`
|
||||||
|
SlotNumber *math.HexOrDecimal64 `json:"slotNumber"`
|
||||||
}
|
}
|
||||||
var dec stEnv
|
var dec stEnv
|
||||||
if err := json.Unmarshal(input, &dec); err != nil {
|
if err := json.Unmarshal(input, &dec); err != nil {
|
||||||
|
|
@ -154,5 +157,8 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
|
||||||
if dec.ParentBeaconBlockRoot != nil {
|
if dec.ParentBeaconBlockRoot != nil {
|
||||||
s.ParentBeaconBlockRoot = dec.ParentBeaconBlockRoot
|
s.ParentBeaconBlockRoot = dec.ParentBeaconBlockRoot
|
||||||
}
|
}
|
||||||
|
if dec.SlotNumber != nil {
|
||||||
|
s.SlotNumber = (*uint64)(dec.SlotNumber)
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -27,7 +27,9 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||||
"github.com/ethereum/go-ethereum/core"
|
"github.com/ethereum/go-ethereum/core"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"github.com/ethereum/go-ethereum/tests"
|
"github.com/ethereum/go-ethereum/tests"
|
||||||
|
|
@ -115,9 +117,6 @@ func Transaction(ctx *cli.Context) error {
|
||||||
}
|
}
|
||||||
var results []result
|
var results []result
|
||||||
for it.Next() {
|
for it.Next() {
|
||||||
if err := it.Err(); err != nil {
|
|
||||||
return NewError(ErrorIO, err)
|
|
||||||
}
|
|
||||||
var tx types.Transaction
|
var tx types.Transaction
|
||||||
err := rlp.DecodeBytes(it.Value(), &tx)
|
err := rlp.DecodeBytes(it.Value(), &tx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
@ -180,14 +179,21 @@ func Transaction(ctx *cli.Context) error {
|
||||||
r.Error = errors.New("gas * maxFeePerGas exceeds 256 bits")
|
r.Error = errors.New("gas * maxFeePerGas exceeds 256 bits")
|
||||||
}
|
}
|
||||||
// Check whether the init code size has been exceeded.
|
// Check whether the init code size has been exceeded.
|
||||||
if chainConfig.IsShanghai(new(big.Int), 0) && tx.To() == nil && len(tx.Data()) > params.MaxInitCodeSize {
|
if tx.To() == nil {
|
||||||
r.Error = errors.New("max initcode size exceeded")
|
if err := vm.CheckMaxInitCodeSize(&rules, uint64(len(tx.Data()))); err != nil {
|
||||||
|
r.Error = err
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if chainConfig.IsOsaka(new(big.Int), 0) && tx.Gas() > params.MaxTxGas {
|
if chainConfig.IsOsaka(new(big.Int), 0) && tx.Gas() > params.MaxTxGas {
|
||||||
r.Error = errors.New("gas limit exceeds maximum")
|
r.Error = errors.New("gas limit exceeds maximum")
|
||||||
}
|
}
|
||||||
results = append(results, r)
|
results = append(results, r)
|
||||||
}
|
}
|
||||||
|
if err := it.Err(); err != nil {
|
||||||
|
return NewError(ErrorIO, err)
|
||||||
|
}
|
||||||
|
|
||||||
out, err := json.MarshalIndent(results, "", " ")
|
out, err := json.MarshalIndent(results, "", " ")
|
||||||
fmt.Println(string(out))
|
fmt.Println(string(out))
|
||||||
return err
|
return err
|
||||||
|
|
|
||||||
|
|
@ -28,15 +28,23 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||||
"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
|
"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
|
||||||
|
"github.com/ethereum/go-ethereum/core"
|
||||||
|
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||||
"github.com/ethereum/go-ethereum/core/state"
|
"github.com/ethereum/go-ethereum/core/state"
|
||||||
"github.com/ethereum/go-ethereum/core/tracing"
|
"github.com/ethereum/go-ethereum/core/tracing"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/core/vm"
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/eth/tracers"
|
"github.com/ethereum/go-ethereum/eth/tracers"
|
||||||
"github.com/ethereum/go-ethereum/eth/tracers/logger"
|
"github.com/ethereum/go-ethereum/eth/tracers/logger"
|
||||||
|
"github.com/ethereum/go-ethereum/eth/tracers/native"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/tests"
|
"github.com/ethereum/go-ethereum/tests"
|
||||||
|
"github.com/ethereum/go-ethereum/trie/bintrie"
|
||||||
|
"github.com/ethereum/go-ethereum/triedb"
|
||||||
|
"github.com/ethereum/go-ethereum/triedb/database"
|
||||||
|
"github.com/holiman/uint256"
|
||||||
"github.com/urfave/cli/v2"
|
"github.com/urfave/cli/v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -77,6 +85,7 @@ var (
|
||||||
type input struct {
|
type input struct {
|
||||||
Alloc types.GenesisAlloc `json:"alloc,omitempty"`
|
Alloc types.GenesisAlloc `json:"alloc,omitempty"`
|
||||||
Env *stEnv `json:"env,omitempty"`
|
Env *stEnv `json:"env,omitempty"`
|
||||||
|
BT map[common.Hash]hexutil.Bytes `json:"vkt,omitempty"`
|
||||||
Txs []*txWithKey `json:"txs,omitempty"`
|
Txs []*txWithKey `json:"txs,omitempty"`
|
||||||
TxRlp string `json:"txsRlp,omitempty"`
|
TxRlp string `json:"txsRlp,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
@ -93,13 +102,13 @@ func Transition(ctx *cli.Context) error {
|
||||||
prestate Prestate
|
prestate Prestate
|
||||||
txIt txIterator // txs to apply
|
txIt txIterator // txs to apply
|
||||||
allocStr = ctx.String(InputAllocFlag.Name)
|
allocStr = ctx.String(InputAllocFlag.Name)
|
||||||
|
btStr = ctx.String(InputBTFlag.Name)
|
||||||
envStr = ctx.String(InputEnvFlag.Name)
|
envStr = ctx.String(InputEnvFlag.Name)
|
||||||
txStr = ctx.String(InputTxsFlag.Name)
|
txStr = ctx.String(InputTxsFlag.Name)
|
||||||
inputData = &input{}
|
inputData = &input{}
|
||||||
)
|
)
|
||||||
// Figure out the prestate alloc
|
// Figure out the prestate alloc
|
||||||
if allocStr == stdinSelector || envStr == stdinSelector || txStr == stdinSelector {
|
if allocStr == stdinSelector || btStr == stdinSelector || envStr == stdinSelector || txStr == stdinSelector {
|
||||||
decoder := json.NewDecoder(os.Stdin)
|
decoder := json.NewDecoder(os.Stdin)
|
||||||
if err := decoder.Decode(inputData); err != nil {
|
if err := decoder.Decode(inputData); err != nil {
|
||||||
return NewError(ErrorJson, fmt.Errorf("failed unmarshalling stdin: %v", err))
|
return NewError(ErrorJson, fmt.Errorf("failed unmarshalling stdin: %v", err))
|
||||||
|
|
@ -112,6 +121,13 @@ func Transition(ctx *cli.Context) error {
|
||||||
}
|
}
|
||||||
prestate.Pre = inputData.Alloc
|
prestate.Pre = inputData.Alloc
|
||||||
|
|
||||||
|
if btStr != stdinSelector && btStr != "" {
|
||||||
|
if err := readFile(btStr, "BT", &inputData.BT); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
prestate.TreeLeaves = inputData.BT
|
||||||
|
|
||||||
// Set the block environment
|
// Set the block environment
|
||||||
if envStr != stdinSelector {
|
if envStr != stdinSelector {
|
||||||
var env stEnv
|
var env stEnv
|
||||||
|
|
@ -152,14 +168,15 @@ func Transition(ctx *cli.Context) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Configure tracer
|
// Configure tracer
|
||||||
|
var tracer *tracers.Tracer
|
||||||
if ctx.IsSet(TraceTracerFlag.Name) { // Custom tracing
|
if ctx.IsSet(TraceTracerFlag.Name) { // Custom tracing
|
||||||
config := json.RawMessage(ctx.String(TraceTracerConfigFlag.Name))
|
config := json.RawMessage(ctx.String(TraceTracerConfigFlag.Name))
|
||||||
tracer, err := tracers.DefaultDirectory.New(ctx.String(TraceTracerFlag.Name),
|
innerTracer, err := tracers.DefaultDirectory.New(ctx.String(TraceTracerFlag.Name),
|
||||||
nil, config, chainConfig)
|
nil, config, chainConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return NewError(ErrorConfig, fmt.Errorf("failed instantiating tracer: %v", err))
|
return NewError(ErrorConfig, fmt.Errorf("failed instantiating tracer: %v", err))
|
||||||
}
|
}
|
||||||
vmConfig.Tracer = newResultWriter(baseDir, tracer)
|
tracer = newResultWriter(baseDir, innerTracer)
|
||||||
} else if ctx.Bool(TraceFlag.Name) { // JSON opcode tracing
|
} else if ctx.Bool(TraceFlag.Name) { // JSON opcode tracing
|
||||||
logConfig := &logger.Config{
|
logConfig := &logger.Config{
|
||||||
DisableStack: ctx.Bool(TraceDisableStackFlag.Name),
|
DisableStack: ctx.Bool(TraceDisableStackFlag.Name),
|
||||||
|
|
@ -167,24 +184,61 @@ func Transition(ctx *cli.Context) error {
|
||||||
EnableReturnData: ctx.Bool(TraceEnableReturnDataFlag.Name),
|
EnableReturnData: ctx.Bool(TraceEnableReturnDataFlag.Name),
|
||||||
}
|
}
|
||||||
if ctx.Bool(TraceEnableCallFramesFlag.Name) {
|
if ctx.Bool(TraceEnableCallFramesFlag.Name) {
|
||||||
vmConfig.Tracer = newFileWriter(baseDir, func(out io.Writer) *tracing.Hooks {
|
tracer = newFileWriter(baseDir, func(out io.Writer) *tracing.Hooks {
|
||||||
return logger.NewJSONLoggerWithCallFrames(logConfig, out)
|
return logger.NewJSONLoggerWithCallFrames(logConfig, out)
|
||||||
})
|
})
|
||||||
} else {
|
} else {
|
||||||
vmConfig.Tracer = newFileWriter(baseDir, func(out io.Writer) *tracing.Hooks {
|
tracer = newFileWriter(baseDir, func(out io.Writer) *tracing.Hooks {
|
||||||
return logger.NewJSONLogger(logConfig, out)
|
return logger.NewJSONLogger(logConfig, out)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
// Configure opcode counter
|
||||||
|
var opcodeTracer *tracers.Tracer
|
||||||
|
if ctx.IsSet(OpcodeCountFlag.Name) && ctx.String(OpcodeCountFlag.Name) != "" {
|
||||||
|
opcodeTracer = native.NewOpcodeCounter()
|
||||||
|
if tracer != nil {
|
||||||
|
// If we have an existing tracer, multiplex with the opcode tracer
|
||||||
|
mux, _ := native.NewMuxTracer([]string{"trace", "opcode"}, []*tracers.Tracer{tracer, opcodeTracer})
|
||||||
|
vmConfig.Tracer = mux.Hooks
|
||||||
|
} else {
|
||||||
|
vmConfig.Tracer = opcodeTracer.Hooks
|
||||||
|
}
|
||||||
|
} else if tracer != nil {
|
||||||
|
vmConfig.Tracer = tracer.Hooks
|
||||||
|
}
|
||||||
// Run the test and aggregate the result
|
// Run the test and aggregate the result
|
||||||
s, result, body, err := prestate.Apply(vmConfig, chainConfig, txIt, ctx.Int64(RewardFlag.Name))
|
s, result, body, err := prestate.Apply(vmConfig, chainConfig, txIt, ctx.Int64(RewardFlag.Name))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
// Write opcode counts if enabled
|
||||||
|
if opcodeTracer != nil {
|
||||||
|
fname := ctx.String(OpcodeCountFlag.Name)
|
||||||
|
result, err := opcodeTracer.GetResult()
|
||||||
|
if err != nil {
|
||||||
|
return NewError(ErrorJson, fmt.Errorf("failed getting opcode counts: %v", err))
|
||||||
|
}
|
||||||
|
if err := saveFile(baseDir, fname, result); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
// Dump the execution result
|
// Dump the execution result
|
||||||
collector := make(Alloc)
|
var (
|
||||||
|
collector = make(Alloc)
|
||||||
|
btleaves map[common.Hash]hexutil.Bytes
|
||||||
|
)
|
||||||
|
isBinary := chainConfig.IsVerkle(big.NewInt(int64(prestate.Env.Number)), prestate.Env.Timestamp)
|
||||||
|
if !isBinary {
|
||||||
s.DumpToCollector(collector, nil)
|
s.DumpToCollector(collector, nil)
|
||||||
return dispatchOutput(ctx, baseDir, result, collector, body)
|
} else {
|
||||||
|
btleaves = make(map[common.Hash]hexutil.Bytes)
|
||||||
|
if err := s.DumpBinTrieLeaves(btleaves); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return dispatchOutput(ctx, baseDir, result, collector, body, btleaves)
|
||||||
}
|
}
|
||||||
|
|
||||||
func applyLondonChecks(env *stEnv, chainConfig *params.ChainConfig) error {
|
func applyLondonChecks(env *stEnv, chainConfig *params.ChainConfig) error {
|
||||||
|
|
@ -306,7 +360,7 @@ func saveFile(baseDir, filename string, data interface{}) error {
|
||||||
|
|
||||||
// dispatchOutput writes the output data to either stderr or stdout, or to the specified
|
// dispatchOutput writes the output data to either stderr or stdout, or to the specified
|
||||||
// files
|
// files
|
||||||
func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, alloc Alloc, body hexutil.Bytes) error {
|
func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, alloc Alloc, body hexutil.Bytes, bt map[common.Hash]hexutil.Bytes) error {
|
||||||
stdOutObject := make(map[string]interface{})
|
stdOutObject := make(map[string]interface{})
|
||||||
stdErrObject := make(map[string]interface{})
|
stdErrObject := make(map[string]interface{})
|
||||||
dispatch := func(baseDir, fName, name string, obj interface{}) error {
|
dispatch := func(baseDir, fName, name string, obj interface{}) error {
|
||||||
|
|
@ -333,6 +387,13 @@ func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, a
|
||||||
if err := dispatch(baseDir, ctx.String(OutputBodyFlag.Name), "body", body); err != nil {
|
if err := dispatch(baseDir, ctx.String(OutputBodyFlag.Name), "body", body); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
// Only write bt output if we actually have binary trie leaves
|
||||||
|
if bt != nil {
|
||||||
|
if err := dispatch(baseDir, ctx.String(OutputBTFlag.Name), "vkt", bt); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if len(stdOutObject) > 0 {
|
if len(stdOutObject) > 0 {
|
||||||
b, err := json.MarshalIndent(stdOutObject, "", " ")
|
b, err := json.MarshalIndent(stdOutObject, "", " ")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
@ -351,3 +412,168 @@ func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, a
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// BinKey computes the tree key given an address and an optional slot number.
|
||||||
|
func BinKey(ctx *cli.Context) error {
|
||||||
|
if ctx.Args().Len() == 0 || ctx.Args().Len() > 2 {
|
||||||
|
return errors.New("invalid number of arguments: expecting an address and an optional slot number")
|
||||||
|
}
|
||||||
|
|
||||||
|
addr, err := hexutil.Decode(ctx.Args().Get(0))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error decoding address: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if ctx.Args().Len() == 2 {
|
||||||
|
slot, err := hexutil.Decode(ctx.Args().Get(1))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error decoding slot: %w", err)
|
||||||
|
}
|
||||||
|
fmt.Printf("%#x\n", bintrie.GetBinaryTreeKeyStorageSlot(common.BytesToAddress(addr), slot))
|
||||||
|
} else {
|
||||||
|
fmt.Printf("%#x\n", bintrie.GetBinaryTreeKeyBasicData(common.BytesToAddress(addr)))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// BinKeys computes a set of tree keys given a genesis alloc.
|
||||||
|
func BinKeys(ctx *cli.Context) error {
|
||||||
|
var allocStr = ctx.String(InputAllocFlag.Name)
|
||||||
|
var alloc core.GenesisAlloc
|
||||||
|
// Figure out the prestate alloc
|
||||||
|
if allocStr == stdinSelector {
|
||||||
|
decoder := json.NewDecoder(os.Stdin)
|
||||||
|
if err := decoder.Decode(&alloc); err != nil {
|
||||||
|
return NewError(ErrorJson, fmt.Errorf("failed unmarshaling stdin: %v", err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if allocStr != stdinSelector {
|
||||||
|
if err := readFile(allocStr, "alloc", &alloc); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
db := triedb.NewDatabase(rawdb.NewMemoryDatabase(), triedb.VerkleDefaults)
|
||||||
|
defer db.Close()
|
||||||
|
|
||||||
|
bt, err := genBinTrieFromAlloc(alloc, db)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error generating bt: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
collector := make(map[common.Hash]hexutil.Bytes)
|
||||||
|
it, err := bt.NodeIterator(nil)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
for it.Next(true) {
|
||||||
|
if it.Leaf() {
|
||||||
|
collector[common.BytesToHash(it.LeafKey())] = it.LeafBlob()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
output, err := json.MarshalIndent(collector, "", "")
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error outputting tree: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Println(string(output))
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// BinTrieRoot computes the root of a Binary Trie from a genesis alloc.
|
||||||
|
func BinTrieRoot(ctx *cli.Context) error {
|
||||||
|
var allocStr = ctx.String(InputAllocFlag.Name)
|
||||||
|
var alloc core.GenesisAlloc
|
||||||
|
if allocStr == stdinSelector {
|
||||||
|
decoder := json.NewDecoder(os.Stdin)
|
||||||
|
if err := decoder.Decode(&alloc); err != nil {
|
||||||
|
return NewError(ErrorJson, fmt.Errorf("failed unmarshaling stdin: %v", err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if allocStr != stdinSelector {
|
||||||
|
if err := readFile(allocStr, "alloc", &alloc); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
db := triedb.NewDatabase(rawdb.NewMemoryDatabase(), triedb.VerkleDefaults)
|
||||||
|
defer db.Close()
|
||||||
|
|
||||||
|
bt, err := genBinTrieFromAlloc(alloc, db)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error generating bt: %w", err)
|
||||||
|
}
|
||||||
|
fmt.Println(bt.Hash().Hex())
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO(@CPerezz): Should this go to `bintrie` module?
|
||||||
|
func genBinTrieFromAlloc(alloc core.GenesisAlloc, db database.NodeDatabase) (*bintrie.BinaryTrie, error) {
|
||||||
|
bt, err := bintrie.NewBinaryTrie(types.EmptyBinaryHash, db)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
for addr, acc := range alloc {
|
||||||
|
for slot, value := range acc.Storage {
|
||||||
|
err := bt.UpdateStorage(addr, slot.Bytes(), value.Big().Bytes())
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error inserting storage: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
account := &types.StateAccount{
|
||||||
|
Balance: uint256.MustFromBig(acc.Balance),
|
||||||
|
Nonce: acc.Nonce,
|
||||||
|
CodeHash: crypto.Keccak256Hash(acc.Code).Bytes(),
|
||||||
|
Root: common.Hash{},
|
||||||
|
}
|
||||||
|
err := bt.UpdateAccount(addr, account, len(acc.Code))
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error inserting account: %w", err)
|
||||||
|
}
|
||||||
|
err = bt.UpdateContractCode(addr, common.BytesToHash(account.CodeHash), acc.Code)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error inserting code: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return bt, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// BinaryCodeChunkKey computes the tree key of a code-chunk for a given address.
|
||||||
|
func BinaryCodeChunkKey(ctx *cli.Context) error {
|
||||||
|
if ctx.Args().Len() == 0 || ctx.Args().Len() > 2 {
|
||||||
|
return errors.New("invalid number of arguments: expecting an address and an code-chunk number")
|
||||||
|
}
|
||||||
|
|
||||||
|
addr, err := hexutil.Decode(ctx.Args().Get(0))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error decoding address: %w", err)
|
||||||
|
}
|
||||||
|
chunkNumberBytes, err := hexutil.Decode(ctx.Args().Get(1))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error decoding chunk number: %w", err)
|
||||||
|
}
|
||||||
|
var chunkNumber uint256.Int
|
||||||
|
chunkNumber.SetBytes(chunkNumberBytes)
|
||||||
|
|
||||||
|
fmt.Printf("%#x\n", bintrie.GetBinaryTreeKeyCodeChunk(common.BytesToAddress(addr), &chunkNumber))
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// BinaryCodeChunkCode returns the code chunkification for a given code.
|
||||||
|
func BinaryCodeChunkCode(ctx *cli.Context) error {
|
||||||
|
if ctx.Args().Len() == 0 || ctx.Args().Len() > 1 {
|
||||||
|
return errors.New("invalid number of arguments: expecting a bytecode")
|
||||||
|
}
|
||||||
|
|
||||||
|
bytecode, err := hexutil.Decode(ctx.Args().Get(0))
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error decoding address: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
chunkedCode := bintrie.ChunkifyCode(bytecode)
|
||||||
|
fmt.Printf("%#x\n", chunkedCode)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
|
||||||
|
|
@ -55,6 +55,11 @@ var (
|
||||||
Usage: "benchmark the execution",
|
Usage: "benchmark the execution",
|
||||||
Category: flags.VMCategory,
|
Category: flags.VMCategory,
|
||||||
}
|
}
|
||||||
|
FuzzFlag = &cli.BoolFlag{
|
||||||
|
Name: "fuzz",
|
||||||
|
Usage: "adapts output format for fuzzing",
|
||||||
|
Category: flags.VMCategory,
|
||||||
|
}
|
||||||
WitnessCrossCheckFlag = &cli.BoolFlag{
|
WitnessCrossCheckFlag = &cli.BoolFlag{
|
||||||
Name: "cross-check",
|
Name: "cross-check",
|
||||||
Aliases: []string{"xc"},
|
Aliases: []string{"xc"},
|
||||||
|
|
@ -146,16 +151,64 @@ var (
|
||||||
t8ntool.TraceEnableCallFramesFlag,
|
t8ntool.TraceEnableCallFramesFlag,
|
||||||
t8ntool.OutputBasedir,
|
t8ntool.OutputBasedir,
|
||||||
t8ntool.OutputAllocFlag,
|
t8ntool.OutputAllocFlag,
|
||||||
|
t8ntool.OutputBTFlag,
|
||||||
t8ntool.OutputResultFlag,
|
t8ntool.OutputResultFlag,
|
||||||
t8ntool.OutputBodyFlag,
|
t8ntool.OutputBodyFlag,
|
||||||
t8ntool.InputAllocFlag,
|
t8ntool.InputAllocFlag,
|
||||||
t8ntool.InputEnvFlag,
|
t8ntool.InputEnvFlag,
|
||||||
|
t8ntool.InputBTFlag,
|
||||||
t8ntool.InputTxsFlag,
|
t8ntool.InputTxsFlag,
|
||||||
t8ntool.ForknameFlag,
|
t8ntool.ForknameFlag,
|
||||||
t8ntool.ChainIDFlag,
|
t8ntool.ChainIDFlag,
|
||||||
t8ntool.RewardFlag,
|
t8ntool.RewardFlag,
|
||||||
|
t8ntool.OpcodeCountFlag,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
verkleCommand = &cli.Command{
|
||||||
|
Name: "verkle",
|
||||||
|
Aliases: []string{"vkt"},
|
||||||
|
Usage: "Binary Trie helpers",
|
||||||
|
Subcommands: []*cli.Command{
|
||||||
|
{
|
||||||
|
Name: "tree-keys",
|
||||||
|
Aliases: []string{"v"},
|
||||||
|
Usage: "compute a set of binary trie keys, given their source addresses and optional slot numbers",
|
||||||
|
Action: t8ntool.BinKeys,
|
||||||
|
Flags: []cli.Flag{
|
||||||
|
t8ntool.InputAllocFlag,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "single-key",
|
||||||
|
Aliases: []string{"vk"},
|
||||||
|
Usage: "compute the binary trie key given an address and optional slot number",
|
||||||
|
Action: t8ntool.BinKey,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "code-chunk-key",
|
||||||
|
Aliases: []string{"vck"},
|
||||||
|
Usage: "compute the binary trie key given an address and chunk number",
|
||||||
|
Action: t8ntool.BinaryCodeChunkKey,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "chunkify-code",
|
||||||
|
Aliases: []string{"vcc"},
|
||||||
|
Usage: "chunkify a given bytecode for a binary trie",
|
||||||
|
Action: t8ntool.BinaryCodeChunkCode,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "state-root",
|
||||||
|
Aliases: []string{"vsr"},
|
||||||
|
Usage: "compute the state-root of a binary trie for the given alloc",
|
||||||
|
Action: t8ntool.BinTrieRoot,
|
||||||
|
Flags: []cli.Flag{
|
||||||
|
t8ntool.InputAllocFlag,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
transactionCommand = &cli.Command{
|
transactionCommand = &cli.Command{
|
||||||
Name: "transaction",
|
Name: "transaction",
|
||||||
Aliases: []string{"t9n"},
|
Aliases: []string{"t9n"},
|
||||||
|
|
@ -210,6 +263,7 @@ func init() {
|
||||||
stateTransitionCommand,
|
stateTransitionCommand,
|
||||||
transactionCommand,
|
transactionCommand,
|
||||||
blockBuilderCommand,
|
blockBuilderCommand,
|
||||||
|
verkleCommand,
|
||||||
}
|
}
|
||||||
app.Before = func(ctx *cli.Context) error {
|
app.Before = func(ctx *cli.Context) error {
|
||||||
flags.MigrateGlobalFlags(ctx)
|
flags.MigrateGlobalFlags(ctx)
|
||||||
|
|
|
||||||
2
cmd/evm/testdata/1/exp.json
vendored
2
cmd/evm/testdata/1/exp.json
vendored
|
|
@ -24,7 +24,7 @@
|
||||||
"status": "0x1",
|
"status": "0x1",
|
||||||
"cumulativeGasUsed": "0x5208",
|
"cumulativeGasUsed": "0x5208",
|
||||||
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||||
"logs": null,
|
"logs": [],
|
||||||
"transactionHash": "0x0557bacce3375c98d806609b8d5043072f0b6a8bae45ae5a67a00d3a1a18d673",
|
"transactionHash": "0x0557bacce3375c98d806609b8d5043072f0b6a8bae45ae5a67a00d3a1a18d673",
|
||||||
"contractAddress": "0x0000000000000000000000000000000000000000",
|
"contractAddress": "0x0000000000000000000000000000000000000000",
|
||||||
"gasUsed": "0x5208",
|
"gasUsed": "0x5208",
|
||||||
|
|
|
||||||
4
cmd/evm/testdata/13/exp2.json
vendored
4
cmd/evm/testdata/13/exp2.json
vendored
|
|
@ -12,7 +12,7 @@
|
||||||
"status": "0x0",
|
"status": "0x0",
|
||||||
"cumulativeGasUsed": "0x84d0",
|
"cumulativeGasUsed": "0x84d0",
|
||||||
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||||
"logs": null,
|
"logs": [],
|
||||||
"transactionHash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476",
|
"transactionHash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476",
|
||||||
"contractAddress": "0x0000000000000000000000000000000000000000",
|
"contractAddress": "0x0000000000000000000000000000000000000000",
|
||||||
"gasUsed": "0x84d0",
|
"gasUsed": "0x84d0",
|
||||||
|
|
@ -27,7 +27,7 @@
|
||||||
"status": "0x0",
|
"status": "0x0",
|
||||||
"cumulativeGasUsed": "0x109a0",
|
"cumulativeGasUsed": "0x109a0",
|
||||||
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||||
"logs": null,
|
"logs": [],
|
||||||
"transactionHash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a",
|
"transactionHash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a",
|
||||||
"contractAddress": "0x0000000000000000000000000000000000000000",
|
"contractAddress": "0x0000000000000000000000000000000000000000",
|
||||||
"gasUsed": "0x84d0",
|
"gasUsed": "0x84d0",
|
||||||
|
|
|
||||||
2
cmd/evm/testdata/23/exp.json
vendored
2
cmd/evm/testdata/23/exp.json
vendored
|
|
@ -11,7 +11,7 @@
|
||||||
"status": "0x1",
|
"status": "0x1",
|
||||||
"cumulativeGasUsed": "0x520b",
|
"cumulativeGasUsed": "0x520b",
|
||||||
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||||
"logs": null,
|
"logs": [],
|
||||||
"transactionHash": "0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81",
|
"transactionHash": "0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81",
|
||||||
"contractAddress": "0x0000000000000000000000000000000000000000",
|
"contractAddress": "0x0000000000000000000000000000000000000000",
|
||||||
"gasUsed": "0x520b",
|
"gasUsed": "0x520b",
|
||||||
|
|
|
||||||
4
cmd/evm/testdata/24/exp.json
vendored
4
cmd/evm/testdata/24/exp.json
vendored
|
|
@ -27,7 +27,7 @@
|
||||||
"status": "0x1",
|
"status": "0x1",
|
||||||
"cumulativeGasUsed": "0xa861",
|
"cumulativeGasUsed": "0xa861",
|
||||||
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||||
"logs": null,
|
"logs": [],
|
||||||
"transactionHash": "0x92ea4a28224d033afb20e0cc2b290d4c7c2d61f6a4800a680e4e19ac962ee941",
|
"transactionHash": "0x92ea4a28224d033afb20e0cc2b290d4c7c2d61f6a4800a680e4e19ac962ee941",
|
||||||
"contractAddress": "0x0000000000000000000000000000000000000000",
|
"contractAddress": "0x0000000000000000000000000000000000000000",
|
||||||
"gasUsed": "0xa861",
|
"gasUsed": "0xa861",
|
||||||
|
|
@ -41,7 +41,7 @@
|
||||||
"status": "0x1",
|
"status": "0x1",
|
||||||
"cumulativeGasUsed": "0x10306",
|
"cumulativeGasUsed": "0x10306",
|
||||||
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||||
"logs": null,
|
"logs": [],
|
||||||
"transactionHash": "0x16b1d912f1d664f3f60f4e1b5f296f3c82a64a1a253117b4851d18bc03c4f1da",
|
"transactionHash": "0x16b1d912f1d664f3f60f4e1b5f296f3c82a64a1a253117b4851d18bc03c4f1da",
|
||||||
"contractAddress": "0x0000000000000000000000000000000000000000",
|
"contractAddress": "0x0000000000000000000000000000000000000000",
|
||||||
"gasUsed": "0x5aa5",
|
"gasUsed": "0x5aa5",
|
||||||
|
|
|
||||||
2
cmd/evm/testdata/25/exp.json
vendored
2
cmd/evm/testdata/25/exp.json
vendored
|
|
@ -23,7 +23,7 @@
|
||||||
"status": "0x1",
|
"status": "0x1",
|
||||||
"cumulativeGasUsed": "0x5208",
|
"cumulativeGasUsed": "0x5208",
|
||||||
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||||
"logs": null,
|
"logs": [],
|
||||||
"transactionHash": "0x92ea4a28224d033afb20e0cc2b290d4c7c2d61f6a4800a680e4e19ac962ee941",
|
"transactionHash": "0x92ea4a28224d033afb20e0cc2b290d4c7c2d61f6a4800a680e4e19ac962ee941",
|
||||||
"contractAddress": "0x0000000000000000000000000000000000000000",
|
"contractAddress": "0x0000000000000000000000000000000000000000",
|
||||||
"gasUsed": "0x5208",
|
"gasUsed": "0x5208",
|
||||||
|
|
|
||||||
2
cmd/evm/testdata/28/exp.json
vendored
2
cmd/evm/testdata/28/exp.json
vendored
|
|
@ -28,7 +28,7 @@
|
||||||
"status": "0x1",
|
"status": "0x1",
|
||||||
"cumulativeGasUsed": "0xa865",
|
"cumulativeGasUsed": "0xa865",
|
||||||
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||||
"logs": null,
|
"logs": [],
|
||||||
"transactionHash": "0x7508d7139d002a4b3a26a4f12dec0d87cb46075c78bf77a38b569a133b509262",
|
"transactionHash": "0x7508d7139d002a4b3a26a4f12dec0d87cb46075c78bf77a38b569a133b509262",
|
||||||
"contractAddress": "0x0000000000000000000000000000000000000000",
|
"contractAddress": "0x0000000000000000000000000000000000000000",
|
||||||
"gasUsed": "0xa865",
|
"gasUsed": "0xa865",
|
||||||
|
|
|
||||||
2
cmd/evm/testdata/29/exp.json
vendored
2
cmd/evm/testdata/29/exp.json
vendored
|
|
@ -26,7 +26,7 @@
|
||||||
"status": "0x1",
|
"status": "0x1",
|
||||||
"cumulativeGasUsed": "0x5208",
|
"cumulativeGasUsed": "0x5208",
|
||||||
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||||
"logs": null,
|
"logs": [],
|
||||||
"transactionHash": "0x84f70aba406a55628a0620f26d260f90aeb6ccc55fed6ec2ac13dd4f727032ed",
|
"transactionHash": "0x84f70aba406a55628a0620f26d260f90aeb6ccc55fed6ec2ac13dd4f727032ed",
|
||||||
"contractAddress": "0x0000000000000000000000000000000000000000",
|
"contractAddress": "0x0000000000000000000000000000000000000000",
|
||||||
"gasUsed": "0x5208",
|
"gasUsed": "0x5208",
|
||||||
|
|
|
||||||
2
cmd/evm/testdata/3/exp.json
vendored
2
cmd/evm/testdata/3/exp.json
vendored
|
|
@ -24,7 +24,7 @@
|
||||||
"status": "0x1",
|
"status": "0x1",
|
||||||
"cumulativeGasUsed": "0x521f",
|
"cumulativeGasUsed": "0x521f",
|
||||||
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||||
"logs": null,
|
"logs": [],
|
||||||
"transactionHash": "0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81",
|
"transactionHash": "0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81",
|
||||||
"contractAddress": "0x0000000000000000000000000000000000000000",
|
"contractAddress": "0x0000000000000000000000000000000000000000",
|
||||||
"gasUsed": "0x521f",
|
"gasUsed": "0x521f",
|
||||||
|
|
|
||||||
4
cmd/evm/testdata/30/exp.json
vendored
4
cmd/evm/testdata/30/exp.json
vendored
|
|
@ -25,7 +25,7 @@
|
||||||
"status": "0x1",
|
"status": "0x1",
|
||||||
"cumulativeGasUsed": "0x5208",
|
"cumulativeGasUsed": "0x5208",
|
||||||
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||||
"logs": null,
|
"logs": [],
|
||||||
"transactionHash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476",
|
"transactionHash": "0xa98a24882ea90916c6a86da650fbc6b14238e46f0af04a131ce92be897507476",
|
||||||
"contractAddress": "0x0000000000000000000000000000000000000000",
|
"contractAddress": "0x0000000000000000000000000000000000000000",
|
||||||
"gasUsed": "0x5208",
|
"gasUsed": "0x5208",
|
||||||
|
|
@ -40,7 +40,7 @@
|
||||||
"status": "0x1",
|
"status": "0x1",
|
||||||
"cumulativeGasUsed": "0xa410",
|
"cumulativeGasUsed": "0xa410",
|
||||||
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||||
"logs": null,
|
"logs": [],
|
||||||
"transactionHash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a",
|
"transactionHash": "0x36bad80acce7040c45fd32764b5c2b2d2e6f778669fb41791f73f546d56e739a",
|
||||||
"contractAddress": "0x0000000000000000000000000000000000000000",
|
"contractAddress": "0x0000000000000000000000000000000000000000",
|
||||||
"gasUsed": "0x5208",
|
"gasUsed": "0x5208",
|
||||||
|
|
|
||||||
2
cmd/evm/testdata/33/exp.json
vendored
2
cmd/evm/testdata/33/exp.json
vendored
|
|
@ -44,7 +44,7 @@
|
||||||
"root": "0x",
|
"root": "0x",
|
||||||
"status": "0x1",
|
"status": "0x1",
|
||||||
"cumulativeGasUsed": "0x15fa9",
|
"cumulativeGasUsed": "0x15fa9",
|
||||||
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","logs": null,"transactionHash": "0x0417aab7c1d8a3989190c3167c132876ce9b8afd99262c5a0f9d06802de3d7ef",
|
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","logs": [],"transactionHash": "0x0417aab7c1d8a3989190c3167c132876ce9b8afd99262c5a0f9d06802de3d7ef",
|
||||||
"contractAddress": "0x0000000000000000000000000000000000000000",
|
"contractAddress": "0x0000000000000000000000000000000000000000",
|
||||||
"gasUsed": "0x15fa9",
|
"gasUsed": "0x15fa9",
|
||||||
"effectiveGasPrice": null,
|
"effectiveGasPrice": null,
|
||||||
|
|
|
||||||
177
cmd/fetchpayload/main.go
Normal file
177
cmd/fetchpayload/main.go
Normal file
|
|
@ -0,0 +1,177 @@
|
||||||
|
// Copyright 2026 The go-ethereum Authors
|
||||||
|
// This file is part of go-ethereum.
|
||||||
|
//
|
||||||
|
// go-ethereum is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// go-ethereum is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
// fetchpayload queries an Ethereum node over RPC, fetches a block and its
|
||||||
|
// execution witness, and writes the combined Payload (ChainID + Block +
|
||||||
|
// Witness) to disk in the format consumed by cmd/keeper.
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"math/big"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||||
|
"github.com/ethereum/go-ethereum/core/stateless"
|
||||||
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
|
"github.com/ethereum/go-ethereum/ethclient"
|
||||||
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
|
"github.com/ethereum/go-ethereum/rpc"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Payload is duplicated from cmd/keeper/main.go (package main, not importable).
|
||||||
|
type Payload struct {
|
||||||
|
ChainID uint64
|
||||||
|
Block *types.Block
|
||||||
|
Witness *stateless.Witness
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
var (
|
||||||
|
rpcURL = flag.String("rpc", "http://localhost:8545", "RPC endpoint URL")
|
||||||
|
blockArg = flag.String("block", "latest", `Block number: decimal, 0x-hex, or "latest"`)
|
||||||
|
format = flag.String("format", "rlp", "Comma-separated output formats: rlp, hex, json")
|
||||||
|
outDir = flag.String("out", "", "Output directory (default: current directory)")
|
||||||
|
)
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// Parse block number (nil means "latest" in ethclient).
|
||||||
|
blockNum, err := parseBlockNumber(*blockArg)
|
||||||
|
if err != nil {
|
||||||
|
fatal("invalid block number %q: %v", *blockArg, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Connect to the node.
|
||||||
|
client, err := ethclient.DialContext(ctx, *rpcURL)
|
||||||
|
if err != nil {
|
||||||
|
fatal("failed to connect to %s: %v", *rpcURL, err)
|
||||||
|
}
|
||||||
|
defer client.Close()
|
||||||
|
|
||||||
|
chainID, err := client.ChainID(ctx)
|
||||||
|
if err != nil {
|
||||||
|
fatal("failed to get chain ID: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetch the block first so we have a concrete number for the witness call,
|
||||||
|
// avoiding a race where "latest" advances between the two RPCs.
|
||||||
|
block, err := client.BlockByNumber(ctx, blockNum)
|
||||||
|
if err != nil {
|
||||||
|
fatal("failed to fetch block: %v", err)
|
||||||
|
}
|
||||||
|
fmt.Printf("Fetched block %d (%#x)\n", block.NumberU64(), block.Hash())
|
||||||
|
|
||||||
|
// Fetch the execution witness via the debug namespace.
|
||||||
|
var extWitness stateless.ExtWitness
|
||||||
|
err = client.Client().CallContext(ctx, &extWitness, "debug_executionWitness", rpc.BlockNumber(block.NumberU64()))
|
||||||
|
if err != nil {
|
||||||
|
fatal("failed to fetch execution witness: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
witness := new(stateless.Witness)
|
||||||
|
err = witness.FromExtWitness(&extWitness)
|
||||||
|
if err != nil {
|
||||||
|
fatal("failed to convert witness: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
payload := Payload{
|
||||||
|
ChainID: chainID.Uint64(),
|
||||||
|
Block: block,
|
||||||
|
Witness: witness,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Encode payload as RLP (shared by "rlp" and "hex" formats).
|
||||||
|
rlpBytes, err := rlp.EncodeToBytes(payload)
|
||||||
|
if err != nil {
|
||||||
|
fatal("failed to RLP-encode payload: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write one output file per requested format.
|
||||||
|
blockHex := fmt.Sprintf("%x", block.NumberU64())
|
||||||
|
for f := range strings.SplitSeq(*format, ",") {
|
||||||
|
f = strings.TrimSpace(f)
|
||||||
|
outPath := filepath.Join(*outDir, fmt.Sprintf("%s_payload.%s", blockHex, f))
|
||||||
|
|
||||||
|
var data []byte
|
||||||
|
switch f {
|
||||||
|
case "rlp":
|
||||||
|
data = rlpBytes
|
||||||
|
case "hex":
|
||||||
|
data = []byte(hexutil.Encode(rlpBytes))
|
||||||
|
case "json":
|
||||||
|
data, err = marshalJSONPayload(chainID, block, &extWitness)
|
||||||
|
if err != nil {
|
||||||
|
fatal("failed to JSON-encode payload: %v", err)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
fatal("unknown format %q (valid: rlp, hex, json)", f)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := os.WriteFile(outPath, data, 0644); err != nil {
|
||||||
|
fatal("failed to write %s: %v", outPath, err)
|
||||||
|
}
|
||||||
|
fmt.Printf("Wrote %s (%d bytes)\n", outPath, len(data))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseBlockNumber converts a CLI string to *big.Int.
|
||||||
|
// Returns nil for "latest" (ethclient convention for the head block).
|
||||||
|
func parseBlockNumber(s string) (*big.Int, error) {
|
||||||
|
if strings.EqualFold(s, "latest") {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
n := new(big.Int)
|
||||||
|
if strings.HasPrefix(s, "0x") || strings.HasPrefix(s, "0X") {
|
||||||
|
if _, ok := n.SetString(s[2:], 16); !ok {
|
||||||
|
return nil, fmt.Errorf("invalid hex number")
|
||||||
|
}
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
|
if _, ok := n.SetString(s, 10); !ok {
|
||||||
|
return nil, fmt.Errorf("invalid decimal number")
|
||||||
|
}
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// jsonPayload is a JSON-friendly representation of Payload. It uses ExtWitness
|
||||||
|
// instead of the internal Witness (which has no JSON marshaling).
|
||||||
|
type jsonPayload struct {
|
||||||
|
ChainID uint64 `json:"chainId"`
|
||||||
|
Block *types.Block `json:"block"`
|
||||||
|
Witness *stateless.ExtWitness `json:"witness"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func marshalJSONPayload(chainID *big.Int, block *types.Block, ext *stateless.ExtWitness) ([]byte, error) {
|
||||||
|
return json.MarshalIndent(jsonPayload{
|
||||||
|
ChainID: chainID.Uint64(),
|
||||||
|
Block: block,
|
||||||
|
Witness: ext,
|
||||||
|
}, "", " ")
|
||||||
|
}
|
||||||
|
|
||||||
|
func fatal(format string, args ...any) {
|
||||||
|
fmt.Fprintf(os.Stderr, format+"\n", args...)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
408
cmd/geth/bintrie_convert.go
Normal file
408
cmd/geth/bintrie_convert.go
Normal file
|
|
@ -0,0 +1,408 @@
|
||||||
|
// Copyright 2026 The go-ethereum Authors
|
||||||
|
// This file is part of go-ethereum.
|
||||||
|
//
|
||||||
|
// go-ethereum is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// go-ethereum is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"runtime"
|
||||||
|
"runtime/debug"
|
||||||
|
"slices"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||||
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
|
"github.com/ethereum/go-ethereum/log"
|
||||||
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
|
"github.com/ethereum/go-ethereum/trie"
|
||||||
|
"github.com/ethereum/go-ethereum/trie/bintrie"
|
||||||
|
"github.com/ethereum/go-ethereum/trie/trienode"
|
||||||
|
"github.com/ethereum/go-ethereum/triedb"
|
||||||
|
"github.com/ethereum/go-ethereum/triedb/pathdb"
|
||||||
|
"github.com/urfave/cli/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
deleteSourceFlag = &cli.BoolFlag{
|
||||||
|
Name: "delete-source",
|
||||||
|
Usage: "Delete MPT trie nodes after conversion",
|
||||||
|
}
|
||||||
|
memoryLimitFlag = &cli.Uint64Flag{
|
||||||
|
Name: "memory-limit",
|
||||||
|
Usage: "Max heap allocation in MB before forcing a commit cycle",
|
||||||
|
Value: 16384,
|
||||||
|
}
|
||||||
|
|
||||||
|
bintrieCommand = &cli.Command{
|
||||||
|
Name: "bintrie",
|
||||||
|
Usage: "A set of commands for binary trie operations",
|
||||||
|
Description: "",
|
||||||
|
Subcommands: []*cli.Command{
|
||||||
|
{
|
||||||
|
Name: "convert",
|
||||||
|
Usage: "Convert MPT state to binary trie",
|
||||||
|
ArgsUsage: "[state-root]",
|
||||||
|
Action: convertToBinaryTrie,
|
||||||
|
Flags: slices.Concat([]cli.Flag{
|
||||||
|
deleteSourceFlag,
|
||||||
|
memoryLimitFlag,
|
||||||
|
}, utils.NetworkFlags, utils.DatabaseFlags),
|
||||||
|
Description: `
|
||||||
|
geth bintrie convert [--delete-source] [--memory-limit MB] [state-root]
|
||||||
|
|
||||||
|
Reads all state from the Merkle Patricia Trie and writes it into a Binary Trie,
|
||||||
|
operating offline. Memory-safe via periodic commit-and-reload cycles.
|
||||||
|
|
||||||
|
The optional state-root argument specifies which state root to convert.
|
||||||
|
If omitted, the head block's state root is used.
|
||||||
|
|
||||||
|
Flags:
|
||||||
|
--delete-source Delete MPT trie nodes after successful conversion
|
||||||
|
--memory-limit Max heap allocation in MB before forcing a commit (default: 16384)
|
||||||
|
`,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
type conversionStats struct {
|
||||||
|
accounts uint64
|
||||||
|
slots uint64
|
||||||
|
codes uint64
|
||||||
|
commits uint64
|
||||||
|
start time.Time
|
||||||
|
lastReport time.Time
|
||||||
|
lastMemChk time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *conversionStats) report(force bool) {
|
||||||
|
if !force && time.Since(s.lastReport) < 8*time.Second {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
elapsed := time.Since(s.start).Seconds()
|
||||||
|
acctRate := float64(0)
|
||||||
|
if elapsed > 0 {
|
||||||
|
acctRate = float64(s.accounts) / elapsed
|
||||||
|
}
|
||||||
|
log.Info("Conversion progress",
|
||||||
|
"accounts", s.accounts,
|
||||||
|
"slots", s.slots,
|
||||||
|
"codes", s.codes,
|
||||||
|
"commits", s.commits,
|
||||||
|
"accounts/sec", fmt.Sprintf("%.0f", acctRate),
|
||||||
|
"elapsed", common.PrettyDuration(time.Since(s.start)),
|
||||||
|
)
|
||||||
|
s.lastReport = time.Now()
|
||||||
|
}
|
||||||
|
|
||||||
|
func convertToBinaryTrie(ctx *cli.Context) error {
|
||||||
|
if ctx.NArg() > 1 {
|
||||||
|
return errors.New("too many arguments")
|
||||||
|
}
|
||||||
|
stack, _ := makeConfigNode(ctx)
|
||||||
|
defer stack.Close()
|
||||||
|
|
||||||
|
chaindb := utils.MakeChainDatabase(ctx, stack, false)
|
||||||
|
defer chaindb.Close()
|
||||||
|
|
||||||
|
headBlock := rawdb.ReadHeadBlock(chaindb)
|
||||||
|
if headBlock == nil {
|
||||||
|
return errors.New("no head block found")
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
root common.Hash
|
||||||
|
err error
|
||||||
|
)
|
||||||
|
if ctx.NArg() == 1 {
|
||||||
|
root, err = parseRoot(ctx.Args().First())
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("invalid state root: %w", err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
root = headBlock.Root()
|
||||||
|
}
|
||||||
|
log.Info("Starting MPT to binary trie conversion", "root", root, "block", headBlock.NumberU64())
|
||||||
|
|
||||||
|
srcTriedb := utils.MakeTrieDatabase(ctx, stack, chaindb, true, true, false)
|
||||||
|
defer srcTriedb.Close()
|
||||||
|
|
||||||
|
destTriedb := triedb.NewDatabase(chaindb, &triedb.Config{
|
||||||
|
IsVerkle: true,
|
||||||
|
PathDB: &pathdb.Config{
|
||||||
|
JournalDirectory: stack.ResolvePath("triedb-bintrie"),
|
||||||
|
},
|
||||||
|
})
|
||||||
|
defer destTriedb.Close()
|
||||||
|
|
||||||
|
binTrie, err := bintrie.NewBinaryTrie(types.EmptyBinaryHash, destTriedb)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to create binary trie: %w", err)
|
||||||
|
}
|
||||||
|
memLimit := ctx.Uint64(memoryLimitFlag.Name) * 1024 * 1024
|
||||||
|
|
||||||
|
currentRoot, err := runConversionLoop(chaindb, srcTriedb, destTriedb, binTrie, root, memLimit)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
log.Info("Conversion complete", "binaryRoot", currentRoot)
|
||||||
|
|
||||||
|
if ctx.Bool(deleteSourceFlag.Name) {
|
||||||
|
log.Info("Deleting source MPT data")
|
||||||
|
if err := deleteMPTData(chaindb, srcTriedb, root); err != nil {
|
||||||
|
return fmt.Errorf("MPT deletion failed: %w", err)
|
||||||
|
}
|
||||||
|
log.Info("Source MPT data deleted")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func runConversionLoop(chaindb ethdb.Database, srcTriedb *triedb.Database, destTriedb *triedb.Database, binTrie *bintrie.BinaryTrie, root common.Hash, memLimit uint64) (common.Hash, error) {
|
||||||
|
currentRoot := types.EmptyBinaryHash
|
||||||
|
stats := &conversionStats{
|
||||||
|
start: time.Now(),
|
||||||
|
lastReport: time.Now(),
|
||||||
|
lastMemChk: time.Now(),
|
||||||
|
}
|
||||||
|
|
||||||
|
srcTrie, err := trie.NewStateTrie(trie.StateTrieID(root), srcTriedb)
|
||||||
|
if err != nil {
|
||||||
|
return common.Hash{}, fmt.Errorf("failed to open source trie: %w", err)
|
||||||
|
}
|
||||||
|
acctIt, err := srcTrie.NodeIterator(nil)
|
||||||
|
if err != nil {
|
||||||
|
return common.Hash{}, fmt.Errorf("failed to create account iterator: %w", err)
|
||||||
|
}
|
||||||
|
accIter := trie.NewIterator(acctIt)
|
||||||
|
|
||||||
|
for accIter.Next() {
|
||||||
|
var acc types.StateAccount
|
||||||
|
if err := rlp.DecodeBytes(accIter.Value, &acc); err != nil {
|
||||||
|
return common.Hash{}, fmt.Errorf("invalid account RLP: %w", err)
|
||||||
|
}
|
||||||
|
addrBytes := srcTrie.GetKey(accIter.Key)
|
||||||
|
if addrBytes == nil {
|
||||||
|
return common.Hash{}, fmt.Errorf("missing preimage for account hash %x (run with --cache.preimages)", accIter.Key)
|
||||||
|
}
|
||||||
|
addr := common.BytesToAddress(addrBytes)
|
||||||
|
|
||||||
|
var code []byte
|
||||||
|
codeHash := common.BytesToHash(acc.CodeHash)
|
||||||
|
if codeHash != types.EmptyCodeHash {
|
||||||
|
code = rawdb.ReadCode(chaindb, codeHash)
|
||||||
|
if code == nil {
|
||||||
|
return common.Hash{}, fmt.Errorf("missing code for hash %x (account %x)", codeHash, addr)
|
||||||
|
}
|
||||||
|
stats.codes++
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := binTrie.UpdateAccount(addr, &acc, len(code)); err != nil {
|
||||||
|
return common.Hash{}, fmt.Errorf("failed to update account %x: %w", addr, err)
|
||||||
|
}
|
||||||
|
if len(code) > 0 {
|
||||||
|
if err := binTrie.UpdateContractCode(addr, codeHash, code); err != nil {
|
||||||
|
return common.Hash{}, fmt.Errorf("failed to update code for %x: %w", addr, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if acc.Root != types.EmptyRootHash {
|
||||||
|
addrHash := common.BytesToHash(accIter.Key)
|
||||||
|
storageTrie, err := trie.NewStateTrie(trie.StorageTrieID(root, addrHash, acc.Root), srcTriedb)
|
||||||
|
if err != nil {
|
||||||
|
return common.Hash{}, fmt.Errorf("failed to open storage trie for %x: %w", addr, err)
|
||||||
|
}
|
||||||
|
storageNodeIt, err := storageTrie.NodeIterator(nil)
|
||||||
|
if err != nil {
|
||||||
|
return common.Hash{}, fmt.Errorf("failed to create storage iterator for %x: %w", addr, err)
|
||||||
|
}
|
||||||
|
storageIter := trie.NewIterator(storageNodeIt)
|
||||||
|
|
||||||
|
slotCount := uint64(0)
|
||||||
|
for storageIter.Next() {
|
||||||
|
slotKey := storageTrie.GetKey(storageIter.Key)
|
||||||
|
if slotKey == nil {
|
||||||
|
return common.Hash{}, fmt.Errorf("missing preimage for storage key %x (account %x)", storageIter.Key, addr)
|
||||||
|
}
|
||||||
|
_, content, _, err := rlp.Split(storageIter.Value)
|
||||||
|
if err != nil {
|
||||||
|
return common.Hash{}, fmt.Errorf("invalid storage RLP for key %x (account %x): %w", slotKey, addr, err)
|
||||||
|
}
|
||||||
|
if err := binTrie.UpdateStorage(addr, slotKey, content); err != nil {
|
||||||
|
return common.Hash{}, fmt.Errorf("failed to update storage %x/%x: %w", addr, slotKey, err)
|
||||||
|
}
|
||||||
|
stats.slots++
|
||||||
|
slotCount++
|
||||||
|
|
||||||
|
if slotCount%10000 == 0 {
|
||||||
|
binTrie, currentRoot, err = maybeCommit(binTrie, currentRoot, destTriedb, memLimit, stats)
|
||||||
|
if err != nil {
|
||||||
|
return common.Hash{}, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if storageIter.Err != nil {
|
||||||
|
return common.Hash{}, fmt.Errorf("storage iteration error for %x: %w", addr, storageIter.Err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stats.accounts++
|
||||||
|
stats.report(false)
|
||||||
|
|
||||||
|
if stats.accounts%1000 == 0 {
|
||||||
|
binTrie, currentRoot, err = maybeCommit(binTrie, currentRoot, destTriedb, memLimit, stats)
|
||||||
|
if err != nil {
|
||||||
|
return common.Hash{}, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if accIter.Err != nil {
|
||||||
|
return common.Hash{}, fmt.Errorf("account iteration error: %w", accIter.Err)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, currentRoot, err = commitBinaryTrie(binTrie, currentRoot, destTriedb)
|
||||||
|
if err != nil {
|
||||||
|
return common.Hash{}, fmt.Errorf("final commit failed: %w", err)
|
||||||
|
}
|
||||||
|
stats.commits++
|
||||||
|
stats.report(true)
|
||||||
|
return currentRoot, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func maybeCommit(bt *bintrie.BinaryTrie, currentRoot common.Hash, destDB *triedb.Database, memLimit uint64, stats *conversionStats) (*bintrie.BinaryTrie, common.Hash, error) {
|
||||||
|
if time.Since(stats.lastMemChk) < 5*time.Second {
|
||||||
|
return bt, currentRoot, nil
|
||||||
|
}
|
||||||
|
stats.lastMemChk = time.Now()
|
||||||
|
|
||||||
|
var m runtime.MemStats
|
||||||
|
runtime.ReadMemStats(&m)
|
||||||
|
if m.Alloc < memLimit {
|
||||||
|
return bt, currentRoot, nil
|
||||||
|
}
|
||||||
|
log.Info("Memory limit reached, committing", "alloc", common.StorageSize(m.Alloc), "limit", common.StorageSize(memLimit))
|
||||||
|
|
||||||
|
bt, currentRoot, err := commitBinaryTrie(bt, currentRoot, destDB)
|
||||||
|
if err != nil {
|
||||||
|
return nil, common.Hash{}, err
|
||||||
|
}
|
||||||
|
stats.commits++
|
||||||
|
stats.report(true)
|
||||||
|
return bt, currentRoot, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func commitBinaryTrie(bt *bintrie.BinaryTrie, currentRoot common.Hash, destDB *triedb.Database) (*bintrie.BinaryTrie, common.Hash, error) {
|
||||||
|
newRoot, nodeSet := bt.Commit(false)
|
||||||
|
if nodeSet != nil {
|
||||||
|
merged := trienode.NewWithNodeSet(nodeSet)
|
||||||
|
if err := destDB.Update(newRoot, currentRoot, 0, merged, triedb.NewStateSet()); err != nil {
|
||||||
|
return nil, common.Hash{}, fmt.Errorf("triedb update failed: %w", err)
|
||||||
|
}
|
||||||
|
if err := destDB.Commit(newRoot, false); err != nil {
|
||||||
|
return nil, common.Hash{}, fmt.Errorf("triedb commit failed: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
runtime.GC()
|
||||||
|
debug.FreeOSMemory()
|
||||||
|
|
||||||
|
bt, err := bintrie.NewBinaryTrie(newRoot, destDB)
|
||||||
|
if err != nil {
|
||||||
|
return nil, common.Hash{}, fmt.Errorf("failed to reload binary trie: %w", err)
|
||||||
|
}
|
||||||
|
return bt, newRoot, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// deleteMPTData removes every node of the Merkle-Patricia state trie rooted
// at root from chaindb, including the nodes of all storage tries reachable
// from its account leaves. Deletions are buffered in a write batch and
// flushed whenever the batch reaches ethdb.IdealBatchSize.
//
// It returns an error if the trie cannot be opened or iterated, if an
// account leaf fails to RLP-decode, or if a batch write fails.
func deleteMPTData(chaindb ethdb.Database, srcTriedb *triedb.Database, root common.Hash) error {
	// Path-scheme databases address trie nodes by path, hash-scheme (legacy)
	// ones by node hash; the deletion primitive used below differs accordingly.
	isPathDB := srcTriedb.Scheme() == rawdb.PathScheme

	srcTrie, err := trie.NewStateTrie(trie.StateTrieID(root), srcTriedb)
	if err != nil {
		return fmt.Errorf("failed to open source trie for deletion: %w", err)
	}
	acctIt, err := srcTrie.NodeIterator(nil)
	if err != nil {
		return fmt.Errorf("failed to create account iterator for deletion: %w", err)
	}
	batch := chaindb.NewBatch()
	// Running count of trie nodes queued for deletion (account + storage).
	deleted := 0

	for acctIt.Next(true) {
		// Queue the current account-trie node for deletion.
		if isPathDB {
			rawdb.DeleteAccountTrieNode(batch, acctIt.Path())
		} else {
			node := acctIt.Hash()
			// A zero hash marks an embedded node with no standalone database
			// entry, so there is nothing to delete for it.
			if node != (common.Hash{}) {
				rawdb.DeleteLegacyTrieNode(batch, node)
			}
		}
		deleted++

		if acctIt.Leaf() {
			var acc types.StateAccount
			if err := rlp.DecodeBytes(acctIt.LeafBlob(), &acc); err != nil {
				return fmt.Errorf("invalid account during deletion: %w", err)
			}
			// A non-empty storage root means the account owns a storage trie
			// that must be walked and deleted as well.
			if acc.Root != types.EmptyRootHash {
				addrHash := common.BytesToHash(acctIt.LeafKey())
				storageTrie, err := trie.NewStateTrie(trie.StorageTrieID(root, addrHash, acc.Root), srcTriedb)
				if err != nil {
					return fmt.Errorf("failed to open storage trie for deletion: %w", err)
				}
				storageIt, err := storageTrie.NodeIterator(nil)
				if err != nil {
					return fmt.Errorf("failed to create storage iterator for deletion: %w", err)
				}
				for storageIt.Next(true) {
					if isPathDB {
						rawdb.DeleteStorageTrieNode(batch, addrHash, storageIt.Path())
					} else {
						node := storageIt.Hash()
						if node != (common.Hash{}) {
							rawdb.DeleteLegacyTrieNode(batch, node)
						}
					}
					deleted++
					// Flush periodically so the batch does not grow unbounded
					// while walking a large storage trie.
					if batch.ValueSize() >= ethdb.IdealBatchSize {
						if err := batch.Write(); err != nil {
							return fmt.Errorf("batch write failed: %w", err)
						}
						batch.Reset()
					}
				}
				if storageIt.Error() != nil {
					return fmt.Errorf("storage deletion iterator error: %w", storageIt.Error())
				}
			}
		}
		// Flush between accounts too, in case the batch filled up from
		// account nodes alone.
		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return fmt.Errorf("batch write failed: %w", err)
			}
			batch.Reset()
		}
	}
	if acctIt.Error() != nil {
		return fmt.Errorf("account deletion iterator error: %w", acctIt.Error())
	}
	// Write out whatever remains in the batch.
	if batch.ValueSize() > 0 {
		if err := batch.Write(); err != nil {
			return fmt.Errorf("final batch write failed: %w", err)
		}
	}
	log.Info("MPT deletion complete", "nodesDeleted", deleted)
	return nil
}
|
||||||
229
cmd/geth/bintrie_convert_test.go
Normal file
229
cmd/geth/bintrie_convert_test.go
Normal file
|
|
@ -0,0 +1,229 @@
|
||||||
|
// Copyright 2026 The go-ethereum Authors
|
||||||
|
// This file is part of go-ethereum.
|
||||||
|
//
|
||||||
|
// go-ethereum is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// go-ethereum is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"math/big"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/core"
|
||||||
|
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||||
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
|
"github.com/ethereum/go-ethereum/params"
|
||||||
|
"github.com/ethereum/go-ethereum/trie/bintrie"
|
||||||
|
"github.com/ethereum/go-ethereum/triedb"
|
||||||
|
"github.com/ethereum/go-ethereum/triedb/pathdb"
|
||||||
|
"github.com/holiman/uint256"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestBintrieConvert converts a small genesis MPT state (two accounts, one
// with code and two storage slots) into a binary trie via runConversionLoop,
// then reopens the binary trie and verifies that nonces, balances and both
// storage slot values survived the conversion.
func TestBintrieConvert(t *testing.T) {
	var (
		addr1    = common.HexToAddress("0x1111111111111111111111111111111111111111")
		addr2    = common.HexToAddress("0x2222222222222222222222222222222222222222")
		slotKey1 = common.HexToHash("0x01")
		slotKey2 = common.HexToHash("0x02")
		slotVal1 = common.HexToHash("0xdeadbeef")
		slotVal2 = common.HexToHash("0xcafebabe")
		code     = []byte{0x60, 0x42, 0x60, 0x00, 0x52, 0x60, 0x20, 0x60, 0x00, 0xf3}
	)

	chaindb := rawdb.NewMemoryDatabase()

	// Source trie database: preimages must be enabled because the converter
	// resolves hashed keys back to addresses/slot keys via GetKey.
	srcTriedb := triedb.NewDatabase(chaindb, &triedb.Config{
		Preimages: true,
		PathDB:    pathdb.Defaults,
	})

	gspec := &core.Genesis{
		Config:  params.TestChainConfig,
		BaseFee: big.NewInt(params.InitialBaseFee),
		Alloc: types.GenesisAlloc{
			addr1: {
				Balance: big.NewInt(1000000),
				Nonce:   5,
			},
			addr2: {
				Balance: big.NewInt(2000000),
				Nonce:   10,
				Code:    code,
				Storage: map[common.Hash]common.Hash{
					slotKey1: slotVal1,
					slotKey2: slotVal2,
				},
			},
		},
	}

	genesisBlock := gspec.MustCommit(chaindb, srcTriedb)
	root := genesisBlock.Root()
	t.Logf("Genesis root: %x", root)
	// Close the writable handle before reopening the same backing store
	// read-only for the conversion.
	srcTriedb.Close()

	srcTriedb2 := triedb.NewDatabase(chaindb, &triedb.Config{
		Preimages: true,
		PathDB:    &pathdb.Config{ReadOnly: true},
	})
	defer srcTriedb2.Close()

	// Destination database for the binary trie. NOTE(review): IsVerkle is
	// set here even though the target is a binary trie — presumably the
	// binary trie shares the verkle database pathway; confirm against the
	// triedb config semantics.
	destTriedb := triedb.NewDatabase(chaindb, &triedb.Config{
		IsVerkle: true,
		PathDB:   pathdb.Defaults,
	})
	defer destTriedb.Close()

	bt, err := bintrie.NewBinaryTrie(types.EmptyBinaryHash, destTriedb)
	if err != nil {
		t.Fatalf("failed to create binary trie: %v", err)
	}

	// math.MaxUint64 as the memory limit disables mid-conversion commits.
	currentRoot, err := runConversionLoop(chaindb, srcTriedb2, destTriedb, bt, root, math.MaxUint64)
	if err != nil {
		t.Fatalf("conversion failed: %v", err)
	}
	t.Logf("Binary trie root: %x", currentRoot)

	// Reopen the binary trie from the committed root to verify persistence.
	bt2, err := bintrie.NewBinaryTrie(currentRoot, destTriedb)
	if err != nil {
		t.Fatalf("failed to reload binary trie: %v", err)
	}

	acc1, err := bt2.GetAccount(addr1)
	if err != nil {
		t.Fatalf("failed to get account1: %v", err)
	}
	if acc1 == nil {
		t.Fatal("account1 not found in binary trie")
	}
	if acc1.Nonce != 5 {
		t.Errorf("account1 nonce: got %d, want 5", acc1.Nonce)
	}
	wantBal1 := uint256.NewInt(1000000)
	if acc1.Balance.Cmp(wantBal1) != 0 {
		t.Errorf("account1 balance: got %s, want %s", acc1.Balance, wantBal1)
	}

	acc2, err := bt2.GetAccount(addr2)
	if err != nil {
		t.Fatalf("failed to get account2: %v", err)
	}
	if acc2 == nil {
		t.Fatal("account2 not found in binary trie")
	}
	if acc2.Nonce != 10 {
		t.Errorf("account2 nonce: got %d, want 10", acc2.Nonce)
	}
	wantBal2 := uint256.NewInt(2000000)
	if acc2.Balance.Cmp(wantBal2) != 0 {
		t.Errorf("account2 balance: got %s, want %s", acc2.Balance, wantBal2)
	}

	// Storage slots are addressed in the binary trie by their derived tree
	// key, so look them up via GetWithHashedKey.
	treeKey1 := bintrie.GetBinaryTreeKeyStorageSlot(addr2, slotKey1[:])
	val1, err := bt2.GetWithHashedKey(treeKey1)
	if err != nil {
		t.Fatalf("failed to get storage slot1: %v", err)
	}
	if len(val1) == 0 {
		t.Fatal("storage slot1 not found")
	}
	got1 := common.BytesToHash(val1)
	if got1 != slotVal1 {
		t.Errorf("storage slot1: got %x, want %x", got1, slotVal1)
	}

	treeKey2 := bintrie.GetBinaryTreeKeyStorageSlot(addr2, slotKey2[:])
	val2, err := bt2.GetWithHashedKey(treeKey2)
	if err != nil {
		t.Fatalf("failed to get storage slot2: %v", err)
	}
	if len(val2) == 0 {
		t.Fatal("storage slot2 not found")
	}
	got2 := common.BytesToHash(val2)
	if got2 != slotVal2 {
		t.Errorf("storage slot2: got %x, want %x", got2, slotVal2)
	}
}
|
||||||
|
|
||||||
|
// TestBintrieConvertDeleteSource converts a single-account genesis state to a
// binary trie, deletes the source MPT data with deleteMPTData, and then
// verifies the converted account is still readable from the binary trie —
// i.e. that the conversion output does not depend on the deleted MPT nodes.
func TestBintrieConvertDeleteSource(t *testing.T) {
	addr1 := common.HexToAddress("0x3333333333333333333333333333333333333333")

	chaindb := rawdb.NewMemoryDatabase()

	// Writable source database; preimages are needed by the converter.
	srcTriedb := triedb.NewDatabase(chaindb, &triedb.Config{
		Preimages: true,
		PathDB:    pathdb.Defaults,
	})

	gspec := &core.Genesis{
		Config:  params.TestChainConfig,
		BaseFee: big.NewInt(params.InitialBaseFee),
		Alloc: types.GenesisAlloc{
			addr1: {
				Balance: big.NewInt(1000000),
			},
		},
	}

	genesisBlock := gspec.MustCommit(chaindb, srcTriedb)
	root := genesisBlock.Root()
	// Close the writable handle before reopening read-only for conversion.
	srcTriedb.Close()

	srcTriedb2 := triedb.NewDatabase(chaindb, &triedb.Config{
		Preimages: true,
		PathDB:    &pathdb.Config{ReadOnly: true},
	})

	destTriedb := triedb.NewDatabase(chaindb, &triedb.Config{
		IsVerkle: true,
		PathDB:   pathdb.Defaults,
	})

	bt, err := bintrie.NewBinaryTrie(types.EmptyBinaryHash, destTriedb)
	if err != nil {
		t.Fatalf("failed to create binary trie: %v", err)
	}

	// math.MaxUint64 as the memory limit disables mid-conversion commits.
	newRoot, err := runConversionLoop(chaindb, srcTriedb2, destTriedb, bt, root, math.MaxUint64)
	if err != nil {
		t.Fatalf("conversion failed: %v", err)
	}

	// Remove the original MPT nodes from the shared backing database.
	if err := deleteMPTData(chaindb, srcTriedb2, root); err != nil {
		t.Fatalf("deletion failed: %v", err)
	}
	srcTriedb2.Close()

	// Reload the binary trie after deletion and check the account survived.
	bt2, err := bintrie.NewBinaryTrie(newRoot, destTriedb)
	if err != nil {
		t.Fatalf("failed to reload binary trie after deletion: %v", err)
	}

	acc, err := bt2.GetAccount(addr1)
	if err != nil {
		t.Fatalf("failed to get account after deletion: %v", err)
	}
	if acc == nil {
		t.Fatal("account not found after MPT deletion")
	}
	wantBal := uint256.NewInt(1000000)
	if acc.Balance.Cmp(wantBal) != 0 {
		t.Errorf("balance after deletion: got %s, want %s", acc.Balance, wantBal)
	}
	destTriedb.Close()
}
|
||||||
|
|
@ -20,6 +20,7 @@ import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"regexp"
|
"regexp"
|
||||||
|
|
@ -43,6 +44,8 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/internal/debug"
|
"github.com/ethereum/go-ethereum/internal/debug"
|
||||||
"github.com/ethereum/go-ethereum/internal/era"
|
"github.com/ethereum/go-ethereum/internal/era"
|
||||||
"github.com/ethereum/go-ethereum/internal/era/eradl"
|
"github.com/ethereum/go-ethereum/internal/era/eradl"
|
||||||
|
"github.com/ethereum/go-ethereum/internal/era/execdb"
|
||||||
|
"github.com/ethereum/go-ethereum/internal/era/onedb"
|
||||||
"github.com/ethereum/go-ethereum/internal/flags"
|
"github.com/ethereum/go-ethereum/internal/flags"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/node"
|
"github.com/ethereum/go-ethereum/node"
|
||||||
|
|
@ -96,6 +99,7 @@ if one is set. Otherwise it prints the genesis from the datadir.`,
|
||||||
utils.CacheNoPrefetchFlag,
|
utils.CacheNoPrefetchFlag,
|
||||||
utils.CachePreimagesFlag,
|
utils.CachePreimagesFlag,
|
||||||
utils.NoCompactionFlag,
|
utils.NoCompactionFlag,
|
||||||
|
utils.LogSlowBlockFlag,
|
||||||
utils.MetricsEnabledFlag,
|
utils.MetricsEnabledFlag,
|
||||||
utils.MetricsEnabledExpensiveFlag,
|
utils.MetricsEnabledExpensiveFlag,
|
||||||
utils.MetricsHTTPFlag,
|
utils.MetricsHTTPFlag,
|
||||||
|
|
@ -107,6 +111,7 @@ if one is set. Otherwise it prints the genesis from the datadir.`,
|
||||||
utils.MetricsInfluxDBUsernameFlag,
|
utils.MetricsInfluxDBUsernameFlag,
|
||||||
utils.MetricsInfluxDBPasswordFlag,
|
utils.MetricsInfluxDBPasswordFlag,
|
||||||
utils.MetricsInfluxDBTagsFlag,
|
utils.MetricsInfluxDBTagsFlag,
|
||||||
|
utils.MetricsInfluxDBIntervalFlag,
|
||||||
utils.MetricsInfluxDBTokenFlag,
|
utils.MetricsInfluxDBTokenFlag,
|
||||||
utils.MetricsInfluxDBBucketFlag,
|
utils.MetricsInfluxDBBucketFlag,
|
||||||
utils.MetricsInfluxDBOrganizationFlag,
|
utils.MetricsInfluxDBOrganizationFlag,
|
||||||
|
|
@ -119,6 +124,8 @@ if one is set. Otherwise it prints the genesis from the datadir.`,
|
||||||
utils.LogNoHistoryFlag,
|
utils.LogNoHistoryFlag,
|
||||||
utils.LogExportCheckpointsFlag,
|
utils.LogExportCheckpointsFlag,
|
||||||
utils.StateHistoryFlag,
|
utils.StateHistoryFlag,
|
||||||
|
utils.TrienodeHistoryFlag,
|
||||||
|
utils.TrienodeHistoryFullValueCheckpointFlag,
|
||||||
}, utils.DatabaseFlags, debug.Flags),
|
}, utils.DatabaseFlags, debug.Flags),
|
||||||
Before: func(ctx *cli.Context) error {
|
Before: func(ctx *cli.Context) error {
|
||||||
flags.MigrateGlobalFlags(ctx)
|
flags.MigrateGlobalFlags(ctx)
|
||||||
|
|
@ -150,7 +157,7 @@ be gzipped.`,
|
||||||
Name: "import-history",
|
Name: "import-history",
|
||||||
Usage: "Import an Era archive",
|
Usage: "Import an Era archive",
|
||||||
ArgsUsage: "<dir>",
|
ArgsUsage: "<dir>",
|
||||||
Flags: slices.Concat([]cli.Flag{utils.TxLookupLimitFlag, utils.TransactionHistoryFlag}, utils.DatabaseFlags, utils.NetworkFlags),
|
Flags: slices.Concat([]cli.Flag{utils.TxLookupLimitFlag, utils.TransactionHistoryFlag, utils.EraFormatFlag}, utils.DatabaseFlags, utils.NetworkFlags),
|
||||||
Description: `
|
Description: `
|
||||||
The import-history command will import blocks and their corresponding receipts
|
The import-history command will import blocks and their corresponding receipts
|
||||||
from Era archives.
|
from Era archives.
|
||||||
|
|
@ -161,7 +168,7 @@ from Era archives.
|
||||||
Name: "export-history",
|
Name: "export-history",
|
||||||
Usage: "Export blockchain history to Era archives",
|
Usage: "Export blockchain history to Era archives",
|
||||||
ArgsUsage: "<dir> <first> <last>",
|
ArgsUsage: "<dir> <first> <last>",
|
||||||
Flags: utils.DatabaseFlags,
|
Flags: slices.Concat([]cli.Flag{utils.EraFormatFlag}, utils.DatabaseFlags),
|
||||||
Description: `
|
Description: `
|
||||||
The export-history command will export blocks and their corresponding receipts
|
The export-history command will export blocks and their corresponding receipts
|
||||||
into Era archives. Eras are typically packaged in steps of 8192 blocks.
|
into Era archives. Eras are typically packaged in steps of 8192 blocks.
|
||||||
|
|
@ -201,13 +208,19 @@ This command dumps out the state for a given block (or latest, if none provided)
|
||||||
pruneHistoryCommand = &cli.Command{
|
pruneHistoryCommand = &cli.Command{
|
||||||
Action: pruneHistory,
|
Action: pruneHistory,
|
||||||
Name: "prune-history",
|
Name: "prune-history",
|
||||||
Usage: "Prune blockchain history (block bodies and receipts) up to the merge block",
|
Usage: "Prune blockchain history (block bodies and receipts) up to a specified point",
|
||||||
ArgsUsage: "",
|
ArgsUsage: "",
|
||||||
Flags: utils.DatabaseFlags,
|
Flags: slices.Concat(utils.DatabaseFlags, []cli.Flag{
|
||||||
|
utils.ChainHistoryFlag,
|
||||||
|
}),
|
||||||
Description: `
|
Description: `
|
||||||
The prune-history command removes historical block bodies and receipts from the
|
The prune-history command removes historical block bodies and receipts from the
|
||||||
blockchain database up to the merge block, while preserving block headers. This
|
blockchain database up to a specified point, while preserving block headers. This
|
||||||
helps reduce storage requirements for nodes that don't need full historical data.`,
|
helps reduce storage requirements for nodes that don't need full historical data.
|
||||||
|
|
||||||
|
The --history.chain flag is required to specify the pruning target:
|
||||||
|
- postmerge: Prune up to the merge block. The node will keep the merge block and everything thereafter.
|
||||||
|
- postprague: Prune up to the Prague (Pectra) upgrade block. The node will keep the prague block and everything thereafter.`,
|
||||||
}
|
}
|
||||||
|
|
||||||
downloadEraCommand = &cli.Command{
|
downloadEraCommand = &cli.Command{
|
||||||
|
|
@ -295,7 +308,7 @@ func initGenesis(ctx *cli.Context) error {
|
||||||
triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false, genesis.IsVerkle())
|
triedb := utils.MakeTrieDatabase(ctx, stack, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false, genesis.IsVerkle())
|
||||||
defer triedb.Close()
|
defer triedb.Close()
|
||||||
|
|
||||||
_, hash, compatErr, err := core.SetupGenesisBlockWithOverride(chaindb, triedb, genesis, &overrides)
|
_, hash, compatErr, err := core.SetupGenesisBlockWithOverride(chaindb, triedb, genesis, &overrides, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatalf("Failed to write genesis block: %v", err)
|
utils.Fatalf("Failed to write genesis block: %v", err)
|
||||||
}
|
}
|
||||||
|
|
@ -513,15 +526,27 @@ func importHistory(ctx *cli.Context) error {
|
||||||
network = networks[0]
|
network = networks[0]
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := utils.ImportHistory(chain, dir, network); err != nil {
|
var (
|
||||||
|
format = ctx.String(utils.EraFormatFlag.Name)
|
||||||
|
from func(era.ReadAtSeekCloser) (era.Era, error)
|
||||||
|
)
|
||||||
|
switch format {
|
||||||
|
case "era1", "era":
|
||||||
|
from = onedb.From
|
||||||
|
case "erae":
|
||||||
|
from = execdb.From
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("unknown --era.format %q (expected 'era1' or 'erae')", format)
|
||||||
|
}
|
||||||
|
if err := utils.ImportHistory(chain, dir, network, from); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("Import done in %v\n", time.Since(start))
|
fmt.Printf("Import done in %v\n", time.Since(start))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// exportHistory exports chain history in Era archives at a specified
|
// exportHistory exports chain history in Era archives at a specified directory.
|
||||||
// directory.
|
|
||||||
func exportHistory(ctx *cli.Context) error {
|
func exportHistory(ctx *cli.Context) error {
|
||||||
if ctx.Args().Len() != 3 {
|
if ctx.Args().Len() != 3 {
|
||||||
utils.Fatalf("usage: %s", ctx.Command.ArgsUsage)
|
utils.Fatalf("usage: %s", ctx.Command.ArgsUsage)
|
||||||
|
|
@ -547,10 +572,26 @@ func exportHistory(ctx *cli.Context) error {
|
||||||
if head := chain.CurrentSnapBlock(); uint64(last) > head.Number.Uint64() {
|
if head := chain.CurrentSnapBlock(); uint64(last) > head.Number.Uint64() {
|
||||||
utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.Number.Uint64())
|
utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.Number.Uint64())
|
||||||
}
|
}
|
||||||
err := utils.ExportHistory(chain, dir, uint64(first), uint64(last), uint64(era.MaxEra1Size))
|
|
||||||
if err != nil {
|
var (
|
||||||
|
format = ctx.String(utils.EraFormatFlag.Name)
|
||||||
|
filename func(network string, epoch int, root common.Hash) string
|
||||||
|
newBuilder func(w io.Writer) era.Builder
|
||||||
|
)
|
||||||
|
switch format {
|
||||||
|
case "era1", "era":
|
||||||
|
newBuilder = func(w io.Writer) era.Builder { return onedb.NewBuilder(w) }
|
||||||
|
filename = func(network string, epoch int, root common.Hash) string { return onedb.Filename(network, epoch, root) }
|
||||||
|
case "erae":
|
||||||
|
newBuilder = func(w io.Writer) era.Builder { return execdb.NewBuilder(w) }
|
||||||
|
filename = func(network string, epoch int, root common.Hash) string { return execdb.Filename(network, epoch, root) }
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("unknown archive format %q (use 'era1' or 'erae')", format)
|
||||||
|
}
|
||||||
|
if err := utils.ExportHistory(chain, dir, uint64(first), uint64(last), newBuilder, filename); err != nil {
|
||||||
utils.Fatalf("Export error: %v\n", err)
|
utils.Fatalf("Export error: %v\n", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Printf("Export done in %v\n", time.Since(start))
|
fmt.Printf("Export done in %v\n", time.Since(start))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
@ -668,47 +709,77 @@ func hashish(x string) bool {
|
||||||
}
|
}
|
||||||
|
|
||||||
func pruneHistory(ctx *cli.Context) error {
|
func pruneHistory(ctx *cli.Context) error {
|
||||||
|
// Parse and validate the history mode flag.
|
||||||
|
if !ctx.IsSet(utils.ChainHistoryFlag.Name) {
|
||||||
|
return errors.New("--history.chain flag is required")
|
||||||
|
}
|
||||||
|
var mode history.HistoryMode
|
||||||
|
if err := mode.UnmarshalText([]byte(ctx.String(utils.ChainHistoryFlag.Name))); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if mode == history.KeepAll {
|
||||||
|
return errors.New("--history.chain=all is not valid for pruning. To restore history, use 'geth import-history'")
|
||||||
|
}
|
||||||
|
|
||||||
stack, _ := makeConfigNode(ctx)
|
stack, _ := makeConfigNode(ctx)
|
||||||
defer stack.Close()
|
defer stack.Close()
|
||||||
|
|
||||||
// Open the chain database
|
// Open the chain database.
|
||||||
chain, chaindb := utils.MakeChain(ctx, stack, false)
|
chain, chaindb := utils.MakeChain(ctx, stack, false)
|
||||||
defer chaindb.Close()
|
defer chaindb.Close()
|
||||||
defer chain.Stop()
|
defer chain.Stop()
|
||||||
|
|
||||||
// Determine the prune point. This will be the first PoS block.
|
// Determine the prune point based on the history mode.
|
||||||
prunePoint, ok := history.PrunePoints[chain.Genesis().Hash()]
|
genesisHash := chain.Genesis().Hash()
|
||||||
if !ok || prunePoint == nil {
|
policy, err := history.NewPolicy(mode, genesisHash)
|
||||||
return errors.New("prune point not found")
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if policy.Target == nil {
|
||||||
|
return fmt.Errorf("prune point for %q not found for this network", mode.String())
|
||||||
}
|
}
|
||||||
var (
|
var (
|
||||||
mergeBlock = prunePoint.BlockNumber
|
targetBlock = policy.Target.BlockNumber
|
||||||
mergeBlockHash = prunePoint.BlockHash.Hex()
|
targetBlockHash = policy.Target.BlockHash
|
||||||
)
|
)
|
||||||
|
|
||||||
// Check we're far enough past merge to ensure all data is in freezer
|
// Check the current freezer tail to see if pruning is needed/possible.
|
||||||
|
freezerTail, _ := chaindb.Tail()
|
||||||
|
if freezerTail > 0 {
|
||||||
|
if freezerTail == targetBlock {
|
||||||
|
log.Info("Database already pruned to target block", "tail", freezerTail)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if freezerTail > targetBlock {
|
||||||
|
// Database is pruned beyond the target - can't unprune.
|
||||||
|
return fmt.Errorf("database is already pruned to block %d, which is beyond target %d. Cannot unprune. To restore history, use 'geth import-history'", freezerTail, targetBlock)
|
||||||
|
}
|
||||||
|
// freezerTail < targetBlock: we can prune further, continue below.
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check we're far enough past the target to ensure all data is in freezer.
|
||||||
currentHeader := chain.CurrentHeader()
|
currentHeader := chain.CurrentHeader()
|
||||||
if currentHeader == nil {
|
if currentHeader == nil {
|
||||||
return errors.New("current header not found")
|
return errors.New("current header not found")
|
||||||
}
|
}
|
||||||
if currentHeader.Number.Uint64() < mergeBlock+params.FullImmutabilityThreshold {
|
if currentHeader.Number.Uint64() < targetBlock+params.FullImmutabilityThreshold {
|
||||||
return fmt.Errorf("chain not far enough past merge block, need %d more blocks",
|
return fmt.Errorf("chain not far enough past target block %d, need %d more blocks",
|
||||||
mergeBlock+params.FullImmutabilityThreshold-currentHeader.Number.Uint64())
|
targetBlock, targetBlock+params.FullImmutabilityThreshold-currentHeader.Number.Uint64())
|
||||||
}
|
}
|
||||||
|
|
||||||
// Double-check the prune block in db has the expected hash.
|
// Double-check the target block in db has the expected hash.
|
||||||
hash := rawdb.ReadCanonicalHash(chaindb, mergeBlock)
|
hash := rawdb.ReadCanonicalHash(chaindb, targetBlock)
|
||||||
if hash != common.HexToHash(mergeBlockHash) {
|
if hash != targetBlockHash {
|
||||||
return fmt.Errorf("merge block hash mismatch: got %s, want %s", hash.Hex(), mergeBlockHash)
|
return fmt.Errorf("target block hash mismatch: got %s, want %s", hash.Hex(), targetBlockHash.Hex())
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Info("Starting history pruning", "head", currentHeader.Number, "tail", mergeBlock, "tailHash", mergeBlockHash)
|
log.Info("Starting history pruning", "head", currentHeader.Number, "target", targetBlock, "targetHash", targetBlockHash.Hex())
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
rawdb.PruneTransactionIndex(chaindb, mergeBlock)
|
rawdb.PruneTransactionIndex(chaindb, targetBlock)
|
||||||
if _, err := chaindb.TruncateTail(mergeBlock); err != nil {
|
if _, err := chaindb.TruncateTail(targetBlock); err != nil {
|
||||||
return fmt.Errorf("failed to truncate ancient data: %v", err)
|
return fmt.Errorf("failed to truncate ancient data: %v", err)
|
||||||
}
|
}
|
||||||
log.Info("History pruning completed", "tail", mergeBlock, "elapsed", common.PrettyDuration(time.Since(start)))
|
log.Info("History pruning completed", "tail", targetBlock, "elapsed", common.PrettyDuration(time.Since(start)))
|
||||||
|
|
||||||
// TODO(s1na): what if there is a crash between the two prune operations?
|
// TODO(s1na): what if there is a crash between the two prune operations?
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -35,11 +35,11 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/beacon/blsync"
|
"github.com/ethereum/go-ethereum/beacon/blsync"
|
||||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/eth/catalyst"
|
"github.com/ethereum/go-ethereum/eth/catalyst"
|
||||||
"github.com/ethereum/go-ethereum/eth/ethconfig"
|
"github.com/ethereum/go-ethereum/eth/ethconfig"
|
||||||
"github.com/ethereum/go-ethereum/internal/flags"
|
"github.com/ethereum/go-ethereum/internal/flags"
|
||||||
|
"github.com/ethereum/go-ethereum/internal/telemetry/tracesetup"
|
||||||
"github.com/ethereum/go-ethereum/internal/version"
|
"github.com/ethereum/go-ethereum/internal/version"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/metrics"
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
|
|
@ -240,9 +240,15 @@ func makeFullNode(ctx *cli.Context) *node.Node {
|
||||||
cfg.Eth.OverrideVerkle = &v
|
cfg.Eth.OverrideVerkle = &v
|
||||||
}
|
}
|
||||||
|
|
||||||
// Start metrics export if enabled
|
// Start metrics export if enabled.
|
||||||
utils.SetupMetrics(&cfg.Metrics)
|
utils.SetupMetrics(&cfg.Metrics)
|
||||||
|
|
||||||
|
// Setup OpenTelemetry reporting if enabled.
|
||||||
|
if err := tracesetup.SetupTelemetry(cfg.Node.OpenTelemetry, stack); err != nil {
|
||||||
|
utils.Fatalf("failed to setup OpenTelemetry: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add Ethereum service.
|
||||||
backend, eth := utils.RegisterEthService(stack, &cfg.Eth)
|
backend, eth := utils.RegisterEthService(stack, &cfg.Eth)
|
||||||
|
|
||||||
// Create gauge with geth system and build information
|
// Create gauge with geth system and build information
|
||||||
|
|
@ -273,11 +279,11 @@ func makeFullNode(ctx *cli.Context) *node.Node {
|
||||||
// Configure synchronization override service
|
// Configure synchronization override service
|
||||||
var synctarget common.Hash
|
var synctarget common.Hash
|
||||||
if ctx.IsSet(utils.SyncTargetFlag.Name) {
|
if ctx.IsSet(utils.SyncTargetFlag.Name) {
|
||||||
hex := hexutil.MustDecode(ctx.String(utils.SyncTargetFlag.Name))
|
target := ctx.String(utils.SyncTargetFlag.Name)
|
||||||
if len(hex) != common.HashLength {
|
if !common.IsHexHash(target) {
|
||||||
utils.Fatalf("invalid sync target length: have %d, want %d", len(hex), common.HashLength)
|
utils.Fatalf("sync target hash is not a valid hex hash: %s", target)
|
||||||
}
|
}
|
||||||
synctarget = common.BytesToHash(hex)
|
synctarget = common.HexToHash(target)
|
||||||
}
|
}
|
||||||
utils.RegisterSyncOverrideService(stack, eth, synctarget, ctx.Bool(utils.ExitWhenSyncedFlag.Name))
|
utils.RegisterSyncOverrideService(stack, eth, synctarget, ctx.Bool(utils.ExitWhenSyncedFlag.Name))
|
||||||
|
|
||||||
|
|
@ -371,6 +377,9 @@ func applyMetricConfig(ctx *cli.Context, cfg *gethConfig) {
|
||||||
if ctx.IsSet(utils.MetricsInfluxDBTagsFlag.Name) {
|
if ctx.IsSet(utils.MetricsInfluxDBTagsFlag.Name) {
|
||||||
cfg.Metrics.InfluxDBTags = ctx.String(utils.MetricsInfluxDBTagsFlag.Name)
|
cfg.Metrics.InfluxDBTags = ctx.String(utils.MetricsInfluxDBTagsFlag.Name)
|
||||||
}
|
}
|
||||||
|
if ctx.IsSet(utils.MetricsInfluxDBIntervalFlag.Name) {
|
||||||
|
cfg.Metrics.InfluxDBInterval = ctx.Duration(utils.MetricsInfluxDBIntervalFlag.Name)
|
||||||
|
}
|
||||||
if ctx.IsSet(utils.MetricsEnableInfluxDBV2Flag.Name) {
|
if ctx.IsSet(utils.MetricsEnableInfluxDBV2Flag.Name) {
|
||||||
cfg.Metrics.EnableInfluxDBV2 = ctx.Bool(utils.MetricsEnableInfluxDBV2Flag.Name)
|
cfg.Metrics.EnableInfluxDBV2 = ctx.Bool(utils.MetricsEnableInfluxDBV2Flag.Name)
|
||||||
}
|
}
|
||||||
|
|
@ -399,9 +408,9 @@ func applyMetricConfig(ctx *cli.Context, cfg *gethConfig) {
|
||||||
ctx.IsSet(utils.MetricsInfluxDBBucketFlag.Name)
|
ctx.IsSet(utils.MetricsInfluxDBBucketFlag.Name)
|
||||||
|
|
||||||
if enableExport && v2FlagIsSet {
|
if enableExport && v2FlagIsSet {
|
||||||
utils.Fatalf("Flags --influxdb.metrics.organization, --influxdb.metrics.token, --influxdb.metrics.bucket are only available for influxdb-v2")
|
utils.Fatalf("Flags --%s, --%s, --%s are only available for influxdb-v2", utils.MetricsInfluxDBOrganizationFlag.Name, utils.MetricsInfluxDBTokenFlag.Name, utils.MetricsInfluxDBBucketFlag.Name)
|
||||||
} else if enableExportV2 && v1FlagIsSet {
|
} else if enableExportV2 && v1FlagIsSet {
|
||||||
utils.Fatalf("Flags --influxdb.metrics.username, --influxdb.metrics.password are only available for influxdb-v1")
|
utils.Fatalf("Flags --%s, --%s are only available for influxdb-v1", utils.MetricsInfluxDBUsernameFlag.Name, utils.MetricsInfluxDBPasswordFlag.Name)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -30,7 +30,7 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
ipcAPIs = "admin:1.0 debug:1.0 engine:1.0 eth:1.0 miner:1.0 net:1.0 rpc:1.0 txpool:1.0 web3:1.0"
|
ipcAPIs = "admin:1.0 debug:1.0 engine:1.0 eth:1.0 miner:1.0 net:1.0 rpc:1.0 testing:1.0 txpool:1.0 web3:1.0"
|
||||||
httpAPIs = "eth:1.0 net:1.0 rpc:1.0 web3:1.0"
|
httpAPIs = "eth:1.0 net:1.0 rpc:1.0 web3:1.0"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -39,8 +39,9 @@ const (
|
||||||
// child g gets a temporary data directory.
|
// child g gets a temporary data directory.
|
||||||
func runMinimalGeth(t *testing.T, args ...string) *testgeth {
|
func runMinimalGeth(t *testing.T, args ...string) *testgeth {
|
||||||
// --holesky to make the 'writing genesis to disk' faster (no accounts)
|
// --holesky to make the 'writing genesis to disk' faster (no accounts)
|
||||||
|
// --networkid=1337 to avoid cache bump
|
||||||
// --syncmode=full to avoid allocating fast sync bloom
|
// --syncmode=full to avoid allocating fast sync bloom
|
||||||
allArgs := []string{"--holesky", "--authrpc.port", "0", "--syncmode=full", "--port", "0",
|
allArgs := []string{"--holesky", "--networkid", "1337", "--authrpc.port", "0", "--syncmode=full", "--port", "0",
|
||||||
"--nat", "none", "--nodiscover", "--maxpeers", "0", "--cache", "64",
|
"--nat", "none", "--nodiscover", "--maxpeers", "0", "--cache", "64",
|
||||||
"--datadir.minfreedisk", "0"}
|
"--datadir.minfreedisk", "0"}
|
||||||
return runGeth(t, append(allArgs, args...)...)
|
return runGeth(t, append(allArgs, args...)...)
|
||||||
|
|
|
||||||
|
|
@ -19,6 +19,7 @@ package main
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"math"
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
|
@ -37,11 +38,11 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
|
"github.com/ethereum/go-ethereum/internal/tablewriter"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"github.com/ethereum/go-ethereum/trie"
|
"github.com/ethereum/go-ethereum/trie"
|
||||||
"github.com/ethereum/go-ethereum/triedb"
|
"github.com/ethereum/go-ethereum/triedb"
|
||||||
"github.com/olekukonko/tablewriter"
|
|
||||||
"github.com/urfave/cli/v2"
|
"github.com/urfave/cli/v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -52,7 +53,24 @@ var (
|
||||||
}
|
}
|
||||||
removeChainDataFlag = &cli.BoolFlag{
|
removeChainDataFlag = &cli.BoolFlag{
|
||||||
Name: "remove.chain",
|
Name: "remove.chain",
|
||||||
Usage: "If set, selects the state data for removal",
|
Usage: "If set, selects the chain data for removal",
|
||||||
|
}
|
||||||
|
inspectTrieTopFlag = &cli.IntFlag{
|
||||||
|
Name: "top",
|
||||||
|
Usage: "Print the top N results per ranking category",
|
||||||
|
Value: 10,
|
||||||
|
}
|
||||||
|
inspectTrieDumpPathFlag = &cli.StringFlag{
|
||||||
|
Name: "dump-path",
|
||||||
|
Usage: "Path for the trie statistics dump file",
|
||||||
|
}
|
||||||
|
inspectTrieSummarizeFlag = &cli.StringFlag{
|
||||||
|
Name: "summarize",
|
||||||
|
Usage: "Summarize an existing trie dump file (skip trie traversal)",
|
||||||
|
}
|
||||||
|
inspectTrieContractFlag = &cli.StringFlag{
|
||||||
|
Name: "contract",
|
||||||
|
Usage: "Inspect only the storage of the given contract address (skips full account trie walk)",
|
||||||
}
|
}
|
||||||
|
|
||||||
removedbCommand = &cli.Command{
|
removedbCommand = &cli.Command{
|
||||||
|
|
@ -75,6 +93,7 @@ Remove blockchain and state databases`,
|
||||||
dbCompactCmd,
|
dbCompactCmd,
|
||||||
dbGetCmd,
|
dbGetCmd,
|
||||||
dbDeleteCmd,
|
dbDeleteCmd,
|
||||||
|
dbInspectTrieCmd,
|
||||||
dbPutCmd,
|
dbPutCmd,
|
||||||
dbGetSlotsCmd,
|
dbGetSlotsCmd,
|
||||||
dbDumpFreezerIndex,
|
dbDumpFreezerIndex,
|
||||||
|
|
@ -93,6 +112,22 @@ Remove blockchain and state databases`,
|
||||||
Usage: "Inspect the storage size for each type of data in the database",
|
Usage: "Inspect the storage size for each type of data in the database",
|
||||||
Description: `This commands iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`,
|
Description: `This commands iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`,
|
||||||
}
|
}
|
||||||
|
dbInspectTrieCmd = &cli.Command{
|
||||||
|
Action: inspectTrie,
|
||||||
|
Name: "inspect-trie",
|
||||||
|
ArgsUsage: "<blocknum>",
|
||||||
|
Flags: slices.Concat([]cli.Flag{
|
||||||
|
utils.ExcludeStorageFlag,
|
||||||
|
inspectTrieTopFlag,
|
||||||
|
utils.OutputFileFlag,
|
||||||
|
inspectTrieDumpPathFlag,
|
||||||
|
inspectTrieSummarizeFlag,
|
||||||
|
inspectTrieContractFlag,
|
||||||
|
}, utils.NetworkFlags, utils.DatabaseFlags),
|
||||||
|
Usage: "Print detailed trie information about the structure of account trie and storage tries.",
|
||||||
|
Description: `This commands iterates the entrie trie-backed state. If the 'blocknum' is not specified,
|
||||||
|
the latest block number will be used by default.`,
|
||||||
|
}
|
||||||
dbCheckStateContentCmd = &cli.Command{
|
dbCheckStateContentCmd = &cli.Command{
|
||||||
Action: checkStateContent,
|
Action: checkStateContent,
|
||||||
Name: "check-state-content",
|
Name: "check-state-content",
|
||||||
|
|
@ -386,6 +421,88 @@ func checkStateContent(ctx *cli.Context) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func inspectTrie(ctx *cli.Context) error {
|
||||||
|
topN := ctx.Int(inspectTrieTopFlag.Name)
|
||||||
|
if topN <= 0 {
|
||||||
|
return fmt.Errorf("invalid --%s value %d (must be > 0)", inspectTrieTopFlag.Name, topN)
|
||||||
|
}
|
||||||
|
config := &trie.InspectConfig{
|
||||||
|
NoStorage: ctx.Bool(utils.ExcludeStorageFlag.Name),
|
||||||
|
TopN: topN,
|
||||||
|
Path: ctx.String(utils.OutputFileFlag.Name),
|
||||||
|
}
|
||||||
|
|
||||||
|
if summarizePath := ctx.String(inspectTrieSummarizeFlag.Name); summarizePath != "" {
|
||||||
|
if ctx.NArg() > 0 {
|
||||||
|
return fmt.Errorf("block number argument is not supported with --%s", inspectTrieSummarizeFlag.Name)
|
||||||
|
}
|
||||||
|
config.DumpPath = summarizePath
|
||||||
|
log.Info("Summarizing trie dump", "path", summarizePath, "top", topN)
|
||||||
|
return trie.Summarize(summarizePath, config)
|
||||||
|
}
|
||||||
|
if ctx.NArg() > 1 {
|
||||||
|
return fmt.Errorf("excessive number of arguments: %v", ctx.Command.ArgsUsage)
|
||||||
|
}
|
||||||
|
|
||||||
|
stack, _ := makeConfigNode(ctx)
|
||||||
|
db := utils.MakeChainDatabase(ctx, stack, false)
|
||||||
|
defer stack.Close()
|
||||||
|
defer db.Close()
|
||||||
|
|
||||||
|
var (
|
||||||
|
trieRoot common.Hash
|
||||||
|
hash common.Hash
|
||||||
|
number uint64
|
||||||
|
)
|
||||||
|
switch {
|
||||||
|
case ctx.NArg() == 0 || ctx.Args().Get(0) == "latest":
|
||||||
|
head := rawdb.ReadHeadHeaderHash(db)
|
||||||
|
n, ok := rawdb.ReadHeaderNumber(db, head)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("could not load head block hash")
|
||||||
|
}
|
||||||
|
number = n
|
||||||
|
case ctx.Args().Get(0) == "snapshot":
|
||||||
|
trieRoot = rawdb.ReadSnapshotRoot(db)
|
||||||
|
number = math.MaxUint64
|
||||||
|
default:
|
||||||
|
var err error
|
||||||
|
number, err = strconv.ParseUint(ctx.Args().Get(0), 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to parse blocknum, Args[0]: %v, err: %v", ctx.Args().Get(0), err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if number != math.MaxUint64 {
|
||||||
|
hash = rawdb.ReadCanonicalHash(db, number)
|
||||||
|
if hash == (common.Hash{}) {
|
||||||
|
return fmt.Errorf("canonical hash for block %d not found", number)
|
||||||
|
}
|
||||||
|
blockHeader := rawdb.ReadHeader(db, hash, number)
|
||||||
|
trieRoot = blockHeader.Root
|
||||||
|
}
|
||||||
|
if trieRoot == (common.Hash{}) {
|
||||||
|
log.Error("Empty root hash")
|
||||||
|
}
|
||||||
|
|
||||||
|
config.DumpPath = ctx.String(inspectTrieDumpPathFlag.Name)
|
||||||
|
if config.DumpPath == "" {
|
||||||
|
config.DumpPath = stack.ResolvePath("trie-dump.bin")
|
||||||
|
}
|
||||||
|
|
||||||
|
triedb := utils.MakeTrieDatabase(ctx, stack, db, false, true, false)
|
||||||
|
defer triedb.Close()
|
||||||
|
|
||||||
|
if contractAddr := ctx.String(inspectTrieContractFlag.Name); contractAddr != "" {
|
||||||
|
address := common.HexToAddress(contractAddr)
|
||||||
|
log.Info("Inspecting contract", "address", address, "root", trieRoot, "block", number)
|
||||||
|
return trie.InspectContract(triedb, db, trieRoot, address)
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Info("Inspecting trie", "root", trieRoot, "block", number, "dump", config.DumpPath, "top", topN)
|
||||||
|
return trie.Inspect(triedb, trieRoot, config)
|
||||||
|
}
|
||||||
|
|
||||||
func showDBStats(db ethdb.KeyValueStater) {
|
func showDBStats(db ethdb.KeyValueStater) {
|
||||||
stats, err := db.Stat()
|
stats, err := db.Stat()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
||||||
|
|
@ -22,7 +22,6 @@ import (
|
||||||
"os"
|
"os"
|
||||||
"slices"
|
"slices"
|
||||||
"sort"
|
"sort"
|
||||||
"strconv"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/accounts"
|
"github.com/ethereum/go-ethereum/accounts"
|
||||||
|
|
@ -94,6 +93,8 @@ var (
|
||||||
utils.LogNoHistoryFlag,
|
utils.LogNoHistoryFlag,
|
||||||
utils.LogExportCheckpointsFlag,
|
utils.LogExportCheckpointsFlag,
|
||||||
utils.StateHistoryFlag,
|
utils.StateHistoryFlag,
|
||||||
|
utils.TrienodeHistoryFlag,
|
||||||
|
utils.TrienodeHistoryFullValueCheckpointFlag,
|
||||||
utils.LightKDFFlag,
|
utils.LightKDFFlag,
|
||||||
utils.EthRequiredBlocksFlag,
|
utils.EthRequiredBlocksFlag,
|
||||||
utils.LegacyWhitelistFlag, // deprecated
|
utils.LegacyWhitelistFlag, // deprecated
|
||||||
|
|
@ -118,6 +119,7 @@ var (
|
||||||
utils.MinerGasPriceFlag,
|
utils.MinerGasPriceFlag,
|
||||||
utils.MinerEtherbaseFlag, // deprecated
|
utils.MinerEtherbaseFlag, // deprecated
|
||||||
utils.MinerExtraDataFlag,
|
utils.MinerExtraDataFlag,
|
||||||
|
utils.MinerMaxBlobsFlag,
|
||||||
utils.MinerRecommitIntervalFlag,
|
utils.MinerRecommitIntervalFlag,
|
||||||
utils.MinerPendingFeeRecipientFlag,
|
utils.MinerPendingFeeRecipientFlag,
|
||||||
utils.MinerNewPayloadTimeoutFlag, // deprecated
|
utils.MinerNewPayloadTimeoutFlag, // deprecated
|
||||||
|
|
@ -156,6 +158,7 @@ var (
|
||||||
utils.BeaconGenesisTimeFlag,
|
utils.BeaconGenesisTimeFlag,
|
||||||
utils.BeaconCheckpointFlag,
|
utils.BeaconCheckpointFlag,
|
||||||
utils.BeaconCheckpointFileFlag,
|
utils.BeaconCheckpointFileFlag,
|
||||||
|
utils.LogSlowBlockFlag,
|
||||||
}, utils.NetworkFlags, utils.DatabaseFlags)
|
}, utils.NetworkFlags, utils.DatabaseFlags)
|
||||||
|
|
||||||
rpcFlags = []cli.Flag{
|
rpcFlags = []cli.Flag{
|
||||||
|
|
@ -191,6 +194,14 @@ var (
|
||||||
utils.BatchResponseMaxSize,
|
utils.BatchResponseMaxSize,
|
||||||
utils.RPCTxSyncDefaultTimeoutFlag,
|
utils.RPCTxSyncDefaultTimeoutFlag,
|
||||||
utils.RPCTxSyncMaxTimeoutFlag,
|
utils.RPCTxSyncMaxTimeoutFlag,
|
||||||
|
utils.RPCGlobalRangeLimitFlag,
|
||||||
|
utils.RPCTelemetryFlag,
|
||||||
|
utils.RPCTelemetryEndpointFlag,
|
||||||
|
utils.RPCTelemetryUserFlag,
|
||||||
|
utils.RPCTelemetryPasswordFlag,
|
||||||
|
utils.RPCTelemetryInstanceIDFlag,
|
||||||
|
utils.RPCTelemetryTagsFlag,
|
||||||
|
utils.RPCTelemetrySampleRatioFlag,
|
||||||
}
|
}
|
||||||
|
|
||||||
metricsFlags = []cli.Flag{
|
metricsFlags = []cli.Flag{
|
||||||
|
|
@ -204,6 +215,7 @@ var (
|
||||||
utils.MetricsInfluxDBUsernameFlag,
|
utils.MetricsInfluxDBUsernameFlag,
|
||||||
utils.MetricsInfluxDBPasswordFlag,
|
utils.MetricsInfluxDBPasswordFlag,
|
||||||
utils.MetricsInfluxDBTagsFlag,
|
utils.MetricsInfluxDBTagsFlag,
|
||||||
|
utils.MetricsInfluxDBIntervalFlag,
|
||||||
utils.MetricsEnableInfluxDBV2Flag,
|
utils.MetricsEnableInfluxDBV2Flag,
|
||||||
utils.MetricsInfluxDBTokenFlag,
|
utils.MetricsInfluxDBTokenFlag,
|
||||||
utils.MetricsInfluxDBBucketFlag,
|
utils.MetricsInfluxDBBucketFlag,
|
||||||
|
|
@ -239,7 +251,6 @@ func init() {
|
||||||
javascriptCommand,
|
javascriptCommand,
|
||||||
// See misccmd.go:
|
// See misccmd.go:
|
||||||
versionCommand,
|
versionCommand,
|
||||||
versionCheckCommand,
|
|
||||||
licenseCommand,
|
licenseCommand,
|
||||||
// See config.go
|
// See config.go
|
||||||
dumpConfigCommand,
|
dumpConfigCommand,
|
||||||
|
|
@ -249,8 +260,8 @@ func init() {
|
||||||
utils.ShowDeprecated,
|
utils.ShowDeprecated,
|
||||||
// See snapshot.go
|
// See snapshot.go
|
||||||
snapshotCommand,
|
snapshotCommand,
|
||||||
// See verkle.go
|
// See bintrie_convert.go
|
||||||
verkleCommand,
|
bintrieCommand,
|
||||||
}
|
}
|
||||||
if logTestCommand != nil {
|
if logTestCommand != nil {
|
||||||
app.Commands = append(app.Commands, logTestCommand)
|
app.Commands = append(app.Commands, logTestCommand)
|
||||||
|
|
@ -306,18 +317,6 @@ func prepare(ctx *cli.Context) {
|
||||||
case !ctx.IsSet(utils.NetworkIdFlag.Name):
|
case !ctx.IsSet(utils.NetworkIdFlag.Name):
|
||||||
log.Info("Starting Geth on Ethereum mainnet...")
|
log.Info("Starting Geth on Ethereum mainnet...")
|
||||||
}
|
}
|
||||||
// If we're a full node on mainnet without --cache specified, bump default cache allowance
|
|
||||||
if !ctx.IsSet(utils.CacheFlag.Name) && !ctx.IsSet(utils.NetworkIdFlag.Name) {
|
|
||||||
// Make sure we're not on any supported preconfigured testnet either
|
|
||||||
if !ctx.IsSet(utils.HoleskyFlag.Name) &&
|
|
||||||
!ctx.IsSet(utils.SepoliaFlag.Name) &&
|
|
||||||
!ctx.IsSet(utils.HoodiFlag.Name) &&
|
|
||||||
!ctx.IsSet(utils.DeveloperFlag.Name) {
|
|
||||||
// Nope, we're really on mainnet. Bump that cache up!
|
|
||||||
log.Info("Bumping default cache on mainnet", "provided", ctx.Int(utils.CacheFlag.Name), "updated", 4096)
|
|
||||||
ctx.Set(utils.CacheFlag.Name, strconv.Itoa(4096))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// geth is the main entry point into the system if no special subcommand is run.
|
// geth is the main entry point into the system if no special subcommand is run.
|
||||||
|
|
|
||||||
|
|
@ -27,16 +27,6 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
VersionCheckUrlFlag = &cli.StringFlag{
|
|
||||||
Name: "check.url",
|
|
||||||
Usage: "URL to use when checking vulnerabilities",
|
|
||||||
Value: "https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json",
|
|
||||||
}
|
|
||||||
VersionCheckVersionFlag = &cli.StringFlag{
|
|
||||||
Name: "check.version",
|
|
||||||
Usage: "Version to check",
|
|
||||||
Value: version.ClientName(clientIdentifier),
|
|
||||||
}
|
|
||||||
versionCommand = &cli.Command{
|
versionCommand = &cli.Command{
|
||||||
Action: printVersion,
|
Action: printVersion,
|
||||||
Name: "version",
|
Name: "version",
|
||||||
|
|
@ -44,20 +34,6 @@ var (
|
||||||
ArgsUsage: " ",
|
ArgsUsage: " ",
|
||||||
Description: `
|
Description: `
|
||||||
The output of this command is supposed to be machine-readable.
|
The output of this command is supposed to be machine-readable.
|
||||||
`,
|
|
||||||
}
|
|
||||||
versionCheckCommand = &cli.Command{
|
|
||||||
Action: versionCheck,
|
|
||||||
Flags: []cli.Flag{
|
|
||||||
VersionCheckUrlFlag,
|
|
||||||
VersionCheckVersionFlag,
|
|
||||||
},
|
|
||||||
Name: "version-check",
|
|
||||||
Usage: "Checks (online) for known Geth security vulnerabilities",
|
|
||||||
ArgsUsage: "<versionstring (optional)>",
|
|
||||||
Description: `
|
|
||||||
The version-check command fetches vulnerability-information from https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json,
|
|
||||||
and displays information about any security vulnerabilities that affect the currently executing version.
|
|
||||||
`,
|
`,
|
||||||
}
|
}
|
||||||
licenseCommand = &cli.Command{
|
licenseCommand = &cli.Command{
|
||||||
|
|
|
||||||
|
|
@ -36,6 +36,7 @@ import (
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"github.com/ethereum/go-ethereum/trie"
|
"github.com/ethereum/go-ethereum/trie"
|
||||||
|
"github.com/ethereum/go-ethereum/triedb"
|
||||||
"github.com/urfave/cli/v2"
|
"github.com/urfave/cli/v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -105,7 +106,9 @@ information about the specified address.
|
||||||
Usage: "Traverse the state with given root hash and perform quick verification",
|
Usage: "Traverse the state with given root hash and perform quick verification",
|
||||||
ArgsUsage: "<root>",
|
ArgsUsage: "<root>",
|
||||||
Action: traverseState,
|
Action: traverseState,
|
||||||
Flags: slices.Concat(utils.NetworkFlags, utils.DatabaseFlags),
|
Flags: slices.Concat([]cli.Flag{
|
||||||
|
utils.AccountFlag,
|
||||||
|
}, utils.NetworkFlags, utils.DatabaseFlags),
|
||||||
Description: `
|
Description: `
|
||||||
geth snapshot traverse-state <state-root>
|
geth snapshot traverse-state <state-root>
|
||||||
will traverse the whole state from the given state root and will abort if any
|
will traverse the whole state from the given state root and will abort if any
|
||||||
|
|
@ -113,6 +116,8 @@ referenced trie node or contract code is missing. This command can be used for
|
||||||
state integrity verification. The default checking target is the HEAD state.
|
state integrity verification. The default checking target is the HEAD state.
|
||||||
|
|
||||||
It's also usable without snapshot enabled.
|
It's also usable without snapshot enabled.
|
||||||
|
|
||||||
|
If --account is specified, only the storage trie of that account is traversed.
|
||||||
`,
|
`,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|
@ -120,7 +125,9 @@ It's also usable without snapshot enabled.
|
||||||
Usage: "Traverse the state with given root hash and perform detailed verification",
|
Usage: "Traverse the state with given root hash and perform detailed verification",
|
||||||
ArgsUsage: "<root>",
|
ArgsUsage: "<root>",
|
||||||
Action: traverseRawState,
|
Action: traverseRawState,
|
||||||
Flags: slices.Concat(utils.NetworkFlags, utils.DatabaseFlags),
|
Flags: slices.Concat([]cli.Flag{
|
||||||
|
utils.AccountFlag,
|
||||||
|
}, utils.NetworkFlags, utils.DatabaseFlags),
|
||||||
Description: `
|
Description: `
|
||||||
geth snapshot traverse-rawstate <state-root>
|
geth snapshot traverse-rawstate <state-root>
|
||||||
will traverse the whole state from the given root and will abort if any referenced
|
will traverse the whole state from the given root and will abort if any referenced
|
||||||
|
|
@ -129,6 +136,8 @@ verification. The default checking target is the HEAD state. It's basically iden
|
||||||
to traverse-state, but the check granularity is smaller.
|
to traverse-state, but the check granularity is smaller.
|
||||||
|
|
||||||
It's also usable without snapshot enabled.
|
It's also usable without snapshot enabled.
|
||||||
|
|
||||||
|
If --account is specified, only the storage trie of that account is traversed.
|
||||||
`,
|
`,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|
@ -272,6 +281,120 @@ func checkDanglingStorage(ctx *cli.Context) error {
|
||||||
return snapshot.CheckDanglingStorage(db)
|
return snapshot.CheckDanglingStorage(db)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// parseAccount parses the account flag value as either an address (20 bytes)
|
||||||
|
// or an account hash (32 bytes) and returns the hashed account key.
|
||||||
|
func parseAccount(input string) (common.Hash, error) {
|
||||||
|
switch len(input) {
|
||||||
|
case 40, 42: // address
|
||||||
|
return crypto.Keccak256Hash(common.HexToAddress(input).Bytes()), nil
|
||||||
|
case 64, 66: // hash
|
||||||
|
return common.HexToHash(input), nil
|
||||||
|
default:
|
||||||
|
return common.Hash{}, errors.New("malformed account address or hash")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// lookupAccount resolves the account from the state trie using the given
|
||||||
|
// account hash.
|
||||||
|
func lookupAccount(accountHash common.Hash, tr *trie.Trie) (*types.StateAccount, error) {
|
||||||
|
accData, err := tr.Get(accountHash.Bytes())
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to get account %s: %w", accountHash, err)
|
||||||
|
}
|
||||||
|
if accData == nil {
|
||||||
|
return nil, fmt.Errorf("account not found: %s", accountHash)
|
||||||
|
}
|
||||||
|
var acc types.StateAccount
|
||||||
|
if err := rlp.DecodeBytes(accData, &acc); err != nil {
|
||||||
|
return nil, fmt.Errorf("invalid account data %s: %w", accountHash, err)
|
||||||
|
}
|
||||||
|
return &acc, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func traverseStorage(id *trie.ID, db *triedb.Database, report bool, detail bool) error {
|
||||||
|
tr, err := trie.NewStateTrie(id, db)
|
||||||
|
if err != nil {
|
||||||
|
log.Error("Failed to open storage trie", "account", id.Owner, "root", id.Root, "err", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
slots int
|
||||||
|
nodes int
|
||||||
|
lastReport time.Time
|
||||||
|
start = time.Now()
|
||||||
|
)
|
||||||
|
it, err := tr.NodeIterator(nil)
|
||||||
|
if err != nil {
|
||||||
|
log.Error("Failed to open storage iterator", "account", id.Owner, "root", id.Root, "err", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
logger := log.Debug
|
||||||
|
if report {
|
||||||
|
logger = log.Info
|
||||||
|
}
|
||||||
|
logger("Start traversing storage trie", "account", id.Owner, "storageRoot", id.Root)
|
||||||
|
|
||||||
|
if !detail {
|
||||||
|
iter := trie.NewIterator(it)
|
||||||
|
for iter.Next() {
|
||||||
|
slots += 1
|
||||||
|
if time.Since(lastReport) > time.Second*8 {
|
||||||
|
logger("Traversing storage", "account", id.Owner, "slots", slots, "elapsed", common.PrettyDuration(time.Since(start)))
|
||||||
|
lastReport = time.Now()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if iter.Err != nil {
|
||||||
|
log.Error("Failed to traverse storage trie", "root", id.Root, "err", iter.Err)
|
||||||
|
return iter.Err
|
||||||
|
}
|
||||||
|
logger("Storage is complete", "account", id.Owner, "slots", slots, "elapsed", common.PrettyDuration(time.Since(start)))
|
||||||
|
} else {
|
||||||
|
reader, err := db.NodeReader(id.StateRoot)
|
||||||
|
if err != nil {
|
||||||
|
log.Error("Failed to open state reader", "err", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
buffer = make([]byte, 32)
|
||||||
|
hasher = crypto.NewKeccakState()
|
||||||
|
)
|
||||||
|
for it.Next(true) {
|
||||||
|
nodes += 1
|
||||||
|
node := it.Hash()
|
||||||
|
|
||||||
|
// Check the presence for non-empty hash node(embedded node doesn't
|
||||||
|
// have their own hash).
|
||||||
|
if node != (common.Hash{}) {
|
||||||
|
blob, _ := reader.Node(id.Owner, it.Path(), node)
|
||||||
|
if len(blob) == 0 {
|
||||||
|
log.Error("Missing trie node(storage)", "hash", node)
|
||||||
|
return errors.New("missing storage")
|
||||||
|
}
|
||||||
|
hasher.Reset()
|
||||||
|
hasher.Write(blob)
|
||||||
|
hasher.Read(buffer)
|
||||||
|
if !bytes.Equal(buffer, node.Bytes()) {
|
||||||
|
log.Error("Invalid trie node(storage)", "hash", node.Hex(), "value", blob)
|
||||||
|
return errors.New("invalid storage node")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if it.Leaf() {
|
||||||
|
slots += 1
|
||||||
|
}
|
||||||
|
if time.Since(lastReport) > time.Second*8 {
|
||||||
|
logger("Traversing storage", "account", id.Owner, "nodes", nodes, "slots", slots, "elapsed", common.PrettyDuration(time.Since(start)))
|
||||||
|
lastReport = time.Now()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := it.Error(); err != nil {
|
||||||
|
log.Error("Failed to traverse storage trie", "root", id.Root, "err", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
logger("Storage is complete", "account", id.Owner, "nodes", nodes, "slots", slots, "elapsed", common.PrettyDuration(time.Since(start)))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// traverseState is a helper function used for pruning verification.
|
// traverseState is a helper function used for pruning verification.
|
||||||
// Basically it just iterates the trie, ensure all nodes and associated
|
// Basically it just iterates the trie, ensure all nodes and associated
|
||||||
// contract codes are present.
|
// contract codes are present.
|
||||||
|
|
@ -309,6 +432,30 @@ func traverseState(ctx *cli.Context) error {
|
||||||
root = headBlock.Root()
|
root = headBlock.Root()
|
||||||
log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64())
|
log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64())
|
||||||
}
|
}
|
||||||
|
// If --account is specified, only traverse the storage trie of that account.
|
||||||
|
if accountStr := ctx.String(utils.AccountFlag.Name); accountStr != "" {
|
||||||
|
accountHash, err := parseAccount(accountStr)
|
||||||
|
if err != nil {
|
||||||
|
log.Error("Failed to parse account", "err", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Use raw trie since the account key is already hashed.
|
||||||
|
t, err := trie.New(trie.StateTrieID(root), triedb)
|
||||||
|
if err != nil {
|
||||||
|
log.Error("Failed to open state trie", "root", root, "err", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
acc, err := lookupAccount(accountHash, t)
|
||||||
|
if err != nil {
|
||||||
|
log.Error("Failed to look up account", "hash", accountHash, "err", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if acc.Root == types.EmptyRootHash {
|
||||||
|
log.Info("Account has no storage", "hash", accountHash)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return traverseStorage(trie.StorageTrieID(root, accountHash, acc.Root), triedb, true, false)
|
||||||
|
}
|
||||||
t, err := trie.NewStateTrie(trie.StateTrieID(root), triedb)
|
t, err := trie.NewStateTrie(trie.StateTrieID(root), triedb)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("Failed to open trie", "root", root, "err", err)
|
log.Error("Failed to open trie", "root", root, "err", err)
|
||||||
|
|
@ -335,30 +482,10 @@ func traverseState(ctx *cli.Context) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if acc.Root != types.EmptyRootHash {
|
if acc.Root != types.EmptyRootHash {
|
||||||
id := trie.StorageTrieID(root, common.BytesToHash(accIter.Key), acc.Root)
|
err := traverseStorage(trie.StorageTrieID(root, common.BytesToHash(accIter.Key), acc.Root), triedb, false, false)
|
||||||
storageTrie, err := trie.NewStateTrie(id, triedb)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("Failed to open storage trie", "root", acc.Root, "err", err)
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
storageIt, err := storageTrie.NodeIterator(nil)
|
|
||||||
if err != nil {
|
|
||||||
log.Error("Failed to open storage iterator", "root", acc.Root, "err", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
storageIter := trie.NewIterator(storageIt)
|
|
||||||
for storageIter.Next() {
|
|
||||||
slots += 1
|
|
||||||
|
|
||||||
if time.Since(lastReport) > time.Second*8 {
|
|
||||||
log.Info("Traversing state", "accounts", accounts, "slots", slots, "codes", codes, "elapsed", common.PrettyDuration(time.Since(start)))
|
|
||||||
lastReport = time.Now()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if storageIter.Err != nil {
|
|
||||||
log.Error("Failed to traverse storage trie", "root", acc.Root, "err", storageIter.Err)
|
|
||||||
return storageIter.Err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if !bytes.Equal(acc.CodeHash, types.EmptyCodeHash.Bytes()) {
|
if !bytes.Equal(acc.CodeHash, types.EmptyCodeHash.Bytes()) {
|
||||||
if !rawdb.HasCode(chaindb, common.BytesToHash(acc.CodeHash)) {
|
if !rawdb.HasCode(chaindb, common.BytesToHash(acc.CodeHash)) {
|
||||||
|
|
@ -418,6 +545,30 @@ func traverseRawState(ctx *cli.Context) error {
|
||||||
root = headBlock.Root()
|
root = headBlock.Root()
|
||||||
log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64())
|
log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64())
|
||||||
}
|
}
|
||||||
|
// If --account is specified, only traverse the storage trie of that account.
|
||||||
|
if accountStr := ctx.String(utils.AccountFlag.Name); accountStr != "" {
|
||||||
|
accountHash, err := parseAccount(accountStr)
|
||||||
|
if err != nil {
|
||||||
|
log.Error("Failed to parse account", "err", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Use raw trie since the account key is already hashed.
|
||||||
|
t, err := trie.New(trie.StateTrieID(root), triedb)
|
||||||
|
if err != nil {
|
||||||
|
log.Error("Failed to open state trie", "root", root, "err", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
acc, err := lookupAccount(accountHash, t)
|
||||||
|
if err != nil {
|
||||||
|
log.Error("Failed to look up account", "hash", accountHash, "err", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if acc.Root == types.EmptyRootHash {
|
||||||
|
log.Info("Account has no storage", "hash", accountHash)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return traverseStorage(trie.StorageTrieID(root, accountHash, acc.Root), triedb, true, true)
|
||||||
|
}
|
||||||
t, err := trie.NewStateTrie(trie.StateTrieID(root), triedb)
|
t, err := trie.NewStateTrie(trie.StateTrieID(root), triedb)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("Failed to open trie", "root", root, "err", err)
|
log.Error("Failed to open trie", "root", root, "err", err)
|
||||||
|
|
@ -473,50 +624,10 @@ func traverseRawState(ctx *cli.Context) error {
|
||||||
return errors.New("invalid account")
|
return errors.New("invalid account")
|
||||||
}
|
}
|
||||||
if acc.Root != types.EmptyRootHash {
|
if acc.Root != types.EmptyRootHash {
|
||||||
id := trie.StorageTrieID(root, common.BytesToHash(accIter.LeafKey()), acc.Root)
|
err := traverseStorage(trie.StorageTrieID(root, common.BytesToHash(accIter.LeafKey()), acc.Root), triedb, false, true)
|
||||||
storageTrie, err := trie.NewStateTrie(id, triedb)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("Failed to open storage trie", "root", acc.Root, "err", err)
|
|
||||||
return errors.New("missing storage trie")
|
|
||||||
}
|
|
||||||
storageIter, err := storageTrie.NodeIterator(nil)
|
|
||||||
if err != nil {
|
|
||||||
log.Error("Failed to open storage iterator", "root", acc.Root, "err", err)
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
for storageIter.Next(true) {
|
|
||||||
nodes += 1
|
|
||||||
node := storageIter.Hash()
|
|
||||||
|
|
||||||
// Check the presence for non-empty hash node(embedded node doesn't
|
|
||||||
// have their own hash).
|
|
||||||
if node != (common.Hash{}) {
|
|
||||||
blob, _ := reader.Node(common.BytesToHash(accIter.LeafKey()), storageIter.Path(), node)
|
|
||||||
if len(blob) == 0 {
|
|
||||||
log.Error("Missing trie node(storage)", "hash", node)
|
|
||||||
return errors.New("missing storage")
|
|
||||||
}
|
|
||||||
hasher.Reset()
|
|
||||||
hasher.Write(blob)
|
|
||||||
hasher.Read(got)
|
|
||||||
if !bytes.Equal(got, node.Bytes()) {
|
|
||||||
log.Error("Invalid trie node(storage)", "hash", node.Hex(), "value", blob)
|
|
||||||
return errors.New("invalid storage node")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Bump the counter if it's leaf node.
|
|
||||||
if storageIter.Leaf() {
|
|
||||||
slots += 1
|
|
||||||
}
|
|
||||||
if time.Since(lastReport) > time.Second*8 {
|
|
||||||
log.Info("Traversing state", "nodes", nodes, "accounts", accounts, "slots", slots, "codes", codes, "elapsed", common.PrettyDuration(time.Since(start)))
|
|
||||||
lastReport = time.Now()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if storageIter.Error() != nil {
|
|
||||||
log.Error("Failed to traverse storage trie", "root", acc.Root, "err", storageIter.Error())
|
|
||||||
return storageIter.Error()
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if !bytes.Equal(acc.CodeHash, types.EmptyCodeHash.Bytes()) {
|
if !bytes.Equal(acc.CodeHash, types.EmptyCodeHash.Bytes()) {
|
||||||
if !rawdb.HasCode(chaindb, common.BytesToHash(acc.CodeHash)) {
|
if !rawdb.HasCode(chaindb, common.BytesToHash(acc.CodeHash)) {
|
||||||
|
|
@ -639,11 +750,11 @@ func snapshotExportPreimages(ctx *cli.Context) error {
|
||||||
|
|
||||||
var root common.Hash
|
var root common.Hash
|
||||||
if ctx.NArg() > 1 {
|
if ctx.NArg() > 1 {
|
||||||
rootBytes := common.FromHex(ctx.Args().Get(1))
|
hash := ctx.Args().Get(1)
|
||||||
if len(rootBytes) != common.HashLength {
|
if !common.IsHexHash(hash) {
|
||||||
return fmt.Errorf("invalid hash: %s", ctx.Args().Get(1))
|
return fmt.Errorf("invalid hash: %s", ctx.Args().Get(1))
|
||||||
}
|
}
|
||||||
root = common.BytesToHash(rootBytes)
|
root = common.HexToHash(hash)
|
||||||
} else {
|
} else {
|
||||||
headBlock := rawdb.ReadHeadBlock(chaindb)
|
headBlock := rawdb.ReadHeadBlock(chaindb)
|
||||||
if headBlock == nil {
|
if headBlock == nil {
|
||||||
|
|
|
||||||
202
cmd/geth/testdata/vcheck/data.json
vendored
202
cmd/geth/testdata/vcheck/data.json
vendored
|
|
@ -1,202 +0,0 @@
|
||||||
[
|
|
||||||
{
|
|
||||||
"name": "CorruptedDAG",
|
|
||||||
"uid": "GETH-2020-01",
|
|
||||||
"summary": "Mining nodes will generate erroneous PoW on epochs > `385`.",
|
|
||||||
"description": "A mining flaw could cause miners to erroneously calculate PoW, due to an index overflow, if DAG size is exceeding the maximum 32 bit unsigned value.\n\nThis occurred on the ETC chain on 2020-11-06. This is likely to trigger for ETH mainnet around block `11550000`/epoch `385`, slated to occur early January 2021.\n\nThis issue is relevant only for miners, non-mining nodes are unaffected, since non-mining nodes use a smaller verification cache instead of a full DAG.",
|
|
||||||
"links": [
|
|
||||||
"https://github.com/ethereum/go-ethereum/pull/21793",
|
|
||||||
"https://blog.ethereum.org/2020/11/12/geth-security-release",
|
|
||||||
"https://github.com/ethereum/go-ethereum/commit/567d41d9363706b4b13ce0903804e8acf214af49",
|
|
||||||
"https://github.com/ethereum/go-ethereum/security/advisories/GHSA-v592-xf75-856p"
|
|
||||||
],
|
|
||||||
"introduced": "v1.6.0",
|
|
||||||
"fixed": "v1.9.24",
|
|
||||||
"published": "2020-11-12",
|
|
||||||
"severity": "Medium",
|
|
||||||
"CVE": "CVE-2020-26240",
|
|
||||||
"check": "Geth\\/v1\\.(6|7|8)\\..*|Geth\\/v1\\.9\\.\\d-.*|Geth\\/v1\\.9\\.1.*|Geth\\/v1\\.9\\.2(0|1|2|3)-.*"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "Denial of service due to Go CVE-2020-28362",
|
|
||||||
"uid": "GETH-2020-02",
|
|
||||||
"summary": "A denial-of-service issue can be used to crash Geth nodes during block processing, due to an underlying bug in Go (CVE-2020-28362) versions < `1.15.5`, or `<1.14.12`",
|
|
||||||
"description": "The DoS issue can be used to crash all Geth nodes during block processing, the effects of which would be that a major part of the Ethereum network went offline.\n\nOutside of Go-Ethereum, the issue is most likely relevant for all forks of Geth (such as TurboGeth or ETC’s core-geth) which is built with versions of Go which contains the vulnerability.",
|
|
||||||
"links": [
|
|
||||||
"https://blog.ethereum.org/2020/11/12/geth-security-release",
|
|
||||||
"https://groups.google.com/g/golang-announce/c/NpBGTTmKzpM",
|
|
||||||
"https://github.com/golang/go/issues/42552",
|
|
||||||
"https://github.com/ethereum/go-ethereum/security/advisories/GHSA-m6gx-rhvj-fh52"
|
|
||||||
],
|
|
||||||
"introduced": "v0.0.0",
|
|
||||||
"fixed": "v1.9.24",
|
|
||||||
"published": "2020-11-12",
|
|
||||||
"severity": "Critical",
|
|
||||||
"CVE": "CVE-2020-28362",
|
|
||||||
"check": "Geth.*\\/go1\\.(11(.*)|12(.*)|13(.*)|14|14\\.(\\d|10|11|)|15|15\\.[0-4])$"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "ShallowCopy",
|
|
||||||
"uid": "GETH-2020-03",
|
|
||||||
"summary": "A consensus flaw in Geth, related to `datacopy` precompile",
|
|
||||||
"description": "Geth erroneously performed a 'shallow' copy when the precompiled `datacopy` (at `0x00...04`) was invoked. An attacker could deploy a contract that uses the shallow copy to corrupt the contents of the `RETURNDATA`, thus causing a consensus failure.",
|
|
||||||
"links": [
|
|
||||||
"https://blog.ethereum.org/2020/11/12/geth-security-release",
|
|
||||||
"https://github.com/ethereum/go-ethereum/security/advisories/GHSA-69v6-xc2j-r2jf"
|
|
||||||
],
|
|
||||||
"introduced": "v1.9.7",
|
|
||||||
"fixed": "v1.9.17",
|
|
||||||
"published": "2020-11-12",
|
|
||||||
"severity": "Critical",
|
|
||||||
"CVE": "CVE-2020-26241",
|
|
||||||
"check": "Geth\\/v1\\.9\\.(7|8|9|10|11|12|13|14|15|16).*$"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "Geth DoS via MULMOD",
|
|
||||||
"uid": "GETH-2020-04",
|
|
||||||
"summary": "A denial-of-service issue can be used to crash Geth nodes during block processing",
|
|
||||||
"description": "Affected versions suffer from a vulnerability which can be exploited through the `MULMOD` operation, by specifying a modulo of `0`: `mulmod(a,b,0)`, causing a `panic` in the underlying library. \nThe crash was in the `uint256` library, where a buffer [underflowed](https://github.com/holiman/uint256/blob/4ce82e695c10ddad57215bdbeafb68b8c5df2c30/uint256.go#L442).\n\n\tif `d == 0`, `dLen` remains `0`\n\nand https://github.com/holiman/uint256/blob/4ce82e695c10ddad57215bdbeafb68b8c5df2c30/uint256.go#L451 will try to access index `[-1]`.\n\nThe `uint256` library was first merged in this [commit](https://github.com/ethereum/go-ethereum/commit/cf6674539c589f80031f3371a71c6a80addbe454), on 2020-06-08. \nExploiting this vulnerabilty would cause all vulnerable nodes to drop off the network. \n\nThe issue was brought to our attention through a [bug report](https://github.com/ethereum/go-ethereum/issues/21367), showing a `panic` occurring on sync from genesis on the Ropsten network.\n \nIt was estimated that the least obvious way to fix this would be to merge the fix into `uint256`, make a new release of that library and then update the geth-dependency.\n",
|
|
||||||
"links": [
|
|
||||||
"https://blog.ethereum.org/2020/11/12/geth-security-release",
|
|
||||||
"https://github.com/ethereum/go-ethereum/security/advisories/GHSA-jm5c-rv3w-w83m",
|
|
||||||
"https://github.com/holiman/uint256/releases/tag/v1.1.1",
|
|
||||||
"https://github.com/holiman/uint256/pull/80",
|
|
||||||
"https://github.com/ethereum/go-ethereum/pull/21368"
|
|
||||||
],
|
|
||||||
"introduced": "v1.9.16",
|
|
||||||
"fixed": "v1.9.18",
|
|
||||||
"published": "2020-11-12",
|
|
||||||
"severity": "Critical",
|
|
||||||
"CVE": "CVE-2020-26242",
|
|
||||||
"check": "Geth\\/v1\\.9.(16|17).*$"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "LES Server DoS via GetProofsV2",
|
|
||||||
"uid": "GETH-2020-05",
|
|
||||||
"summary": "A DoS vulnerability can make a LES server crash.",
|
|
||||||
"description": "A DoS vulnerability can make a LES server crash via malicious GetProofsV2 request from a connected LES client.\n\nThe vulnerability was patched in #21896.\n\nThis vulnerability only concern users explicitly running geth as a light server",
|
|
||||||
"links": [
|
|
||||||
"https://github.com/ethereum/go-ethereum/security/advisories/GHSA-r33q-22hv-j29q",
|
|
||||||
"https://github.com/ethereum/go-ethereum/pull/21896"
|
|
||||||
],
|
|
||||||
"introduced": "v1.8.0",
|
|
||||||
"fixed": "v1.9.25",
|
|
||||||
"published": "2020-12-10",
|
|
||||||
"severity": "Medium",
|
|
||||||
"CVE": "CVE-2020-26264",
|
|
||||||
"check": "(Geth\\/v1\\.8\\.*)|(Geth\\/v1\\.9\\.\\d-.*)|(Geth\\/v1\\.9\\.1\\d-.*)|(Geth\\/v1\\.9\\.(20|21|22|23|24)-.*)$"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "SELFDESTRUCT-recreate consensus flaw",
|
|
||||||
"uid": "GETH-2020-06",
|
|
||||||
"introduced": "v1.9.4",
|
|
||||||
"fixed": "v1.9.20",
|
|
||||||
"summary": "A consensus-vulnerability in Geth could cause a chain split, where vulnerable versions refuse to accept the canonical chain.",
|
|
||||||
"description": "A flaw was repoted at 2020-08-11 by John Youngseok Yang (Software Platform Lab), where a particular sequence of transactions could cause a consensus failure.\n\n- Tx 1:\n - `sender` invokes `caller`.\n - `caller` invokes `0xaa`. `0xaa` has 3 wei, does a self-destruct-to-self\n - `caller` does a `1 wei` -call to `0xaa`, who thereby has 1 wei (the code in `0xaa` still executed, since the tx is still ongoing, but doesn't redo the selfdestruct, it takes a different path if callvalue is non-zero)\n\n-Tx 2:\n - `sender` does a 5-wei call to 0xaa. No exec (since no code). \n\nIn geth, the result would be that `0xaa` had `6 wei`, whereas OE reported (correctly) `5` wei. Furthermore, in geth, if the second tx was not executed, the `0xaa` would be destructed, resulting in `0 wei`. Thus obviously wrong. \n\nIt was determined that the root cause was this [commit](https://github.com/ethereum/go-ethereum/commit/223b950944f494a5b4e0957fd9f92c48b09037ad) from [this PR](https://github.com/ethereum/go-ethereum/pull/19953). The semantics of `createObject` was subtly changd, into returning a non-nil object (with `deleted=true`) where it previously did not if the account had been destructed. This return value caused the new object to inherit the old `balance`.\n",
|
|
||||||
"links": [
|
|
||||||
"https://github.com/ethereum/go-ethereum/security/advisories/GHSA-xw37-57qp-9mm4"
|
|
||||||
],
|
|
||||||
"published": "2020-12-10",
|
|
||||||
"severity": "High",
|
|
||||||
"CVE": "CVE-2020-26265",
|
|
||||||
"check": "(Geth\\/v1\\.9\\.(4|5|6|7|8|9)-.*)|(Geth\\/v1\\.9\\.1\\d-.*)$"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "Not ready for London upgrade",
|
|
||||||
"uid": "GETH-2021-01",
|
|
||||||
"summary": "The client is not ready for the 'London' technical upgrade, and will deviate from the canonical chain when the London upgrade occurs (at block '12965000' around August 4, 2021.",
|
|
||||||
"description": "At (or around) August 4, Ethereum will undergo a technical upgrade called 'London'. Clients not upgraded will fail to progress on the canonical chain.",
|
|
||||||
"links": [
|
|
||||||
"https://github.com/ethereum/eth1.0-specs/blob/master/network-upgrades/mainnet-upgrades/london.md",
|
|
||||||
"https://notes.ethereum.org/@timbeiko/ropsten-postmortem"
|
|
||||||
],
|
|
||||||
"introduced": "v1.10.1",
|
|
||||||
"fixed": "v1.10.6",
|
|
||||||
"published": "2021-07-22",
|
|
||||||
"severity": "High",
|
|
||||||
"check": "(Geth\\/v1\\.10\\.(1|2|3|4|5)-.*)$"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "RETURNDATA corruption via datacopy",
|
|
||||||
"uid": "GETH-2021-02",
|
|
||||||
"summary": "A consensus-flaw in the Geth EVM could cause a node to deviate from the canonical chain.",
|
|
||||||
"description": "A memory-corruption bug within the EVM can cause a consensus error, where vulnerable nodes obtain a different `stateRoot` when processing a maliciously crafted transaction. This, in turn, would lead to the chain being split: mainnet splitting in two forks.\n\nAll Geth versions supporting the London hard fork are vulnerable (the bug is older than London), so all users should update.\n\nThis bug was exploited on Mainnet at block 13107518.\n\nCredits for the discovery go to @guidovranken (working for Sentnl during an audit of the Telos EVM) and reported via bounty@ethereum.org.",
|
|
||||||
"links": [
|
|
||||||
"https://github.com/ethereum/go-ethereum/blob/master/docs/postmortems/2021-08-22-split-postmortem.md",
|
|
||||||
"https://github.com/ethereum/go-ethereum/security/advisories/GHSA-9856-9gg9-qcmq",
|
|
||||||
"https://github.com/ethereum/go-ethereum/releases/tag/v1.10.8"
|
|
||||||
],
|
|
||||||
"introduced": "v1.10.0",
|
|
||||||
"fixed": "v1.10.8",
|
|
||||||
"published": "2021-08-24",
|
|
||||||
"severity": "High",
|
|
||||||
"CVE": "CVE-2021-39137",
|
|
||||||
"check": "(Geth\\/v1\\.10\\.(0|1|2|3|4|5|6|7)-.*)$"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "DoS via malicious `snap/1` request",
|
|
||||||
"uid": "GETH-2021-03",
|
|
||||||
"summary": "A vulnerable node is susceptible to crash when processing a maliciously crafted message from a peer, via the snap/1 protocol. The crash can be triggered by sending a malicious snap/1 GetTrieNodes package.",
|
|
||||||
"description": "The `snap/1` protocol handler contains two vulnerabilities related to the `GetTrieNodes` packet, which can be exploited to crash the node. Full details are available at the Github security [advisory](https://github.com/ethereum/go-ethereum/security/advisories/GHSA-59hh-656j-3p7v)",
|
|
||||||
"links": [
|
|
||||||
"https://github.com/ethereum/go-ethereum/security/advisories/GHSA-59hh-656j-3p7v",
|
|
||||||
"https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities",
|
|
||||||
"https://github.com/ethereum/go-ethereum/pull/23657"
|
|
||||||
],
|
|
||||||
"introduced": "v1.10.0",
|
|
||||||
"fixed": "v1.10.9",
|
|
||||||
"published": "2021-10-24",
|
|
||||||
"severity": "Medium",
|
|
||||||
"CVE": "CVE-2021-41173",
|
|
||||||
"check": "(Geth\\/v1\\.10\\.(0|1|2|3|4|5|6|7|8)-.*)$"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "DoS via malicious p2p message",
|
|
||||||
"uid": "GETH-2022-01",
|
|
||||||
"summary": "A vulnerable node can crash via p2p messages sent from an attacker node, if running with non-default log options.",
|
|
||||||
"description": "A vulnerable node, if configured to use high verbosity logging, can be made to crash when handling specially crafted p2p messages sent from an attacker node. Full details are available at the Github security [advisory](https://github.com/ethereum/go-ethereum/security/advisories/GHSA-wjxw-gh3m-7pm5)",
|
|
||||||
"links": [
|
|
||||||
"https://github.com/ethereum/go-ethereum/security/advisories/GHSA-wjxw-gh3m-7pm5",
|
|
||||||
"https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities",
|
|
||||||
"https://github.com/ethereum/go-ethereum/pull/24507"
|
|
||||||
],
|
|
||||||
"introduced": "v1.10.0",
|
|
||||||
"fixed": "v1.10.17",
|
|
||||||
"published": "2022-05-11",
|
|
||||||
"severity": "Low",
|
|
||||||
"CVE": "CVE-2022-29177",
|
|
||||||
"check": "(Geth\\/v1\\.10\\.(0|1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16)-.*)$"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "DoS via malicious p2p message",
|
|
||||||
"uid": "GETH-2023-01",
|
|
||||||
"summary": "A vulnerable node can be made to consume unbounded amounts of memory when handling specially crafted p2p messages sent from an attacker node.",
|
|
||||||
"description": "The p2p handler spawned a new goroutine to respond to ping requests. By flooding a node with ping requests, an unbounded number of goroutines can be created, leading to resource exhaustion and potentially crash due to OOM.",
|
|
||||||
"links": [
|
|
||||||
"https://github.com/ethereum/go-ethereum/security/advisories/GHSA-ppjg-v974-84cm",
|
|
||||||
"https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities"
|
|
||||||
],
|
|
||||||
"introduced": "v1.10.0",
|
|
||||||
"fixed": "v1.12.1",
|
|
||||||
"published": "2023-09-06",
|
|
||||||
"severity": "High",
|
|
||||||
"CVE": "CVE-2023-40591",
|
|
||||||
"check": "(Geth\\/v1\\.(10|11)\\..*)|(Geth\\/v1\\.12\\.0-.*)$"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "DoS via malicious p2p message",
|
|
||||||
"uid": "GETH-2024-01",
|
|
||||||
"summary": "A vulnerable node can be made to consume very large amounts of memory when handling specially crafted p2p messages sent from an attacker node.",
|
|
||||||
"description": "A vulnerable node can be made to consume very large amounts of memory when handling specially crafted p2p messages sent from an attacker node. Full details will be available at the Github security [advisory](https://github.com/ethereum/go-ethereum/security/advisories/GHSA-4xc9-8hmq-j652)",
|
|
||||||
"links": [
|
|
||||||
"https://github.com/ethereum/go-ethereum/security/advisories/GHSA-4xc9-8hmq-j652",
|
|
||||||
"https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities"
|
|
||||||
],
|
|
||||||
"introduced": "v1.10.0",
|
|
||||||
"fixed": "v1.13.15",
|
|
||||||
"published": "2024-05-06",
|
|
||||||
"severity": "High",
|
|
||||||
"CVE": "CVE-2024-32972",
|
|
||||||
"check": "(Geth\\/v1\\.(10|11|12)\\..*)|(Geth\\/v1\\.13\\.\\d-.*)|(Geth\\/v1\\.13\\.1(0|1|2|3|4)-.*)$"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
|
|
@ -1,4 +0,0 @@
|
||||||
untrusted comment: signature from minisign secret key
|
|
||||||
RUQkliYstQBOKHklFEYCUjepz81dyUuDmIAxjAvXa+icjGuKcjtVfV06G7qfOMSpplS5EcntU12n+AnGNyuOM8zIctaIWcfG2w0=
|
|
||||||
trusted comment: timestamp:1752094689 file:data.json hashed
|
|
||||||
u2e4wo4HBTU6viQTSY/NVBHoWoPFJnnTvLZS0FYl3JdvSOYi6+qpbEsDhAIFqq/n8VmlS/fPqqf7vKCNiAgjAA==
|
|
||||||
|
|
@ -1,4 +0,0 @@
|
||||||
untrusted comment: signature from minisign secret key
|
|
||||||
RWQkliYstQBOKNoyq2O98hPmeVJQ6ShQLM58+4n0gkY0y0trFMDAsHuN/l4IyHfh8dDQ1ry0+IuZVrf/i8M/P3YFzFfAymDYCQ0=
|
|
||||||
trusted comment: timestamp:1752094703 file:data.json
|
|
||||||
cNyq3ZGlqo785HtWODb9ejWqF0HhSeXuLGXzC7z1IhnDrBObWBJngYd3qBG1dQcYlHQ+bgB/On5mSyMFn4UoCQ==
|
|
||||||
|
|
@ -1,4 +0,0 @@
|
||||||
untrusted comment: Here's a comment
|
|
||||||
RWQkliYstQBOKNoyq2O98hPmeVJQ6ShQLM58+4n0gkY0y0trFMDAsHuN/l4IyHfh8dDQ1ry0+IuZVrf/i8M/P3YFzFfAymDYCQ0=
|
|
||||||
trusted comment: Here's a trusted comment
|
|
||||||
dL7lO8sqFFCOXJO/u8SgoDk2nlXGWPRDbOTJkChMbmtUp9PB7sG831basXkZ/0CQ/l/vG7AbPyMNEVZyJn5NCg==
|
|
||||||
|
|
@ -1,4 +0,0 @@
|
||||||
untrusted comment: One more (untrusted™) comment
|
|
||||||
RWQkliYstQBOKNoyq2O98hPmeVJQ6ShQLM58+4n0gkY0y0trFMDAsHuN/l4IyHfh8dDQ1ry0+IuZVrf/i8M/P3YFzFfAymDYCQ0=
|
|
||||||
trusted comment: Here's a trusted comment
|
|
||||||
dL7lO8sqFFCOXJO/u8SgoDk2nlXGWPRDbOTJkChMbmtUp9PB7sG831basXkZ/0CQ/l/vG7AbPyMNEVZyJn5NCg==
|
|
||||||
2
cmd/geth/testdata/vcheck/minisign.pub
vendored
2
cmd/geth/testdata/vcheck/minisign.pub
vendored
|
|
@ -1,2 +0,0 @@
|
||||||
untrusted comment: minisign public key 284E00B52C269624
|
|
||||||
RWQkliYstQBOKOdtClfgC3IypIPX6TAmoEi7beZ4gyR3wsaezvqOMWsp
|
|
||||||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue