Merge branch 'master' into eof-opcodes

This commit is contained in:
Marius van der Wijden 2025-02-07 09:24:00 +01:00
commit d227a8d086
163 changed files with 3482 additions and 1749 deletions

View file

@ -5,6 +5,9 @@ Aaron Kumavis <kumavis@users.noreply.github.com>
Abel Nieto <abel.nieto90@gmail.com> Abel Nieto <abel.nieto90@gmail.com>
Abel Nieto <abel.nieto90@gmail.com> <anietoro@uwaterloo.ca> Abel Nieto <abel.nieto90@gmail.com> <anietoro@uwaterloo.ca>
Adrian Sutton <adrian@oplabs.co>
Adrian Sutton <adrian@oplabs.co> <adrian@symphonious.net>
Afri Schoedon <58883403+q9f@users.noreply.github.com> Afri Schoedon <58883403+q9f@users.noreply.github.com>
Afri Schoedon <5chdn@users.noreply.github.com> <58883403+q9f@users.noreply.github.com> Afri Schoedon <5chdn@users.noreply.github.com> <58883403+q9f@users.noreply.github.com>
@ -22,6 +25,9 @@ Alexey Akhunov <akhounov@gmail.com>
Alon Muroch <alonmuroch@gmail.com> Alon Muroch <alonmuroch@gmail.com>
Andrei Silviu Dragnea <andreidragnea.dev@gmail.com>
Andrei Silviu Dragnea <andreidragnea.dev@gmail.com> <andreisilviudragnea@gmail.com>
Andrey Petrov <shazow@gmail.com> Andrey Petrov <shazow@gmail.com>
Andrey Petrov <shazow@gmail.com> <andrey.petrov@shazow.net> Andrey Petrov <shazow@gmail.com> <andrey.petrov@shazow.net>
@ -51,11 +57,17 @@ Chris Ziogas <ziogaschr@gmail.com> <ziogas_chr@hotmail.com>
Christoph Jentzsch <jentzsch.software@gmail.com> Christoph Jentzsch <jentzsch.software@gmail.com>
Daniel Liu <liudaniel@qq.com>
Daniel Liu <liudaniel@qq.com> <139250065@qq.com>
Diederik Loerakker <proto@protolambda.com> Diederik Loerakker <proto@protolambda.com>
Dimitry Khokhlov <winsvega@mail.ru> Dimitry Khokhlov <winsvega@mail.ru>
Ha ĐANG <dvietha@gmail.com>
Domino Valdano <dominoplural@gmail.com> Domino Valdano <dominoplural@gmail.com>
Domino Valdano <dominoplural@gmail.com> <jeff@okcupid.com>
Edgar Aroutiounian <edgar.factorial@gmail.com> Edgar Aroutiounian <edgar.factorial@gmail.com>
@ -82,6 +94,9 @@ Gavin Wood <i@gavwood.com>
Gregg Dourgarian <greggd@tempworks.com> Gregg Dourgarian <greggd@tempworks.com>
guangwu <guoguangwu@magic-shield.com>
guangwu <guoguangwu@magic-shield.com> <guoguangwug@gmail.com>
Guillaume Ballet <gballet@gmail.com> Guillaume Ballet <gballet@gmail.com>
Guillaume Ballet <gballet@gmail.com> <3272758+gballet@users.noreply.github.com> Guillaume Ballet <gballet@gmail.com> <3272758+gballet@users.noreply.github.com>
@ -95,13 +110,21 @@ Heiko Hees <heiko@heiko.org>
Henning Diedrich <hd@eonblast.com> Henning Diedrich <hd@eonblast.com>
Henning Diedrich <hd@eonblast.com> Drake Burroughs <wildfyre@hotmail.com> Henning Diedrich <hd@eonblast.com> Drake Burroughs <wildfyre@hotmail.com>
henridf <henri@dubfer.com>
henridf <henri@dubfer.com> <henridf@gmail.com>
Hwanjo Heo <34005989+hwanjo@users.noreply.github.com> Hwanjo Heo <34005989+hwanjo@users.noreply.github.com>
Ikko Eltociear Ashimine <eltociear@gmail.com>
Iskander (Alex) Sharipov <quasilyte@gmail.com> Iskander (Alex) Sharipov <quasilyte@gmail.com>
Iskander (Alex) Sharipov <quasilyte@gmail.com> <i.sharipov@corp.vk.com> Iskander (Alex) Sharipov <quasilyte@gmail.com> <i.sharipov@corp.vk.com>
Jae Kwon <jkwon.work@gmail.com> Jae Kwon <jkwon.work@gmail.com>
James Prestwich <james@prestwi.ch>
James Prestwich <james@prestwi.ch> <10149425+prestwich@users.noreply.github.com>
Janoš Guljaš <janos@resenje.org> <janos@users.noreply.github.com> Janoš Guljaš <janos@resenje.org> <janos@users.noreply.github.com>
Janoš Guljaš <janos@resenje.org> Janos Guljas <janos@resenje.org> Janoš Guljaš <janos@resenje.org> Janos Guljas <janos@resenje.org>
@ -120,23 +143,38 @@ Jeffrey Wilcke <jeffrey@ethereum.org> <obscuren@users.noreply.github.com>
Jens Agerberg <github@agerberg.me> Jens Agerberg <github@agerberg.me>
Jeremy Schlatter <jeremy.schlatter@gmail.com>
Jeremy Schlatter <jeremy.schlatter@gmail.com> <jeremy@jeremyschlatter.com>
John Chase <68833933+joohhnnn@users.noreply.github.com>
Joseph Chow <ethereum@outlook.com> Joseph Chow <ethereum@outlook.com>
Joseph Chow <ethereum@outlook.com> ethers <TODO> Joseph Chow <ethereum@outlook.com> ethers <TODO>
Joseph Goulden <joegoulden@gmail.com> Joseph Goulden <joegoulden@gmail.com>
Justin Drake <drakefjustin@gmail.com> Justin Drake <drakefjustin@gmail.com>
Karl Bartel <karl.bartel@clabs.co>
Karl Bartel <karl.bartel@clabs.co> <karl@karl.berlin>
Kenso Trabing <ktrabing@acm.org> Kenso Trabing <ktrabing@acm.org>
Kenso Trabing <ktrabing@acm.org> <kenso.trabing@bloomwebsite.com> Kenso Trabing <ktrabing@acm.org> <kenso.trabing@bloomwebsite.com>
Liyi Guo <102356659+colinlyguo@users.noreply.github.com>
lmittmann <3458786+lmittmann@users.noreply.github.com>
lmittmann <3458786+lmittmann@users.noreply.github.com> <lmittmann@users.noreply.github.com>
Liang Ma <liangma@liangbit.com> Liang Ma <liangma@liangbit.com>
Liang Ma <liangma@liangbit.com> <liangma.ul@gmail.com> Liang Ma <liangma@liangbit.com> <liangma.ul@gmail.com>
Louis Holbrook <dev@holbrook.no> Louis Holbrook <dev@holbrook.no>
Louis Holbrook <dev@holbrook.no> <nolash@users.noreply.github.com> Louis Holbrook <dev@holbrook.no> <nolash@users.noreply.github.com>
makcandrov <makcandrov@proton.me>
makcandrov <makcandrov@proton.me> <108467407+makcandrov@users.noreply.github.com>
Maran Hidskes <maran.hidskes@gmail.com> Maran Hidskes <maran.hidskes@gmail.com>
Marian Oancea <contact@siteshop.ro> Marian Oancea <contact@siteshop.ro>
@ -144,17 +182,33 @@ Marian Oancea <contact@siteshop.ro>
Martin Becze <mjbecze@gmail.com> Martin Becze <mjbecze@gmail.com>
Martin Becze <mjbecze@gmail.com> <wanderer@users.noreply.github.com> Martin Becze <mjbecze@gmail.com> <wanderer@users.noreply.github.com>
Martin Holst Swende <martin@swende.se>
Martin Lundfall <martin.lundfall@protonmail.com> Martin Lundfall <martin.lundfall@protonmail.com>
Matt Garnett <14004106+lightclient@users.noreply.github.com> Marius van der Wijden <m.vanderwijden@live.de>
Marius van der Wijden <m.vanderwijden@live.de> <115323661+vdwijden@users.noreply.github.com>
Matt Garnett <lightclient@protonmail.com>
Matt Garnett <lightclient@protonmail.com> <14004106+lightclient@users.noreply.github.com>
Matthew Halpern <matthalp@gmail.com> Matthew Halpern <matthalp@gmail.com>
Matthew Halpern <matthalp@gmail.com> <matthalp@google.com> Matthew Halpern <matthalp@gmail.com> <matthalp@google.com>
meowsbits <b5c6@protonmail.com>
meowsbits <b5c6@protonmail.com> <45600330+meowsbits@users.noreply.github.com>
Michael Riabzev <michael@starkware.co> Michael Riabzev <michael@starkware.co>
Michael de Hoog <michael.dehoog@gmail.com>
Michael de Hoog <michael.dehoog@gmail.com> <michael.dehoog@coinbase.com>
Nchinda Nchinda <nchinda2@gmail.com> Nchinda Nchinda <nchinda2@gmail.com>
Nebojsa Urosevic <nebojsa94@users.noreply.github.com>
nedifi <103940716+nedifi@users.noreply.github.com>
Nick Dodson <silentcicero@outlook.com> Nick Dodson <silentcicero@outlook.com>
Nick Johnson <arachnid@notdot.net> Nick Johnson <arachnid@notdot.net>
@ -169,6 +223,9 @@ Olivier Hervieu <olivier.hervieu@gmail.com>
Pascal Dierich <pascal@merkleplant.xyz> Pascal Dierich <pascal@merkleplant.xyz>
Pascal Dierich <pascal@merkleplant.xyz> <pascal@pascaldierich.com> Pascal Dierich <pascal@merkleplant.xyz> <pascal@pascaldierich.com>
Paweł Bylica <chfast@gmail.com>
Paweł Bylica <chfast@gmail.com> <pawel@ethereum.org>
RJ Catalano <catalanor0220@gmail.com> RJ Catalano <catalanor0220@gmail.com>
RJ Catalano <catalanor0220@gmail.com> <rj@erisindustries.com> RJ Catalano <catalanor0220@gmail.com> <rj@erisindustries.com>
@ -179,8 +236,22 @@ Rene Lubov <41963722+renaynay@users.noreply.github.com>
Robert Zaremba <robert@zaremba.ch> Robert Zaremba <robert@zaremba.ch>
Robert Zaremba <robert@zaremba.ch> <robert.zaremba@scale-it.pl> Robert Zaremba <robert@zaremba.ch> <robert.zaremba@scale-it.pl>
Roberto Bayardo <bayardo@alum.mit.edu>
Roberto Bayardo <bayardo@alum.mit.edu> <roberto.bayardo@coinbase.com>
Roman Mandeleil <roman.mandeleil@gmail.com> Roman Mandeleil <roman.mandeleil@gmail.com>
Sebastian Stammler <seb@oplabs.co>
Sebastian Stammler <seb@oplabs.co> <stammler.s@gmail.com>
Seungbae Yu <dbadoy4874@gmail.com>
Seungbae Yu <dbadoy4874@gmail.com> <72970043+dbadoy@users.noreply.github.com>
Sina Mahmoodi <1591639+s1na@users.noreply.github.com>
Steve Milk <wangpeculiar@gmail.com>
Steve Milk <wangpeculiar@gmail.com> <915337710@qq.com>
Sorin Neacsu <sorin.neacsu@gmail.com> Sorin Neacsu <sorin.neacsu@gmail.com>
Sorin Neacsu <sorin.neacsu@gmail.com> <sorin@users.noreply.github.com> Sorin Neacsu <sorin.neacsu@gmail.com> <sorin@users.noreply.github.com>
@ -191,8 +262,14 @@ Taylor Gerring <taylor.gerring@gmail.com> <taylor.gerring@ethereum.org>
Thomas Bocek <tom@tomp2p.net> Thomas Bocek <tom@tomp2p.net>
tianyeyouyou <tianyeyouyou@gmail.com>
tianyeyouyou <tianyeyouyou@gmail.com> <150894831+tianyeyouyou@users.noreply.github.com>
Tim Cooijmans <timcooijmans@gmail.com> Tim Cooijmans <timcooijmans@gmail.com>
ucwong <ethereum2k@gmail.com>
ucwong <ethereum2k@gmail.com> <ucwong@126.com>
Valentin Wüstholz <wuestholz@gmail.com> Valentin Wüstholz <wuestholz@gmail.com>
Valentin Wüstholz <wuestholz@gmail.com> <wuestholz@users.noreply.github.com> Valentin Wüstholz <wuestholz@gmail.com> <wuestholz@users.noreply.github.com>
@ -221,6 +298,9 @@ Xudong Liu <33193253+r1cs@users.noreply.github.com>
Yohann Léon <sybiload@gmail.com> Yohann Léon <sybiload@gmail.com>
yzb <335357057@qq.com>
yzb <335357057@qq.com> <flyingyzb@gmail.com>
Zachinquarantine <Zachinquarantine@protonmail.com> Zachinquarantine <Zachinquarantine@protonmail.com>
Zachinquarantine <Zachinquarantine@protonmail.com> <zachinquarantine@yahoo.com> Zachinquarantine <Zachinquarantine@protonmail.com> <zachinquarantine@yahoo.com>
@ -228,9 +308,4 @@ Ziyuan Zhong <zzy.albert@163.com>
Zsolt Felföldi <zsfelfoldi@gmail.com> Zsolt Felföldi <zsfelfoldi@gmail.com>
meowsbits <b5c6@protonmail.com>
meowsbits <b5c6@protonmail.com> <45600330+meowsbits@users.noreply.github.com>
nedifi <103940716+nedifi@users.noreply.github.com>
Максим Чусовлянов <mchusovlianov@gmail.com> Максим Чусовлянов <mchusovlianov@gmail.com>

300
AUTHORS
View file

@ -1,52 +1,81 @@
# This is the official list of go-ethereum authors for copyright purposes. # This is the official list of go-ethereum authors for copyright purposes.
0xbeny <55846654+0xbeny@users.noreply.github.com>
0xbstn <bastien-bouge@hotmail.fr>
0xe3b0c4 <110295932+0xe3b0c4@users.noreply.github.com>
6543 <6543@obermui.de> 6543 <6543@obermui.de>
6xiaowu9 <736518585@qq.com>
a e r t h <aerth@users.noreply.github.com> a e r t h <aerth@users.noreply.github.com>
Aaron Buchwald <aaron.buchwald56@gmail.com> Aaron Buchwald <aaron.buchwald56@gmail.com>
Aaron Chen <aaronchen.lisp@gmail.com>
Aaron Kumavis <kumavis@users.noreply.github.com>
Aayush Rajasekaran <arajasek94@gmail.com>
Abel Nieto <abel.nieto90@gmail.com> Abel Nieto <abel.nieto90@gmail.com>
Abirdcfly <fp544037857@gmail.com>
Adam Babik <a.babik@designfortress.com> Adam Babik <a.babik@designfortress.com>
Adam Schmideg <adamschmideg@users.noreply.github.com> Adam Schmideg <adamschmideg@users.noreply.github.com>
Aditya <adityasripal@gmail.com> Aditya <adityasripal@gmail.com>
Aditya Arora <arora.aditya520@gmail.com> Aditya Arora <arora.aditya520@gmail.com>
Adrian Sutton <adrian@oplabs.co>
Adrià Cidre <adria.cidre@gmail.com> Adrià Cidre <adria.cidre@gmail.com>
Afanasii Kurakin <afanasy@users.noreply.github.com> Afanasii Kurakin <afanasy@users.noreply.github.com>
Afri Schoedon <5chdn@users.noreply.github.com> Afri Schoedon <5chdn@users.noreply.github.com>
Agustin Armellini Fischer <armellini13@gmail.com> Agustin Armellini Fischer <armellini13@gmail.com>
Ahmet Avci <ahmetabdullahavci07@gmail.com>
Ahyun <urbanart2251@gmail.com> Ahyun <urbanart2251@gmail.com>
Airead <fgh1987168@gmail.com> Airead <fgh1987168@gmail.com>
Alan Chen <alanchchen@users.noreply.github.com> Alan Chen <alanchchen@users.noreply.github.com>
Alejandro Isaza <alejandro.isaza@gmail.com> Alejandro Isaza <alejandro.isaza@gmail.com>
Aleksey Smyrnov <i@soar.name> Aleksey Smyrnov <i@soar.name>
Ales Katona <ales@coinbase.com> Ales Katona <ales@coinbase.com>
alex <152680487+bodhi-crypo@users.noreply.github.com>
Alex Beregszaszi <alex@rtfs.hu> Alex Beregszaszi <alex@rtfs.hu>
Alex Gartner <github@agartner.com>
Alex Leverington <alex@ethdev.com> Alex Leverington <alex@ethdev.com>
Alex Mazalov <mazalov@gmail.com> Alex Mazalov <mazalov@gmail.com>
Alex Mylonas <alex.a.mylonas@gmail.com>
Alex Pozhilenkov <alex_pozhilenkov@adoriasoft.com> Alex Pozhilenkov <alex_pozhilenkov@adoriasoft.com>
Alex Prut <1648497+alexprut@users.noreply.github.com> Alex Prut <1648497+alexprut@users.noreply.github.com>
Alex Stokes <r.alex.stokes@gmail.com>
Alex Wu <wuyiding@gmail.com> Alex Wu <wuyiding@gmail.com>
Alexander Mint <webinfo.alexander@gmail.com>
Alexander van der Meij <alexandervdm@users.noreply.github.com> Alexander van der Meij <alexandervdm@users.noreply.github.com>
Alexander Yastrebov <yastrebov.alex@gmail.com> Alexander Yastrebov <yastrebov.alex@gmail.com>
Alexandre Van de Sande <alex.vandesande@ethdev.com> Alexandre Van de Sande <alex.vandesande@ethdev.com>
Alexey Akhunov <akhounov@gmail.com> Alexey Akhunov <akhounov@gmail.com>
Alexey Shekhirin <a.shekhirin@gmail.com> Alexey Shekhirin <a.shekhirin@gmail.com>
alexwang <39109351+dipingxian2@users.noreply.github.com> alexwang <39109351+dipingxian2@users.noreply.github.com>
Alfie John <alfiedotwtf@users.noreply.github.com>
Ali Atiia <42751398+aliatiia@users.noreply.github.com> Ali Atiia <42751398+aliatiia@users.noreply.github.com>
Ali Hajimirza <Ali92hm@users.noreply.github.com> Ali Hajimirza <Ali92hm@users.noreply.github.com>
Alvaro Sevilla <alvarosevilla95@gmail.com>
am2rican5 <am2rican5@gmail.com> am2rican5 <am2rican5@gmail.com>
Amin Talebi <talebi242@gmail.com>
AMIR <31338382+amiremohamadi@users.noreply.github.com>
AmitBRD <60668103+AmitBRD@users.noreply.github.com> AmitBRD <60668103+AmitBRD@users.noreply.github.com>
Anatole <62328077+a2br@users.noreply.github.com> Anatole <62328077+a2br@users.noreply.github.com>
Andre Patta <andre_luis@outlook.com>
Andrea Franz <andrea@gravityblast.com> Andrea Franz <andrea@gravityblast.com>
Andrei Kostakov <bps@dzen.ws>
Andrei Maiboroda <andrei@ethereum.org> Andrei Maiboroda <andrei@ethereum.org>
Andrei Silviu Dragnea <andreidragnea.dev@gmail.com>
Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com>
Andrey Petrov <shazow@gmail.com> Andrey Petrov <shazow@gmail.com>
Andryanau Kanstantsin <andrianov.dev@yandex.by>
ANOTHEL <anothel1@naver.com> ANOTHEL <anothel1@naver.com>
Antoine Rondelet <rondelet.antoine@gmail.com> Antoine Rondelet <rondelet.antoine@gmail.com>
Antoine Toulme <atoulme@users.noreply.github.com> Antoine Toulme <atoulme@users.noreply.github.com>
Anton Evangelatov <anton.evangelatov@gmail.com> Anton Evangelatov <anton.evangelatov@gmail.com>
Antonio Salazar Cardozo <savedfastcool@gmail.com> Antonio Salazar Cardozo <savedfastcool@gmail.com>
Antony Denyer <email@antonydenyer.co.uk>
Anusha <63559942+anusha-ctrl@users.noreply.github.com>
Arba Sasmoyo <arba.sasmoyo@gmail.com> Arba Sasmoyo <arba.sasmoyo@gmail.com>
Armani Ferrante <armaniferrante@berkeley.edu> Armani Ferrante <armaniferrante@berkeley.edu>
Armin Braun <me@obrown.io> Armin Braun <me@obrown.io>
Aron Fischer <github@aron.guru> Aron Fischer <github@aron.guru>
Arran Schlosberg <519948+ARR4N@users.noreply.github.com>
ArtificialPB <matej.berger@hotmail.com>
Artyom Aminov <artjoma@users.noreply.github.com>
atsushi-ishibashi <atsushi.ishibashi@finatext.com> atsushi-ishibashi <atsushi.ishibashi@finatext.com>
Austin Roberts <code@ausiv.com> Austin Roberts <code@ausiv.com>
ayeowch <ayeowch@gmail.com> ayeowch <ayeowch@gmail.com>
@ -54,83 +83,135 @@ b00ris <b00ris@mail.ru>
b1ackd0t <blackd0t@protonmail.com> b1ackd0t <blackd0t@protonmail.com>
bailantaotao <Edwin@maicoin.com> bailantaotao <Edwin@maicoin.com>
baizhenxuan <nkbai@163.com> baizhenxuan <nkbai@163.com>
Bala Murali Krishna Komatireddy <krishna192reddy@gmail.com>
Balaji Shetty Pachai <32358081+balajipachai@users.noreply.github.com> Balaji Shetty Pachai <32358081+balajipachai@users.noreply.github.com>
Balint Gabor <balint.g@gmail.com> Balint Gabor <balint.g@gmail.com>
baptiste-b-pegasys <85155432+baptiste-b-pegasys@users.noreply.github.com> baptiste-b-pegasys <85155432+baptiste-b-pegasys@users.noreply.github.com>
Bas van Kervel <bas@ethdev.com> Bas van Kervel <bas@ethdev.com>
Benjamin Brent <benjamin@benjaminbrent.com> Benjamin Brent <benjamin@benjaminbrent.com>
Benjamin Prosnitz <bprosnitz@gmail.com>
benma <mbencun@gmail.com> benma <mbencun@gmail.com>
Benoit Verkindt <benoit.verkindt@gmail.com> Benoit Verkindt <benoit.verkindt@gmail.com>
Bin <49082129+songzhibin97@users.noreply.github.com>
Binacs <bin646891055@gmail.com> Binacs <bin646891055@gmail.com>
bitcoin-lightning <153181187+AtomicInnovation321@users.noreply.github.com>
bk <5810624+bkellerman@users.noreply.github.com>
bloonfield <bloonfield@163.com> bloonfield <bloonfield@163.com>
bnovil <lzqcn2000@126.com>
Bo <bohende@gmail.com> Bo <bohende@gmail.com>
Bo Ye <boy.e.computer.1982@outlook.com> Bo Ye <boy.e.computer.1982@outlook.com>
Bob Glickstein <bobg@users.noreply.github.com> Bob Glickstein <bobg@users.noreply.github.com>
Boqin Qin <bobbqqin@bupt.edu.cn> Boqin Qin <bobbqqin@bupt.edu.cn>
BorkBorked <107079055+BorkBorked@users.noreply.github.com>
Brandon Harden <b.harden92@gmail.com> Brandon Harden <b.harden92@gmail.com>
Brandon Liu <lzqcn2000@126.com>
Brent <bmperrea@gmail.com> Brent <bmperrea@gmail.com>
Brian Schroeder <bts@gmail.com> Brian Schroeder <bts@gmail.com>
Brion <4777457+cifer76@users.noreply.github.com>
Bruno Škvorc <bruno@skvorc.me> Bruno Škvorc <bruno@skvorc.me>
buddho <galaxystroller@gmail.com>
bugmaker9371 <167614621+bugmaker9371@users.noreply.github.com>
C. Brown <hackdom@majoolr.io> C. Brown <hackdom@majoolr.io>
Caesar Chad <BLUE.WEB.GEEK@gmail.com> Caesar Chad <BLUE.WEB.GEEK@gmail.com>
cam-schultz <78878559+cam-schultz@users.noreply.github.com>
Casey Detrio <cdetrio@gmail.com> Casey Detrio <cdetrio@gmail.com>
caseylove <casey4love@foxmail.com>
CDsigma <cdsigma271@gmail.com> CDsigma <cdsigma271@gmail.com>
Cedrick <Cedrickentrep@gmail.com>
Ceelog <chenwei@ceelog.org> Ceelog <chenwei@ceelog.org>
Ceyhun Onur <ceyhun.onur@avalabs.org> Ceyhun Onur <ceyhun.onur@avalabs.org>
chabashilah <doumodoumo@gmail.com> chabashilah <doumodoumo@gmail.com>
changhong <changhong.yu@shanbay.com> changhong <changhong.yu@shanbay.com>
Charles Cooper <cooper.charles.m@gmail.com>
Chase Wright <mysticryuujin@gmail.com> Chase Wright <mysticryuujin@gmail.com>
Chawin Aiemvaravutigul <nick41746@hotmail.com>
Chen Quan <terasum@163.com> Chen Quan <terasum@163.com>
chen4903 <108803001+chen4903@users.noreply.github.com>
Cheng Li <lob4tt@gmail.com> Cheng Li <lob4tt@gmail.com>
chenglin <910372762@qq.com> chenglin <910372762@qq.com>
chenyufeng <yufengcode@gmail.com> chenyufeng <yufengcode@gmail.com>
Chirag Garg <38765776+DeVil2O@users.noreply.github.com>
chirag-bgh <76247491+chirag-bgh@users.noreply.github.com>
Chris Pacia <ctpacia@gmail.com> Chris Pacia <ctpacia@gmail.com>
Chris Ziogas <ziogaschr@gmail.com> Chris Ziogas <ziogaschr@gmail.com>
Christian Muehlhaeuser <muesli@gmail.com> Christian Muehlhaeuser <muesli@gmail.com>
Christina <156356273+cratiu222@users.noreply.github.com>
Christoph Jentzsch <jentzsch.software@gmail.com> Christoph Jentzsch <jentzsch.software@gmail.com>
Christopher Harrison <31964100+chrischarlesharrison@users.noreply.github.com>
chuwt <weitaochu@gmail.com> chuwt <weitaochu@gmail.com>
cocoyeal <150209682+cocoyeal@users.noreply.github.com>
cong <ackratos@users.noreply.github.com> cong <ackratos@users.noreply.github.com>
Connor Stein <connor.stein@mail.mcgill.ca> Connor Stein <connor.stein@mail.mcgill.ca>
Corey Lin <514971757@qq.com> Corey Lin <514971757@qq.com>
courtier <derinilter@gmail.com> courtier <derinilter@gmail.com>
cpusoft <cpusoft@live.com> cpusoft <cpusoft@live.com>
crazeteam <164632007+crazeteam@users.noreply.github.com>
Crispin Flowerday <crispin@bitso.com> Crispin Flowerday <crispin@bitso.com>
croath <croathliu@gmail.com> croath <croathliu@gmail.com>
cui <523516579@qq.com> cui <523516579@qq.com>
cui fliter <imcusg@gmail.com>
cuinix <65650185+cuinix@users.noreply.github.com>
Curith <oiooj@qq.com>
cygaar <97691933+cygaar@users.noreply.github.com>
Dan Cline <6798349+Rjected@users.noreply.github.com>
Dan DeGreef <dan.degreef@gmail.com> Dan DeGreef <dan.degreef@gmail.com>
Dan Kinsley <dan@joincivil.com> Dan Kinsley <dan@joincivil.com>
Dan Laine <daniel.laine@avalabs.org>
Dan Sosedoff <dan.sosedoff@gmail.com> Dan Sosedoff <dan.sosedoff@gmail.com>
danceratopz <danceratopz@gmail.com>
Daniel A. Nagy <nagy.da@gmail.com> Daniel A. Nagy <nagy.da@gmail.com>
Daniel Fernandes <711733+daferna@users.noreply.github.com>
Daniel Katzan <108216499+dkatzan@users.noreply.github.com>
Daniel Knopik <107140945+dknopik@users.noreply.github.com>
Daniel Liu <liudaniel@qq.com>
Daniel Perez <daniel@perez.sh> Daniel Perez <daniel@perez.sh>
Daniel Sloof <goapsychadelic@gmail.com> Daniel Sloof <goapsychadelic@gmail.com>
Danno Ferrin <danno@numisight.com>
Danyal Prout <me@dany.al>
Darioush Jalali <darioush.jalali@avalabs.org> Darioush Jalali <darioush.jalali@avalabs.org>
Darrel Herbst <dherbst@gmail.com> Darrel Herbst <dherbst@gmail.com>
Darren Kelly <107671032+darrenvechain@users.noreply.github.com>
dashangcun <907225865@qq.com>
Dave Appleton <calistralabs@gmail.com> Dave Appleton <calistralabs@gmail.com>
Dave McGregor <dave.s.mcgregor@gmail.com> Dave McGregor <dave.s.mcgregor@gmail.com>
David Cai <davidcai1993@yahoo.com> David Cai <davidcai1993@yahoo.com>
David Dzhalaev <72649244+DavidRomanovizc@users.noreply.github.com>
David Huie <dahuie@gmail.com> David Huie <dahuie@gmail.com>
David Murdoch <187813+davidmurdoch@users.noreply.github.com>
David Theodore <29786815+infosecual@users.noreply.github.com>
ddl <ddl196526@163.com>
Dean Eigenmann <7621705+decanus@users.noreply.github.com>
Delweng <delweng@gmail.com>
Denver <aeharvlee@gmail.com> Denver <aeharvlee@gmail.com>
Derek Chiang <me@derekchiang.com> Derek Chiang <me@derekchiang.com>
Derek Gottfrid <derek@codecubed.com> Derek Gottfrid <derek@codecubed.com>
deterclosed <164524498+deterclosed@users.noreply.github.com>
Devon Bear <itsdevbear@berachain.com>
Di Peng <pendyaaa@gmail.com> Di Peng <pendyaaa@gmail.com>
Diederik Loerakker <proto@protolambda.com> Diederik Loerakker <proto@protolambda.com>
Diego Siqueira <DiSiqueira@users.noreply.github.com> Diego Siqueira <DiSiqueira@users.noreply.github.com>
Diep Pham <mrfavadi@gmail.com> Diep Pham <mrfavadi@gmail.com>
Dimitris Apostolou <dimitris.apostolou@icloud.com>
dipingxian2 <39109351+dipingxian2@users.noreply.github.com> dipingxian2 <39109351+dipingxian2@users.noreply.github.com>
divergencetech <94644849+divergencetech@users.noreply.github.com> divergencetech <94644849+divergencetech@users.noreply.github.com>
dknopik <107140945+dknopik@users.noreply.github.com>
dm4 <sunrisedm4@gmail.com> dm4 <sunrisedm4@gmail.com>
Dmitrij Koniajev <dimchansky@gmail.com> Dmitrij Koniajev <dimchansky@gmail.com>
Dmitry Shulyak <yashulyak@gmail.com> Dmitry Shulyak <yashulyak@gmail.com>
Dmitry Zenovich <dzenovich@gmail.com> Dmitry Zenovich <dzenovich@gmail.com>
Domino Valdano <dominoplural@gmail.com> Domino Valdano <dominoplural@gmail.com>
DongXi Huang <418498589@qq.com>
Dragan Milic <dragan@netice9.com> Dragan Milic <dragan@netice9.com>
dragonvslinux <35779158+dragononcrypto@users.noreply.github.com> dragonvslinux <35779158+dragononcrypto@users.noreply.github.com>
Dylan Vassallo <dylan.vassallo@hotmail.com>
easyfold <137396765+easyfold@users.noreply.github.com>
Edgar Aroutiounian <edgar.factorial@gmail.com> Edgar Aroutiounian <edgar.factorial@gmail.com>
Eduard S <eduardsanou@posteo.net> Eduard S <eduardsanou@posteo.net>
Egon Elbre <egonelbre@gmail.com> Egon Elbre <egonelbre@gmail.com>
Elad <theman@elad.im> Elad <theman@elad.im>
Eli <elihanover@yahoo.com> Eli <elihanover@yahoo.com>
Elias Naur <elias.naur@gmail.com> Elias Naur <elias.naur@gmail.com>
Elias Rad <146735585+nnsW3@users.noreply.github.com>
Elliot Shepherd <elliot@identitii.com> Elliot Shepherd <elliot@identitii.com>
Emil <mursalimovemeel@gmail.com> Emil <mursalimovemeel@gmail.com>
emile <emile@users.noreply.github.com> emile <emile@users.noreply.github.com>
@ -151,11 +232,13 @@ Evgeny <awesome.observer@yandex.com>
Evgeny Danilenko <6655321@bk.ru> Evgeny Danilenko <6655321@bk.ru>
evgk <evgeniy.kamyshev@gmail.com> evgk <evgeniy.kamyshev@gmail.com>
Evolution404 <35091674+Evolution404@users.noreply.github.com> Evolution404 <35091674+Evolution404@users.noreply.github.com>
Exca-DK <85954505+Exca-DK@users.noreply.github.com>
EXEC <execvy@gmail.com> EXEC <execvy@gmail.com>
Fabian Vogelsteller <fabian@frozeman.de> Fabian Vogelsteller <fabian@frozeman.de>
Fabio Barone <fabio.barone.co@gmail.com> Fabio Barone <fabio.barone.co@gmail.com>
Fabio Berger <fabioberger1991@gmail.com> Fabio Berger <fabioberger1991@gmail.com>
FaceHo <facehoshi@gmail.com> FaceHo <facehoshi@gmail.com>
felipe <fselmo2@gmail.com>
Felipe Strozberg <48066928+FelStroz@users.noreply.github.com> Felipe Strozberg <48066928+FelStroz@users.noreply.github.com>
Felix Lange <fjl@twurst.com> Felix Lange <fjl@twurst.com>
Ferenc Szabo <frncmx@gmail.com> Ferenc Szabo <frncmx@gmail.com>
@ -163,68 +246,102 @@ ferhat elmas <elmas.ferhat@gmail.com>
Ferran Borreguero <ferranbt@protonmail.com> Ferran Borreguero <ferranbt@protonmail.com>
Fiisio <liangcszzu@163.com> Fiisio <liangcszzu@163.com>
Fire Man <55934298+basdevelop@users.noreply.github.com> Fire Man <55934298+basdevelop@users.noreply.github.com>
FletcherMan <fanciture@163.com>
flowerofdream <775654398@qq.com> flowerofdream <775654398@qq.com>
fomotrader <82184770+fomotrader@users.noreply.github.com> fomotrader <82184770+fomotrader@users.noreply.github.com>
Ford <153042616+guerrierindien@users.noreply.github.com>
ForLina <471133417@qq.com> ForLina <471133417@qq.com>
Frank Szendzielarz <33515470+FrankSzendzielarz@users.noreply.github.com> Frank Szendzielarz <33515470+FrankSzendzielarz@users.noreply.github.com>
Frank Wang <eternnoir@gmail.com> Frank Wang <eternnoir@gmail.com>
Franklin <mr_franklin@126.com> Franklin <mr_franklin@126.com>
Freeman Jiang <freeman.jiang.ca@gmail.com>
Furkan KAMACI <furkankamaci@gmail.com> Furkan KAMACI <furkankamaci@gmail.com>
Fuyang Deng <dengfuyang@outlook.com> Fuyang Deng <dengfuyang@outlook.com>
GagziW <leon.stanko@rwth-aachen.de> GagziW <leon.stanko@rwth-aachen.de>
Gary Rong <garyrong0905@gmail.com> Gary Rong <garyrong0905@gmail.com>
Gautam Botrel <gautam.botrel@gmail.com> Gautam Botrel <gautam.botrel@gmail.com>
Gealber Morales <48373523+Gealber@users.noreply.github.com>
George Ma <164313692+availhang@users.noreply.github.com>
George Ornbo <george@shapeshed.com> George Ornbo <george@shapeshed.com>
georgehao <haohongfan@gmail.com>
gitglorythegreat <t4juu3@proton.me>
Giuseppe Bertone <bertone.giuseppe@gmail.com> Giuseppe Bertone <bertone.giuseppe@gmail.com>
Greg Colvin <greg@colvin.org> Greg Colvin <greg@colvin.org>
Gregg Dourgarian <greggd@tempworks.com> Gregg Dourgarian <greggd@tempworks.com>
Gregory Markou <16929357+GregTheGreek@users.noreply.github.com> Gregory Markou <16929357+GregTheGreek@users.noreply.github.com>
guangwu <guoguangwu@magic-shield.com>
Guido Vranken <guidovranken@users.noreply.github.com>
Guifel <toowik@gmail.com> Guifel <toowik@gmail.com>
Guilherme Salgado <gsalgado@gmail.com> Guilherme Salgado <gsalgado@gmail.com>
Guillaume Ballet <gballet@gmail.com> Guillaume Ballet <gballet@gmail.com>
Guillaume Michel <guillaumemichel@users.noreply.github.com>
Guillaume Nicolas <guin56@gmail.com> Guillaume Nicolas <guin56@gmail.com>
GuiltyMorishita <morilliantblue@gmail.com> GuiltyMorishita <morilliantblue@gmail.com>
Guruprasad Kamath <48196632+gurukamath@users.noreply.github.com> Guruprasad Kamath <48196632+gurukamath@users.noreply.github.com>
Gus <yo@soygus.com> Gus <yo@soygus.com>
Gustav Simonsson <gustav.simonsson@gmail.com> Gustav Simonsson <gustav.simonsson@gmail.com>
Gustavo Silva <GustavoRSSilva@users.noreply.github.com>
Gísli Kristjánsson <gislik@hamstur.is> Gísli Kristjánsson <gislik@hamstur.is>
Ha ĐANG <dvietha@gmail.com> Ha ĐANG <dvietha@gmail.com>
HackyMiner <hackyminer@gmail.com> HackyMiner <hackyminer@gmail.com>
hadv <dvietha@gmail.com> Halimao <1065621723@qq.com>
Hanjiang Yu <delacroix.yu@gmail.com> Hanjiang Yu <delacroix.yu@gmail.com>
Hao Bryan Cheng <haobcheng@gmail.com> Hao Bryan Cheng <haobcheng@gmail.com>
Hao Duan <duanhao0814@gmail.com> Hao Duan <duanhao0814@gmail.com>
haoran <159284258+hr98w@users.noreply.github.com>
Haotian <51777534+tmelhao@users.noreply.github.com>
HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com> HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Harry Dutton <me@bytejedi.com> Harry Dutton <me@bytejedi.com>
Harry Kalodner <harry.kalodner@gmail.com>
haryu703 <34744512+haryu703@users.noreply.github.com> haryu703 <34744512+haryu703@users.noreply.github.com>
hattizai <hattizai@gmail.com>
Hendrik Hofstadt <hendrik@nexantic.com> Hendrik Hofstadt <hendrik@nexantic.com>
Henning Diedrich <hd@eonblast.com> Henning Diedrich <hd@eonblast.com>
henopied <13500516+henopied@users.noreply.github.com> henopied <13500516+henopied@users.noreply.github.com>
henridf <henri@dubfer.com>
Henry <101552941+henry-0@users.noreply.github.com>
hero5512 <lvshuaino@gmail.com> hero5512 <lvshuaino@gmail.com>
holisticode <holistic.computing@gmail.com> holisticode <holistic.computing@gmail.com>
Hongbin Mao <hello2mao@gmail.com> Hongbin Mao <hello2mao@gmail.com>
Hsien-Tang Kao <htkao@pm.me> Hsien-Tang Kao <htkao@pm.me>
hsyodyssey <47173566+hsyodyssey@users.noreply.github.com> hsyodyssey <47173566+hsyodyssey@users.noreply.github.com>
Hteev Oli <gethorz@proton.me>
Husam Ibrahim <39692071+HusamIbrahim@users.noreply.github.com> Husam Ibrahim <39692071+HusamIbrahim@users.noreply.github.com>
Hwanjo Heo <34005989+hwanjo@users.noreply.github.com> Hwanjo Heo <34005989+hwanjo@users.noreply.github.com>
hydai <z54981220@gmail.com> hydai <z54981220@gmail.com>
hyhnet <cyrusyun@qq.com>
hyunchel <3271191+hyunchel@users.noreply.github.com>
Hyung-Kyu Hqueue Choi <hyungkyu.choi@gmail.com> Hyung-Kyu Hqueue Choi <hyungkyu.choi@gmail.com>
Hyunsoo Shin (Lake) <hyunsooda@kaist.ac.kr>
hzysvilla <ecjgvmhc@gmail.com>
Håvard Anda Estensen <haavard.ae@gmail.com> Håvard Anda Estensen <haavard.ae@gmail.com>
Ian Macalinao <me@ian.pw> Ian Macalinao <me@ian.pw>
Ian Norden <iannordenn@gmail.com> Ian Norden <iannordenn@gmail.com>
Icarus Wu <icaruswu66@qq.com>
icodezjb <icodezjb@163.com> icodezjb <icodezjb@163.com>
Ikko Ashimine <eltociear@gmail.com> ids <tonyhaha163@163.com>
Ignacio Hagopian <jsign.uy@gmail.com>
Ikko Eltociear Ashimine <eltociear@gmail.com>
Ilan Gitter <8359193+gitteri@users.noreply.github.com> Ilan Gitter <8359193+gitteri@users.noreply.github.com>
imalasong <55082705+imalasong@users.noreply.github.com>
ImanSharaf <78227895+ImanSharaf@users.noreply.github.com> ImanSharaf <78227895+ImanSharaf@users.noreply.github.com>
imulmat4 <117636097+imulmat4@users.noreply.github.com>
Inphi <mlaw2501@gmail.com>
int88 <106391185+int88@users.noreply.github.com>
Isidoro Ghezzi <isidoro.ghezzi@icloud.com> Isidoro Ghezzi <isidoro.ghezzi@icloud.com>
Iskander (Alex) Sharipov <quasilyte@gmail.com> Iskander (Alex) Sharipov <quasilyte@gmail.com>
Ivan Aracki <aracki.ivan@gmail.com>
Ivan Bogatyy <bogatyi@gmail.com> Ivan Bogatyy <bogatyi@gmail.com>
Ivan Daniluk <ivan.daniluk@gmail.com> Ivan Daniluk <ivan.daniluk@gmail.com>
Ivo Georgiev <ivo@strem.io> Ivo Georgiev <ivo@strem.io>
j2gg0s <j2gg0s@gmail.com>
jacksoom <lifengliu1994@gmail.com> jacksoom <lifengliu1994@gmail.com>
jackyin <648588267@qq.com>
Jae Kwon <jkwon.work@gmail.com> Jae Kwon <jkwon.work@gmail.com>
James Prestwich <10149425+prestwich@users.noreply.github.com> Jakub Freebit <49676311+jakub-freebit@users.noreply.github.com>
James Prestwich <james@prestwi.ch>
Jamie Pitts <james.pitts@gmail.com> Jamie Pitts <james.pitts@gmail.com>
Janko Simonovic <simonovic86@gmail.com>
Janoš Guljaš <janos@resenje.org> Janoš Guljaš <janos@resenje.org>
Jared Wasinger <j-wasinger@hotmail.com> Jared Wasinger <j-wasinger@hotmail.com>
Jason Carver <jacarver@linkedin.com> Jason Carver <jacarver@linkedin.com>
@ -239,42 +356,63 @@ Jeff Wentworth <jeff@curvegrid.com>
Jeffery Robert Walsh <rlxrlps@gmail.com> Jeffery Robert Walsh <rlxrlps@gmail.com>
Jeffrey Wilcke <jeffrey@ethereum.org> Jeffrey Wilcke <jeffrey@ethereum.org>
Jens Agerberg <github@agerberg.me> Jens Agerberg <github@agerberg.me>
Jens W <8270201+DragonDev1906@users.noreply.github.com>
Jeremy McNevin <jeremy.mcnevin@optum.com> Jeremy McNevin <jeremy.mcnevin@optum.com>
Jeremy Schlatter <jeremy.schlatter@gmail.com> Jeremy Schlatter <jeremy.schlatter@gmail.com>
Jerzy Lasyk <jerzylasyk@gmail.com> Jerzy Lasyk <jerzylasyk@gmail.com>
Jesse Tane <jesse.tane@gmail.com> Jesse Tane <jesse.tane@gmail.com>
Jia Chenhui <jiachenhui1989@gmail.com> Jia Chenhui <jiachenhui1989@gmail.com>
Jim McDonald <Jim@mcdee.net> Jim McDonald <Jim@mcdee.net>
jin <35813306+lochjin@users.noreply.github.com>
jk-jeongkyun <45347815+jeongkyun-oh@users.noreply.github.com> jk-jeongkyun <45347815+jeongkyun-oh@users.noreply.github.com>
jkcomment <jkcomment@gmail.com> jkcomment <jkcomment@gmail.com>
Joe Netti <joe@netti.dev>
JoeGruffins <34998433+JoeGruffins@users.noreply.github.com> JoeGruffins <34998433+JoeGruffins@users.noreply.github.com>
Joel Burget <joelburget@gmail.com> Joel Burget <joelburget@gmail.com>
John C. Vernaleo <john@netpurgatory.com> John C. Vernaleo <john@netpurgatory.com>
John Chase <68833933+joohhnnn@users.noreply.github.com>
John Difool <johndifoolpi@gmail.com> John Difool <johndifoolpi@gmail.com>
John Hilliard <jhilliard@polygon.technology>
John Xu <dyxushuai@gmail.com>
Johns Beharry <johns@peakshift.com> Johns Beharry <johns@peakshift.com>
Jolly Zhao <zhaolei@pm.me>
Jonas <felberj@users.noreply.github.com> Jonas <felberj@users.noreply.github.com>
Jonathan Brown <jbrown@bluedroplet.com> Jonathan Brown <jbrown@bluedroplet.com>
Jonathan Chappelow <chappjc@users.noreply.github.com> Jonathan Chappelow <chappjc@users.noreply.github.com>
Jonathan Gimeno <jgimeno@gmail.com> Jonathan Gimeno <jgimeno@gmail.com>
Jonathan Otto <jonathan.otto@gmail.com>
JoranHonig <JoranHonig@users.noreply.github.com> JoranHonig <JoranHonig@users.noreply.github.com>
Jordan Krage <jmank88@gmail.com> Jordan Krage <jmank88@gmail.com>
Jorge <jorgeacortes@users.noreply.github.com>
Jorropo <jorropo.pgm@gmail.com> Jorropo <jorropo.pgm@gmail.com>
Joseph Chow <ethereum@outlook.com> Joseph Chow <ethereum@outlook.com>
Joseph Cook <33655003+jmcook1186@users.noreply.github.com>
Joshua Colvin <jcolvin@offchainlabs.com> Joshua Colvin <jcolvin@offchainlabs.com>
Joshua Gutow <jbgutow@gmail.com> Joshua Gutow <jbgutow@gmail.com>
jovijovi <mageyul@hotmail.com> jovijovi <mageyul@hotmail.com>
jp-imx <109574657+jp-imx@users.noreply.github.com>
jtakalai <juuso.takalainen@streamr.com> jtakalai <juuso.takalainen@streamr.com>
JU HYEONG PARK <dkdkajej@gmail.com> JU HYEONG PARK <dkdkajej@gmail.com>
Julian Y <jyap808@users.noreply.github.com> Julian Y <jyap808@users.noreply.github.com>
Justin Clark-Casey <justincc@justincc.org> Justin Clark-Casey <justincc@justincc.org>
Justin Dhillon <justin.singh.dhillon@gmail.com>
Justin Drake <drakefjustin@gmail.com> Justin Drake <drakefjustin@gmail.com>
Justin Traglia <95511699+jtraglia@users.noreply.github.com>
Justus <jus@gtsbr.org> Justus <jus@gtsbr.org>
KAI <35927054+ThreeAndTwo@users.noreply.github.com>
kaliubuntu0206 <139627505+kaliubuntu0206@users.noreply.github.com>
Karl Bartel <karl.bartel@clabs.co>
Karol Chojnowski <karolchojnowski95@gmail.com>
Kawashima <91420903+sscodereth@users.noreply.github.com> Kawashima <91420903+sscodereth@users.noreply.github.com>
kazak <alright-epsilon8h@icloud.com>
ken10100147 <sunhongping@kanjian.com> ken10100147 <sunhongping@kanjian.com>
Kenji Siu <kenji@isuntv.com> Kenji Siu <kenji@isuntv.com>
Kenso Trabing <ktrabing@acm.org> Kenso Trabing <ktrabing@acm.org>
Kero <keroroxx520@gmail.com>
kevaundray <kevtheappdev@gmail.com>
Kevin <denk.kevin@web.de> Kevin <denk.kevin@web.de>
kevin.xu <cming.xu@gmail.com> kevin.xu <cming.xu@gmail.com>
Kiarash Hajian <133909368+kiarash8112@users.noreply.github.com>
KibGzr <kibgzr@gmail.com> KibGzr <kibgzr@gmail.com>
kiel barry <kiel.j.barry@gmail.com> kiel barry <kiel.j.barry@gmail.com>
kilic <onurkilic1004@gmail.com> kilic <onurkilic1004@gmail.com>
@ -282,8 +420,10 @@ kimmylin <30611210+kimmylin@users.noreply.github.com>
Kitten King <53072918+kittenking@users.noreply.github.com> Kitten King <53072918+kittenking@users.noreply.github.com>
knarfeh <hejun1874@gmail.com> knarfeh <hejun1874@gmail.com>
Kobi Gurkan <kobigurk@gmail.com> Kobi Gurkan <kobigurk@gmail.com>
Koichi Shiraishi <zchee.io@gmail.com>
komika <komika@komika.org> komika <komika@komika.org>
Konrad Feldmeier <konrad@brainbot.com> Konrad Feldmeier <konrad@brainbot.com>
Kosuke Taniguchi <73885532+TaniguchiKosuke@users.noreply.github.com>
Kris Shinn <raggamuffin.music@gmail.com> Kris Shinn <raggamuffin.music@gmail.com>
Kristofer Peterson <svenski123@users.noreply.github.com> Kristofer Peterson <svenski123@users.noreply.github.com>
Kumar Anirudha <mail@anirudha.dev> Kumar Anirudha <mail@anirudha.dev>
@ -296,6 +436,8 @@ Lefteris Karapetsas <lefteris@refu.co>
Leif Jurvetson <leijurv@gmail.com> Leif Jurvetson <leijurv@gmail.com>
Leo Shklovskii <leo@thermopylae.net> Leo Shklovskii <leo@thermopylae.net>
LeoLiao <leofantast@gmail.com> LeoLiao <leofantast@gmail.com>
Leon <316032931@qq.com>
levisyin <150114626+levisyin@users.noreply.github.com>
Lewis Marshall <lewis@lmars.net> Lewis Marshall <lewis@lmars.net>
lhendre <lhendre2@gmail.com> lhendre <lhendre2@gmail.com>
Li Dongwei <lidw1988@126.com> Li Dongwei <lidw1988@126.com>
@ -305,36 +447,58 @@ libby kent <viskovitzzz@gmail.com>
libotony <liboliqi@gmail.com> libotony <liboliqi@gmail.com>
LieutenantRoger <dijsky_2015@hotmail.com> LieutenantRoger <dijsky_2015@hotmail.com>
ligi <ligi@ligi.de> ligi <ligi@ligi.de>
lilasxie <thanklilas@163.com>
Lindlof <mikael@lindlof.io>
Lio李欧 <lionello@users.noreply.github.com> Lio李欧 <lionello@users.noreply.github.com>
lmittmann <lmittmann@users.noreply.github.com> Liyi Guo <102356659+colinlyguo@users.noreply.github.com>
llkhacquan <3724362+llkhacquan@users.noreply.github.com>
lmittmann <3458786+lmittmann@users.noreply.github.com>
lorenzo <31852651+lorenzo-dev1@users.noreply.github.com>
Lorenzo Manacorda <lorenzo@kinvolk.io> Lorenzo Manacorda <lorenzo@kinvolk.io>
Louis Holbrook <dev@holbrook.no> Louis Holbrook <dev@holbrook.no>
Luca Zeug <luclu@users.noreply.github.com> Luca Zeug <luclu@users.noreply.github.com>
Lucas <lucaslg360@gmail.com>
Lucas Hendren <lhendre2@gmail.com> Lucas Hendren <lhendre2@gmail.com>
Luozhu <70309026+LuozhuZhang@users.noreply.github.com>
lwh <lwhile521@gmail.com>
lzhfromustc <43191155+lzhfromustc@users.noreply.github.com> lzhfromustc <43191155+lzhfromustc@users.noreply.github.com>
Maciej Kulawik <10907694+magicxyyz@users.noreply.github.com>
Madhur Shrimal <madhur.shrimal@gmail.com>
Magicking <s@6120.eu> Magicking <s@6120.eu>
makcandrov <makcandrov@proton.me>
manlio <manlio.poltronieri@gmail.com> manlio <manlio.poltronieri@gmail.com>
Manoj Kumar <mnjkmr398@gmail.com>
Maran Hidskes <maran.hidskes@gmail.com> Maran Hidskes <maran.hidskes@gmail.com>
Marcin Sobczak <77129288+marcindsobczak@users.noreply.github.com>
Marcus Baldassarre <baldassarremarcus@gmail.com>
Marek Kotewicz <marek.kotewicz@gmail.com> Marek Kotewicz <marek.kotewicz@gmail.com>
Mariano Cortesi <mcortesi@gmail.com> Mariano Cortesi <mcortesi@gmail.com>
Mario Vega <marioevz@gmail.com>
Marius G <90795310+bearpebble@users.noreply.github.com>
Marius Kjærstad <sandakersmann@users.noreply.github.com>
Marius van der Wijden <m.vanderwijden@live.de> Marius van der Wijden <m.vanderwijden@live.de>
Mark <markya0616@gmail.com> Mark <markya0616@gmail.com>
Mark Rushakoff <mark.rushakoff@gmail.com> Mark Rushakoff <mark.rushakoff@gmail.com>
Mark Tyneway <mark.tyneway@gmail.com>
mark.lin <mark@maicoin.com> mark.lin <mark@maicoin.com>
markus <55011443+mdymalla@users.noreply.github.com>
Marquis Shanahan <29431502+9547@users.noreply.github.com>
Martin Alex Philip Dawson <u1356770@gmail.com> Martin Alex Philip Dawson <u1356770@gmail.com>
Martin Holst Swende <martin@swende.se> Martin Holst Swende <martin@swende.se>
Martin Klepsch <martinklepsch@googlemail.com> Martin Klepsch <martinklepsch@googlemail.com>
Martin Lundfall <martin.lundfall@protonmail.com> Martin Lundfall <martin.lundfall@protonmail.com>
Martin Michlmayr <tbm@cyrius.com> Martin Michlmayr <tbm@cyrius.com>
Martin Redmond <21436+reds@users.noreply.github.com> Martin Redmond <21436+reds@users.noreply.github.com>
maskpp <maskpp266@gmail.com>
Mason Fischer <mason@kissr.co> Mason Fischer <mason@kissr.co>
Mateusz Morusiewicz <11313015+Ruteri@users.noreply.github.com> Mateusz Morusiewicz <11313015+Ruteri@users.noreply.github.com>
Mats Julian Olsen <mats@plysjbyen.net> Mats Julian Olsen <mats@plysjbyen.net>
Matt Garnett <14004106+lightclient@users.noreply.github.com> Matt Garnett <lightclient@protonmail.com>
Matt K <1036969+mkrump@users.noreply.github.com> Matt K <1036969+mkrump@users.noreply.github.com>
Matthew Di Ferrante <mattdf@users.noreply.github.com> Matthew Di Ferrante <mattdf@users.noreply.github.com>
Matthew Halpern <matthalp@gmail.com> Matthew Halpern <matthalp@gmail.com>
Matthew Wampler-Doty <matthew.wampler.doty@gmail.com> Matthew Wampler-Doty <matthew.wampler.doty@gmail.com>
Matthieu Vachon <matt@streamingfast.io>
Max Sistemich <mafrasi2@googlemail.com> Max Sistemich <mafrasi2@googlemail.com>
Maxim Zhiburt <zhiburt@gmail.com> Maxim Zhiburt <zhiburt@gmail.com>
Maximilian Meister <mmeister@suse.de> Maximilian Meister <mmeister@suse.de>
@ -342,34 +506,55 @@ me020523 <me020523@gmail.com>
Melvin Junhee Woo <melvin.woo@groundx.xyz> Melvin Junhee Woo <melvin.woo@groundx.xyz>
meowsbits <b5c6@protonmail.com> meowsbits <b5c6@protonmail.com>
Micah Zoltu <micah@zoltu.net> Micah Zoltu <micah@zoltu.net>
Michael de Hoog <michael.dehoog@gmail.com>
Michael Forney <mforney@mforney.org> Michael Forney <mforney@mforney.org>
Michael Riabzev <michael@starkware.co> Michael Riabzev <michael@starkware.co>
Michael Ruminer <michael.ruminer+github@gmail.com> Michael Ruminer <michael.ruminer+github@gmail.com>
michael1011 <me@michael1011.at> michael1011 <me@michael1011.at>
Miguel Mota <miguelmota2@gmail.com> Miguel Mota <miguelmota2@gmail.com>
Mike Burr <mburr@nightmare.com> Mike Burr <mburr@nightmare.com>
Mikel Cortes <45786396+cortze@users.noreply.github.com>
Mikhail Mikheev <mmvsha73@gmail.com> Mikhail Mikheev <mmvsha73@gmail.com>
Mikhail Vazhnov <michael.vazhnov@gmail.com>
miles <66052478+miles-six@users.noreply.github.com>
Miles Chen <fearlesschenc@gmail.com>
milesvant <milesvant@gmail.com> milesvant <milesvant@gmail.com>
minh-bq <97180373+minh-bq@users.noreply.github.com>
Mio <mshimmeris@gmail.com>
Miro <mirokuratczyk@users.noreply.github.com> Miro <mirokuratczyk@users.noreply.github.com>
Miya Chen <miyatlchen@gmail.com> Miya Chen <miyatlchen@gmail.com>
mmsqe <mavis@crypto.com>
Mobin Mohanan <47410557+tr1sm0s1n@users.noreply.github.com>
Mohanson <mohanson@outlook.com> Mohanson <mohanson@outlook.com>
moomin <67548026+nothingmin@users.noreply.github.com>
mr_franklin <mr_franklin@126.com> mr_franklin <mr_franklin@126.com>
Mskxn <118117161+Mskxn@users.noreply.github.com>
Mudit Gupta <guptamudit@ymail.com> Mudit Gupta <guptamudit@ymail.com>
Mymskmkt <1847234666@qq.com> Mymskmkt <1847234666@qq.com>
Nalin Bhardwaj <nalinbhardwaj@nibnalin.me> Nalin Bhardwaj <nalinbhardwaj@nibnalin.me>
nand2 <nicolas@deschildre.fr>
Nathan <Nathan.l@nodereal.io>
Nathan Jo <162083209+qqqeck@users.noreply.github.com>
Natsu Kagami <natsukagami@gmail.com> Natsu Kagami <natsukagami@gmail.com>
Naveen <116692862+naveen-imtb@users.noreply.github.com>
Nchinda Nchinda <nchinda2@gmail.com> Nchinda Nchinda <nchinda2@gmail.com>
nebojsa94 <nebojsa94@users.noreply.github.com> Nebojsa Urosevic <nebojsa94@users.noreply.github.com>
necaremus <necaremus@gmail.com> necaremus <necaremus@gmail.com>
nedifi <103940716+nedifi@users.noreply.github.com> nedifi <103940716+nedifi@users.noreply.github.com>
needkane <604476380@qq.com> needkane <604476380@qq.com>
Newt6611 <45097780+Newt6611@users.noreply.github.com>
Ng Wei Han <47109095+weiihann@users.noreply.github.com>
Nguyen Kien Trung <trung.n.k@gmail.com> Nguyen Kien Trung <trung.n.k@gmail.com>
Nguyen Sy Thanh Son <thanhson1085@gmail.com> Nguyen Sy Thanh Son <thanhson1085@gmail.com>
Nic Jansma <nic@nicj.net> Nic Jansma <nic@nicj.net>
Nicholas <nicholas.zhaoyu@gmail.com>
Nick Dodson <silentcicero@outlook.com> Nick Dodson <silentcicero@outlook.com>
Nick Johnson <arachnid@notdot.net> Nick Johnson <arachnid@notdot.net>
Nicola Cocchiaro <3538109+ncocchiaro@users.noreply.github.com>
Nicolas Feignon <nfeignon@gmail.com> Nicolas Feignon <nfeignon@gmail.com>
Nicolas Gotchac <ngotchac@gmail.com>
Nicolas Guillaume <gunicolas@sqli.com> Nicolas Guillaume <gunicolas@sqli.com>
Nikhil Suri <nikhilsuri@comcast.net>
Nikita Kozhemyakin <enginegl.ec@gmail.com> Nikita Kozhemyakin <enginegl.ec@gmail.com>
Nikola Madjarevic <nikola.madjarevic@gmail.com> Nikola Madjarevic <nikola.madjarevic@gmail.com>
Nilesh Trivedi <nilesh@hypertrack.io> Nilesh Trivedi <nilesh@hypertrack.io>
@ -379,32 +564,47 @@ njupt-moon <1015041018@njupt.edu.cn>
nkbai <nkbai@163.com> nkbai <nkbai@163.com>
noam-alchemy <76969113+noam-alchemy@users.noreply.github.com> noam-alchemy <76969113+noam-alchemy@users.noreply.github.com>
nobody <ddean2009@163.com> nobody <ddean2009@163.com>
noel <72006780+0x00Duke@users.noreply.github.com>
Noman <noman@noman.land> Noman <noman@noman.land>
norwnd <112318969+norwnd@users.noreply.github.com>
nujabes403 <nujabes403@gmail.com> nujabes403 <nujabes403@gmail.com>
Nye Liu <nyet@nyet.org> Nye Liu <nyet@nyet.org>
Obtuse7772 <117080049+Obtuse7772@users.noreply.github.com>
Oleg Kovalov <iamolegkovalov@gmail.com> Oleg Kovalov <iamolegkovalov@gmail.com>
Oli Bye <olibye@users.noreply.github.com> Oli Bye <olibye@users.noreply.github.com>
Oliver Tale-Yazdi <oliver@perun.network> Oliver Tale-Yazdi <oliver@perun.network>
Olivier Hervieu <olivier.hervieu@gmail.com> Olivier Hervieu <olivier.hervieu@gmail.com>
openex <openexkevin@gmail.com>
Or Neeman <oneeman@gmail.com> Or Neeman <oneeman@gmail.com>
oseau <harbin.kyang@gmail.com>
Osoro Bironga <fanosoro@gmail.com> Osoro Bironga <fanosoro@gmail.com>
Osuke <arget-fee.free.dgm@hotmail.co.jp> Osuke <arget-fee.free.dgm@hotmail.co.jp>
panicalways <113693386+panicalways@users.noreply.github.com>
Pantelis Peslis <pespantelis@gmail.com> Pantelis Peslis <pespantelis@gmail.com>
Parithosh Jayanthi <parithosh@indenwolken.xyz>
Park Changwan <pcw109550@gmail.com>
Pascal Dierich <pascal@merkleplant.xyz> Pascal Dierich <pascal@merkleplant.xyz>
Patrick O'Grady <prohb125@gmail.com> Patrick O'Grady <prohb125@gmail.com>
Pau <pau@dabax.net> Pau <pau@dabax.net>
Paul <41552663+molecula451@users.noreply.github.com>
Paul Berg <hello@paulrberg.com> Paul Berg <hello@paulrberg.com>
Paul Lange <palango@users.noreply.github.com>
Paul Litvak <litvakpol@012.net.il> Paul Litvak <litvakpol@012.net.il>
Paul-Armand Verhaegen <paularmand.verhaegen@gmail.com> Paul-Armand Verhaegen <paularmand.verhaegen@gmail.com>
Paulo L F Casaretto <pcasaretto@gmail.com> Paulo L F Casaretto <pcasaretto@gmail.com>
Pawan Dhananjay <pawandhananjay@gmail.com>
Paweł Bylica <chfast@gmail.com> Paweł Bylica <chfast@gmail.com>
Pedro Gomes <otherview@gmail.com> Pedro Gomes <otherview@gmail.com>
Pedro Pombeiro <PombeirP@users.noreply.github.com> Pedro Pombeiro <PombeirP@users.noreply.github.com>
persmor <166146971+persmor@users.noreply.github.com>
Peter (bitfly) <1674920+peterbitfly@users.noreply.github.com>
Peter Broadhurst <peter@themumbles.net> Peter Broadhurst <peter@themumbles.net>
peter cresswell <pcresswell@gmail.com> peter cresswell <pcresswell@gmail.com>
Peter Pratscher <pratscher@gmail.com> Peter Pratscher <pratscher@gmail.com>
Peter Simard <petesimard56@gmail.com> Peter Simard <petesimard56@gmail.com>
Peter Straus <153843855+krauspt@users.noreply.github.com>
Petr Mikusek <petr@mikusek.info> Petr Mikusek <petr@mikusek.info>
phenix3443 <phenix3443@gmail.com>
Philip Schlump <pschlump@gmail.com> Philip Schlump <pschlump@gmail.com>
Pierre Neter <pierreneter@gmail.com> Pierre Neter <pierreneter@gmail.com>
Pierre R <p.rousset@gmail.com> Pierre R <p.rousset@gmail.com>
@ -412,15 +612,24 @@ piersy <pierspowlesland@gmail.com>
PilkyuJung <anothel1@naver.com> PilkyuJung <anothel1@naver.com>
Piotr Dyraga <piotr.dyraga@keep.network> Piotr Dyraga <piotr.dyraga@keep.network>
ploui <64719999+ploui@users.noreply.github.com> ploui <64719999+ploui@users.noreply.github.com>
PolyMa <151764357+polymaer@users.noreply.github.com>
Preston Van Loon <preston@prysmaticlabs.com> Preston Van Loon <preston@prysmaticlabs.com>
Prince Sinha <sinhaprince013@gmail.com> Prince Sinha <sinhaprince013@gmail.com>
psogv0308 <psogv0308@gmail.com>
puhtaytow <18026645+puhtaytow@users.noreply.github.com>
Péter Szilágyi <peterke@gmail.com> Péter Szilágyi <peterke@gmail.com>
qcrao <qcrao91@gmail.com>
qd-ethan <31876119+qdgogogo@users.noreply.github.com> qd-ethan <31876119+qdgogogo@users.noreply.github.com>
Qian Bin <cola.tin.com@gmail.com> Qian Bin <cola.tin.com@gmail.com>
qiuhaohao <trouserrr@gmail.com>
Qt <golang.chen@gmail.com>
Quentin McGaw <quentin.mcgaw@gmail.com>
Quest Henkart <qhenkart@gmail.com> Quest Henkart <qhenkart@gmail.com>
Rachel Bousfield <nfranks@protonmail.com>
Rachel Franks <nfranks@protonmail.com> Rachel Franks <nfranks@protonmail.com>
Rafael Matias <rafael@skyle.net> Rafael Matias <rafael@skyle.net>
Raghav Sood <raghavsood@gmail.com> Raghav Sood <raghavsood@gmail.com>
Rajaram Gaunker <zimbabao@gmail.com>
Ralph Caraveo <deckarep@gmail.com> Ralph Caraveo <deckarep@gmail.com>
Ramesh Nair <ram@hiddentao.com> Ramesh Nair <ram@hiddentao.com>
rangzen <public@l-homme.com> rangzen <public@l-homme.com>
@ -430,45 +639,65 @@ rhaps107 <dod-source@yandex.ru>
Ricardo Catalinas Jiménez <r@untroubled.be> Ricardo Catalinas Jiménez <r@untroubled.be>
Ricardo Domingos <ricardohsd@gmail.com> Ricardo Domingos <ricardohsd@gmail.com>
Richard Hart <richardhart92@gmail.com> Richard Hart <richardhart92@gmail.com>
RichΛrd <info@richardramos.me>
Rick <rick.no@groundx.xyz> Rick <rick.no@groundx.xyz>
RJ Catalano <catalanor0220@gmail.com> RJ Catalano <catalanor0220@gmail.com>
Rob <robert@rojotek.com> Rob <robert@rojotek.com>
Rob Mulholand <rmulholand@8thlight.com> Rob Mulholand <rmulholand@8thlight.com>
Robert Zaremba <robert@zaremba.ch> Robert Zaremba <robert@zaremba.ch>
Roberto Bayardo <bayardo@alum.mit.edu>
Roc Yu <rociiu0112@gmail.com> Roc Yu <rociiu0112@gmail.com>
Roman Krasiuk <rokrassyuk@gmail.com>
Roman Mazalov <83914728+gopherxyz@users.noreply.github.com> Roman Mazalov <83914728+gopherxyz@users.noreply.github.com>
Ross <9055337+Chadsr@users.noreply.github.com> Ross <9055337+Chadsr@users.noreply.github.com>
Rossen Krastev <rosen4obg@gmail.com>
Roy Crihfield <roy@manteia.ltd>
Runchao Han <elvisage941102@gmail.com> Runchao Han <elvisage941102@gmail.com>
Ruohui Wang <nomaru@outlook.com>
Russ Cox <rsc@golang.org> Russ Cox <rsc@golang.org>
Ryan Schneider <ryanleeschneider@gmail.com> Ryan Schneider <ryanleeschneider@gmail.com>
Ryan Tinianov <tinianov@live.com>
ryanc414 <ryan@tokencard.io> ryanc414 <ryan@tokencard.io>
Rémy Roy <remyroy@remyroy.com> Rémy Roy <remyroy@remyroy.com>
S. Matthew English <s-matthew-english@users.noreply.github.com> S. Matthew English <s-matthew-english@users.noreply.github.com>
salanfe <salanfe@users.noreply.github.com> salanfe <salanfe@users.noreply.github.com>
Sam <39165351+Xia-Sam@users.noreply.github.com> Sam <39165351+Xia-Sam@users.noreply.github.com>
Saman H. Pasha <51169592+saman-pasha@users.noreply.github.com>
Sammy Libre <7374093+sammy007@users.noreply.github.com> Sammy Libre <7374093+sammy007@users.noreply.github.com>
Samuel Marks <samuelmarks@gmail.com> Samuel Marks <samuelmarks@gmail.com>
Sanghee Choi <32831939+pengin7384@users.noreply.github.com>
SangIlMo <156392700+SangIlMo@users.noreply.github.com>
sanskarkhare <sanskarkhare47@gmail.com> sanskarkhare <sanskarkhare47@gmail.com>
SanYe <kumakichi@users.noreply.github.com>
Sarlor <kinsleer@outlook.com> Sarlor <kinsleer@outlook.com>
Sasuke1964 <neilperry1964@gmail.com> Sasuke1964 <neilperry1964@gmail.com>
Satpal <28562234+SatpalSandhu61@users.noreply.github.com> Satpal <28562234+SatpalSandhu61@users.noreply.github.com>
Saulius Grigaitis <saulius@necolt.com> Saulius Grigaitis <saulius@necolt.com>
Sean <darcys22@gmail.com> Sean <darcys22@gmail.com>
seayyyy <163325936+seay404@users.noreply.github.com>
Sebastian Stammler <seb@oplabs.co>
Serhat Şevki Dinçer <jfcgauss@gmail.com> Serhat Şevki Dinçer <jfcgauss@gmail.com>
Seungbae Yu <dbadoy4874@gmail.com>
Seungmin Kim <a7965344@gmail.com>
Shane Bammel <sjb933@gmail.com> Shane Bammel <sjb933@gmail.com>
shawn <36943337+lxex@users.noreply.github.com> shawn <36943337+lxex@users.noreply.github.com>
shigeyuki azuchi <azuchi@chaintope.com> shigeyuki azuchi <azuchi@chaintope.com>
Shihao Xia <charlesxsh@hotmail.com> Shihao Xia <charlesxsh@hotmail.com>
Shiming <codingmylife@gmail.com> Shiming <codingmylife@gmail.com>
Shiming Zhang <wzshiming@hotmail.com>
Shintaro Kaneko <kaneshin0120@gmail.com> Shintaro Kaneko <kaneshin0120@gmail.com>
shiqinfeng1 <150627601@qq.com> shiqinfeng1 <150627601@qq.com>
Shivam Sandbhor <shivam.sandbhor@gmail.com>
shivhg <shivhg@gmail.com>
Shuai Qi <qishuai231@gmail.com> Shuai Qi <qishuai231@gmail.com>
Shude Li <islishude@gmail.com> Shude Li <islishude@gmail.com>
Shunsuke Watanabe <ww.shunsuke@gmail.com> Shunsuke Watanabe <ww.shunsuke@gmail.com>
shuo <shuoli84@gmail.com>
silence <wangsai.silence@qq.com> silence <wangsai.silence@qq.com>
Simon Jentzsch <simon@slock.it> Simon Jentzsch <simon@slock.it>
Sina Mahmoodi <1591639+s1na@users.noreply.github.com> Sina Mahmoodi <1591639+s1na@users.noreply.github.com>
sixdays <lj491685571@126.com> sixdays <lj491685571@126.com>
sjlee1125 <47561537+sjlee1125@users.noreply.github.com>
SjonHortensius <SjonHortensius@users.noreply.github.com> SjonHortensius <SjonHortensius@users.noreply.github.com>
Slava Karpenko <slavikus@gmail.com> Slava Karpenko <slavikus@gmail.com>
slumber1122 <slumber1122@gmail.com> slumber1122 <slumber1122@gmail.com>
@ -477,17 +706,29 @@ soc1c <soc1c@users.noreply.github.com>
Sorin Neacsu <sorin.neacsu@gmail.com> Sorin Neacsu <sorin.neacsu@gmail.com>
Sparty <vignesh.crysis@gmail.com> Sparty <vignesh.crysis@gmail.com>
Stein Dekker <dekker.stein@gmail.com> Stein Dekker <dekker.stein@gmail.com>
Stephen Flynn <ssflynn@gmail.com>
Stephen Guo <stephen.fire@gmail.com>
Steve Gattuso <steve@stevegattuso.me> Steve Gattuso <steve@stevegattuso.me>
Steve Milk <wangpeculiar@gmail.com>
Steve Ruckdashel <steve.ruckdashel@gmail.com> Steve Ruckdashel <steve.ruckdashel@gmail.com>
Steve Waldman <swaldman@mchange.com> Steve Waldman <swaldman@mchange.com>
Steven E. Harris <seh@panix.com> Steven E. Harris <seh@panix.com>
Steven Roose <stevenroose@gmail.com> Steven Roose <stevenroose@gmail.com>
stompesi <stompesi@gmail.com> stompesi <stompesi@gmail.com>
stormpang <jialinpeng@vip.qq.com> stormpang <jialinpeng@vip.qq.com>
storyicon <storyicon@foxmail.com>
strykerin <dacosta.pereirafabio@gmail.com>
sudeep <sudeepdino008@gmail.com>
SuiYuan <165623542+suiyuan1314@users.noreply.github.com>
Sungwoo Kim <git@sung-woo.kim>
sunxiaojun2014 <sunxiaojun-xy@360.cn> sunxiaojun2014 <sunxiaojun-xy@360.cn>
Suriyaa Sundararuban <isc.suriyaa@gmail.com> Suriyaa Sundararuban <isc.suriyaa@gmail.com>
Sylvain Laurent <s@6120.eu> Sylvain Laurent <s@6120.eu>
Szupingwang <cara4bear@gmail.com>
tactical_retreat <tactical0retreat@gmail.com>
Taeguk Kwon <xornrbboy@gmail.com>
Taeik Lim <sibera21@gmail.com> Taeik Lim <sibera21@gmail.com>
taiking <c.tsujiyan727@gmail.com>
tamirms <tamir@trello.com> tamirms <tamir@trello.com>
Tangui Clairet <tangui.clairet@gmail.com> Tangui Clairet <tangui.clairet@gmail.com>
Tatsuya Shimoda <tacoo@users.noreply.github.com> Tatsuya Shimoda <tacoo@users.noreply.github.com>
@ -495,21 +736,35 @@ Taylor Gerring <taylor.gerring@gmail.com>
TColl <38299499+TColl@users.noreply.github.com> TColl <38299499+TColl@users.noreply.github.com>
terasum <terasum@163.com> terasum <terasum@163.com>
tgyKomgo <52910426+tgyKomgo@users.noreply.github.com> tgyKomgo <52910426+tgyKomgo@users.noreply.github.com>
Thabokani <149070269+Thabokani@users.noreply.github.com>
Thad Guidry <thadguidry@gmail.com> Thad Guidry <thadguidry@gmail.com>
therainisme <therainisme@qq.com>
Thomas Bocek <tom@tomp2p.net> Thomas Bocek <tom@tomp2p.net>
thomasmodeneis <thomas.modeneis@gmail.com> thomasmodeneis <thomas.modeneis@gmail.com>
thumb8432 <thumb8432@gmail.com> thumb8432 <thumb8432@gmail.com>
Ti Zhou <tizhou1986@gmail.com> Ti Zhou <tizhou1986@gmail.com>
tia-99 <67107070+tia-99@users.noreply.github.com> tia-99 <67107070+tia-99@users.noreply.github.com>
tianyeyouyou <tianyeyouyou@gmail.com>
Tien Nguyen <116023870+htiennv@users.noreply.github.com>
Tim Cooijmans <timcooijmans@gmail.com> Tim Cooijmans <timcooijmans@gmail.com>
TinyFoxy <tiny.fox@foxmail.com>
Tobias Hildebrandt <79341166+tobias-hildebrandt@users.noreply.github.com> Tobias Hildebrandt <79341166+tobias-hildebrandt@users.noreply.github.com>
tokikuch <msmania@users.noreply.github.com>
Tom <45168162+tomdever@users.noreply.github.com>
Tosh Camille <tochecamille@gmail.com> Tosh Camille <tochecamille@gmail.com>
trillo <trillo8652@gmail.com>
Tristan-Wilson <87238672+Tristan-Wilson@users.noreply.github.com>
trocher <trooocher@proton.me>
tsarpaul <Litvakpol@012.net.il> tsarpaul <Litvakpol@012.net.il>
TY <45994721+tylerK1294@users.noreply.github.com>
Tyler Chambers <2775339+tylerchambers@users.noreply.github.com> Tyler Chambers <2775339+tylerchambers@users.noreply.github.com>
tylerni7 <tylerni7@gmail.com>
tzapu <alex@tzapu.com> tzapu <alex@tzapu.com>
ucwong <ucwong@126.com> ucwong <ethereum2k@gmail.com>
uji <49834542+uji@users.noreply.github.com> uji <49834542+uji@users.noreply.github.com>
ult-bobonovski <alex@ultiledger.io> ult-bobonovski <alex@ultiledger.io>
Undefinedor <wanghao@imwh.net>
Ursulafe <152976968+Ursulafe@users.noreply.github.com>
Valentin Trinqué <ValentinTrinque@users.noreply.github.com> Valentin Trinqué <ValentinTrinque@users.noreply.github.com>
Valentin Wüstholz <wuestholz@gmail.com> Valentin Wüstholz <wuestholz@gmail.com>
Vedhavyas Singareddi <vedhavyas.singareddi@gmail.com> Vedhavyas Singareddi <vedhavyas.singareddi@gmail.com>
@ -528,39 +783,60 @@ Vitaly V <vvelikodny@gmail.com>
Vivek Anand <vivekanand1101@users.noreply.github.com> Vivek Anand <vivekanand1101@users.noreply.github.com>
Vlad Bokov <razum2um@mail.ru> Vlad Bokov <razum2um@mail.ru>
Vlad Gluhovsky <gluk256@gmail.com> Vlad Gluhovsky <gluk256@gmail.com>
VM <112189277+sysvm@users.noreply.github.com>
vuittont60 <81072379+vuittont60@users.noreply.github.com>
wangjingcun <wangjingcun@aliyun.com>
wangyifan <wangyifan@uchicago.edu>
Ward Bradt <wardbradt5@gmail.com> Ward Bradt <wardbradt5@gmail.com>
Water <44689567+codeoneline@users.noreply.github.com> Water <44689567+codeoneline@users.noreply.github.com>
wbt <wbt@users.noreply.github.com> wbt <wbt@users.noreply.github.com>
Wei Tang <acc@pacna.org>
weimumu <934657014@qq.com> weimumu <934657014@qq.com>
Wenbiao Zheng <delweng@gmail.com> Wenbiao Zheng <delweng@gmail.com>
Wenshao Zhong <wzhong20@uic.edu> Wenshao Zhong <wzhong20@uic.edu>
Wihan de Beer <debeerwihan@gmail.com>
Will Villanueva <hello@willvillanueva.com> Will Villanueva <hello@willvillanueva.com>
William Morriss <wjmelements@gmail.com> William Morriss <wjmelements@gmail.com>
William Setzer <bootstrapsetzer@gmail.com> William Setzer <bootstrapsetzer@gmail.com>
williambannas <wrschwartz@wpi.edu> williambannas <wrschwartz@wpi.edu>
willian.eth <willian@ufpa.br>
winniehere <winnie050812@qq.com>
winterjihwan <113398351+winterjihwan@users.noreply.github.com>
wuff1996 <33193253+wuff1996@users.noreply.github.com> wuff1996 <33193253+wuff1996@users.noreply.github.com>
Wuxiang <wuxiangzhou2010@gmail.com> Wuxiang <wuxiangzhou2010@gmail.com>
Xiaobing Jiang <s7v7nislands@gmail.com> Xiaobing Jiang <s7v7nislands@gmail.com>
xiaodong <81516175+javaandfly@users.noreply.github.com>
xiekeyang <xiekeyang@users.noreply.github.com> xiekeyang <xiekeyang@users.noreply.github.com>
xinbenlv <zzn@zzn.im>
xincaosu <xincaosu@126.com> xincaosu <xincaosu@126.com>
xinluyin <31590468+xinluyin@users.noreply.github.com> xinluyin <31590468+xinluyin@users.noreply.github.com>
xiyang <90125263+JBossBC@users.noreply.github.com>
Xudong Liu <33193253+r1cs@users.noreply.github.com> Xudong Liu <33193253+r1cs@users.noreply.github.com>
xwjack <XWJACK@users.noreply.github.com> xwjack <XWJACK@users.noreply.github.com>
yahtoo <yahtoo.ma@gmail.com> yahtoo <yahtoo.ma@gmail.com>
Yang Hau <vulxj0j8j8@gmail.com> Yang Hau <vulxj0j8j8@gmail.com>
YaoZengzeng <yaozengzeng@zju.edu.cn> YaoZengzeng <yaozengzeng@zju.edu.cn>
ycyraum <ycyraum@fastmail.com>
YH-Zhou <yanhong.zhou05@gmail.com> YH-Zhou <yanhong.zhou05@gmail.com>
Yier <90763233+yierx@users.noreply.github.com>
Yihau Chen <a122092487@gmail.com> Yihau Chen <a122092487@gmail.com>
yihuang <huang@crypto.com>
Yohann Léon <sybiload@gmail.com> Yohann Léon <sybiload@gmail.com>
Yoichi Hirai <i@yoichihirai.com> Yoichi Hirai <i@yoichihirai.com>
Yole <007yuyue@gmail.com> Yole <007yuyue@gmail.com>
Yondon Fu <yondon.fu@gmail.com> Yondon Fu <yondon.fu@gmail.com>
yong <33920876+yzhaoyu@users.noreply.github.com>
YOSHIDA Masanori <masanori.yoshida@gmail.com> YOSHIDA Masanori <masanori.yoshida@gmail.com>
yoza <yoza.is12s@gmail.com> yoza <yoza.is12s@gmail.com>
ysh0566 <ysh0566@qq.com>
yudrywet <166895665+yudrywet@users.noreply.github.com>
yujinpark <petere123123@gmail.com>
yukionfire <yukionfire@qq.com>
yumiel yoomee1313 <yumiel.ko@groundx.xyz> yumiel yoomee1313 <yumiel.ko@groundx.xyz>
Yusup <awklsgrep@gmail.com> Yusup <awklsgrep@gmail.com>
yutianwu <wzxingbupt@gmail.com> yutianwu <wzxingbupt@gmail.com>
ywzqwwt <39263032+ywzqwwt@users.noreply.github.com> ywzqwwt <39263032+ywzqwwt@users.noreply.github.com>
yzb <335357057@qq.com>
zaccoding <zaccoding725@gmail.com> zaccoding <zaccoding725@gmail.com>
Zach <zach.ramsay@gmail.com> Zach <zach.ramsay@gmail.com>
Zachinquarantine <Zachinquarantine@protonmail.com> Zachinquarantine <Zachinquarantine@protonmail.com>
@ -568,24 +844,34 @@ zah <zahary@gmail.com>
Zahoor Mohamed <zahoor@zahoor.in> Zahoor Mohamed <zahoor@zahoor.in>
Zak Cole <zak@beattiecole.com> Zak Cole <zak@beattiecole.com>
zcheng9 <zcheng9@hawk.iit.edu> zcheng9 <zcheng9@hawk.iit.edu>
zeim839 <50573884+zeim839@users.noreply.github.com>
zer0to0ne <36526113+zer0to0ne@users.noreply.github.com> zer0to0ne <36526113+zer0to0ne@users.noreply.github.com>
zgfzgf <48779939+zgfzgf@users.noreply.github.com> zgfzgf <48779939+zgfzgf@users.noreply.github.com>
Zhang Zhuo <mycinbrin@gmail.com> Zhang Zhuo <mycinbrin@gmail.com>
zhangsoledad <787953403@qq.com> zhangsoledad <787953403@qq.com>
zhaochonghe <41711151+zhaochonghe@users.noreply.github.com> zhaochonghe <41711151+zhaochonghe@users.noreply.github.com>
zhen peng <505380967@qq.com>
Zhenguo Niu <Niu.ZGlinux@gmail.com> Zhenguo Niu <Niu.ZGlinux@gmail.com>
Zheyuan He <ecjgvmhc@gmail.com>
Zhihao Lin <3955922+kkqy@users.noreply.github.com>
zhiqiangxu <652732310@qq.com> zhiqiangxu <652732310@qq.com>
Zhou Zhiyao <ZHOU0250@e.ntu.edu.sg> Zhou Zhiyao <ZHOU0250@e.ntu.edu.sg>
Ziyuan Zhong <zzy.albert@163.com> Ziyuan Zhong <zzy.albert@163.com>
Zoe Nolan <github@zoenolan.org> Zoe Nolan <github@zoenolan.org>
zoereco <158379334+zoereco@users.noreply.github.com>
Zoo <zoosilence@gmail.com>
Zoro <40222601+BabyHalimao@users.noreply.github.com>
Zou Guangxian <zouguangxian@gmail.com> Zou Guangxian <zouguangxian@gmail.com>
Zsolt Felföldi <zsfelfoldi@gmail.com> Zsolt Felföldi <zsfelfoldi@gmail.com>
Łukasz Kurowski <crackcomm@users.noreply.github.com> Łukasz Kurowski <crackcomm@users.noreply.github.com>
Łukasz Zimnoch <lukaszzimnoch1994@gmail.com> Łukasz Zimnoch <lukaszzimnoch1994@gmail.com>
ΞTHΞЯSPHΞЯΞ <{viktor.tron,nagydani,zsfelfoldi}@gmail.com> ΞTHΞЯSPHΞЯΞ <{viktor.tron,nagydani,zsfelfoldi}@gmail.com>
Максим Чусовлянов <mchusovlianov@gmail.com> Максим Чусовлянов <mchusovlianov@gmail.com>
かげ <47621124+ronething-bot@users.noreply.github.com>
スパイク <1311798+spkjp@users.noreply.github.com>
大彬 <hz_stb@163.com> 大彬 <hz_stb@163.com>
沉风 <myself659@users.noreply.github.com> 沉风 <myself659@users.noreply.github.com>
牛晓婕 <30611384+niuxiaojie81@users.noreply.github.com>
贺鹏飞 <hpf@hackerful.cn> 贺鹏飞 <hpf@hackerful.cn>
陈佳 <chenjiablog@gmail.com> 陈佳 <chenjiablog@gmail.com>
유용환 <33824408+eric-yoo@users.noreply.github.com> 유용환 <33824408+eric-yoo@users.noreply.github.com>

View file

@ -1,4 +1,4 @@
// Copyright 2023 The go-ethereum Authors // Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library. // This file is part of the go-ethereum library.
// //
// The go-ethereum library is free software: you can redistribute it and/or modify // The go-ethereum library is free software: you can redistribute it and/or modify

View file

@ -1,4 +1,4 @@
// Copyright 2016 The go-ethereum Authors // Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library. // This file is part of the go-ethereum library.
// //
// The go-ethereum library is free software: you can redistribute it and/or modify // The go-ethereum library is free software: you can redistribute it and/or modify

View file

@ -1,60 +1,54 @@
# This file contains sha256 checksums of optional build dependencies. # This file contains sha256 checksums of optional build dependencies.
# version:spec-tests 2.1.0 # version:spec-tests pectra-devnet-6@v1.0.0
# https://github.com/ethereum/execution-spec-tests/releases # https://github.com/ethereum/execution-spec-tests/releases
# https://github.com/ethereum/execution-spec-tests/releases/download/v2.1.0/ # https://github.com/ethereum/execution-spec-tests/releases/download/pectra-devnet-6%40v1.0.0/fixtures_pectra-devnet-6.tar.gz
ca89c76851b0900bfcc3cbb9a26cbece1f3d7c64a3bed38723e914713290df6c fixtures_develop.tar.gz b69211752a3029083c020dc635fe12156ca1a6725a08559da540a0337586a77e fixtures_pectra-devnet-6.tar.gz
# version:golang 1.23.5 # version:golang 1.23.6
# https://go.dev/dl/ # https://go.dev/dl/
a6f3f4bbd3e6bdd626f79b668f212fbb5649daf75084fb79b678a0ae4d97423b go1.23.5.src.tar.gz 039c5b04e65279daceee8a6f71e70bd05cf5b801782b6f77c6e19e2ed0511222 go1.23.6.src.tar.gz
8d8bc7d1b362dd91426da9352741db298ff73e3e0a3ccbe6f607f80ba17647a4 go1.23.5.aix-ppc64.tar.gz adec10f4ba56591f523aa04851f7f6900b1c61508dfa6b80e62717a8e6684a5c go1.23.6.aix-ppc64.tar.gz
d8b310b0b6bd6a630307579165cfac8a37571483c7d6804a10dd73bbefb0827f go1.23.5.darwin-amd64.tar.gz 782da50ce8ec5e98fac2cd3cdc6a1d7130d093294fc310038f651444232a3fb0 go1.23.6.darwin-amd64.tar.gz
d2b06bf0b8299e0187dfe2d8ad39bd3dd96a6d93fe4d1cfd42c7872452f4a0a2 go1.23.5.darwin-amd64.pkg 5cae2450a1708aeb0333237a155640d5562abaf195defebc4306054565536221 go1.23.6.darwin-arm64.tar.gz
047bfce4fbd0da6426bd30cd19716b35a466b1c15a45525ce65b9824acb33285 go1.23.5.darwin-arm64.tar.gz d52efb3020d9332477ade98163c03d2f2fe3e051b0e7e01f0e167412c66de0cb go1.23.6.dragonfly-amd64.tar.gz
f819ed94939e08a5016b9a607ec84ebbde6cb3fe59750c59d97aa300c3fd02df go1.23.5.darwin-arm64.pkg d3287706b5823712ac6cf7dff684a556cff98163ef60e7b275abe3388c17aac7 go1.23.6.freebsd-386.tar.gz
2dec52821e1f04a538d00b2cafe70fa506f2eea94a551bfe3ce1238f1bd4966f go1.23.5.dragonfly-amd64.tar.gz ebb4c6a9b0673dbdabc439877779ed6add16575e21bd0a7955c33f692789aef6 go1.23.6.freebsd-amd64.tar.gz
7204e7bc62913b12f18c61afe0bc1a92fd192c0e45a54125978592296cb84e49 go1.23.5.freebsd-386.tar.gz b7241584afb0b161c09148f8fde16171bb743e47b99d451fbc5f5217ec7a88b6 go1.23.6.freebsd-arm.tar.gz
90a119995ebc3e36082874df5fa8fe6da194946679d01ae8bef33c87aab99391 go1.23.5.freebsd-amd64.tar.gz 004718b53cedd7955d1b1dc4053539fcd1053c031f5f3374334a22befd1f8310 go1.23.6.freebsd-arm64.tar.gz
255d26d873e41ff2fc278013bb2e5f25cf2ebe8d0ec84c07e3bb1436216020d3 go1.23.5.freebsd-arm.tar.gz ca026ec8a30dd0c18164f40e1ce21bd725e2445f11699177d05815189a38de7a go1.23.6.freebsd-riscv64.tar.gz
2785d9122654980b59ca38305a11b34f2a1e12d9f7eb41d52efc137c1fc29e61 go1.23.5.freebsd-arm64.tar.gz 7db973efa3fb2e48e45059b855721550fce8e90803e7373d3efd37b88dd821e8 go1.23.6.illumos-amd64.tar.gz
8f66a94018ab666d56868f61c579aa81e549ac9700979ce6004445d315be2d37 go1.23.5.freebsd-riscv64.tar.gz e61f87693169c0bbcc43363128f1e929b9dff0b7f448573f1bdd4e4a0b9687ba go1.23.6.linux-386.tar.gz
4b7a69928385ec512a4e77a547e24118adbb92301d2be36187ff0852ba9e6303 go1.23.5.illumos-amd64.tar.gz 9379441ea310de000f33a4dc767bd966e72ab2826270e038e78b2c53c2e7802d go1.23.6.linux-amd64.tar.gz
6ecf6a41d0925358905fa2641db0e1c9037aa5b5bcd26ca6734caf50d9196417 go1.23.5.linux-386.tar.gz 561c780e8f4a8955d32bf72e46af0b5ee5e0debe1e4633df9a03781878219202 go1.23.6.linux-arm64.tar.gz
cbcad4a6482107c7c7926df1608106c189417163428200ce357695cc7e01d091 go1.23.5.linux-amd64.tar.gz 27a4611010c16b8c4f37ade3aada55bd5781998f02f348b164302fd5eea4eb74 go1.23.6.linux-armv6l.tar.gz
47c84d332123883653b70da2db7dd57d2a865921ba4724efcdf56b5da7021db0 go1.23.5.linux-arm64.tar.gz c459226424372abc2b35957cc8955dad348330714f7605093325dbb73e33c750 go1.23.6.linux-loong64.tar.gz
04e0b5cf5c216f0aa1bf8204d49312ad0845800ab0702dfe4357c0b1241027a3 go1.23.5.linux-armv6l.tar.gz e2a0aff70b958a3463a7d47132a2d0238369f64578d4f7f95e679e3a5af05622 go1.23.6.linux-mips.tar.gz
e1d14ac2207c78d52b76ba086da18a004c70aeb58cba72cd9bef0da7d1602786 go1.23.5.linux-loong64.tar.gz 7d30ec7db056311d420bf930c16abcae13c0f41c26a202868f279721ec3c2f2f go1.23.6.linux-mips64.tar.gz
d9e937f2fac4fc863850fb4cc31ae76d5495029a62858ef09c78604472d354c0 go1.23.5.linux-mips.tar.gz 74ca7bc475bcc084c6718b74df024d7de9612932cea8a6dc75e29d3a5315a23a go1.23.6.linux-mips64le.tar.gz
59710d0782abafd47e40d1cf96aafa596bbdee09ac7c61062404604f49bd523e go1.23.5.linux-mips64.tar.gz 09bf935a14e9f59a20499989438b1655453480016bdbcb10406acf4df2678ccb go1.23.6.linux-mipsle.tar.gz
bc528cd836b4aa6701a42093ed390ef9929639a0e2818759887dc5539e517cab go1.23.5.linux-mips64le.tar.gz 5cb2f6a5090276c72c5eda8a55896f5a3d6ea0f28d10fa1a50e8318640f02d6c go1.23.6.linux-ppc64.tar.gz
a0404764ea1fd4a175dc5193622b15be6ed1ab59cbfa478f5ae24531bafb6cbd go1.23.5.linux-mipsle.tar.gz 0f817201e83d78ddbfa27f5f78d9b72450b92cc21d5e045145efacd0d3244a99 go1.23.6.linux-ppc64le.tar.gz
db110284a0c91d4545273f210ca95b9f89f6e3ac90f39eb819033a6b96f25897 go1.23.5.linux-ppc64.tar.gz f95f7f817ab22ecab4503d0704d6449ea1aa26a595f57bf9b9f94ddf2aa7c1f3 go1.23.6.linux-riscv64.tar.gz
db268bf5710b5b1b82ab38722ba6e4427d9e4942aed78c7d09195a9dff329613 go1.23.5.linux-ppc64le.tar.gz 321e7ed0d5416f731479c52fa7610b52b8079a8061967bd48cec6d66f671a60e go1.23.6.linux-s390x.tar.gz
d9da15778442464f32acfa777ac731fd4d47362b233b83a0932380cb6d2d5dc8 go1.23.5.linux-riscv64.tar.gz 92d678fb8e1eeeb8c6af6f22e4e5494652dcbb4a320113fc08325cb9956a2d4c go1.23.6.netbsd-386.tar.gz
14924b917d35311eb130e263f34931043d4f9dc65f20684301bf8f60a72edcdf go1.23.5.linux-s390x.tar.gz 86ba51e7bb26b30ea6a8d88ddb79d8e8c83b4116200040ecb7a5a44cf90a8c5c go1.23.6.netbsd-amd64.tar.gz
7b8074102e7f039bd6473c44f58cb323c98dcda48df98ad1f78aaa2664769c8f go1.23.5.netbsd-386.tar.gz 4b974c35345100f0be6ea66afab2781de91ee9882117314126eaf0ae90fd3816 go1.23.6.netbsd-arm.tar.gz
1a466b9c8900e66664b15c07548ecb156e8274cf1028ac5da84134728e6dbbed go1.23.5.netbsd-amd64.tar.gz 53e3589fc38e787a493ea038961f8e40803714dbb42754c1713b00099c12e9b9 go1.23.6.netbsd-arm64.tar.gz
901c9e72038926e37a4dbde8f03d1d81fcb9992850901a3da1da5a25ef93e65b go1.23.5.netbsd-arm.tar.gz 6d2317b3a8505ccebff8f72d943f2ac9b82c115632e54a53a786eff24ced56d9 go1.23.6.openbsd-386.tar.gz
221f69a7c3a920e3666633ee0b4e5c810176982e74339ba4693226996dc636e4 go1.23.5.netbsd-arm64.tar.gz f699e707d95a984fcc00361d91aecdb413d3c75e18235156ffba7a89edf68aae go1.23.6.openbsd-amd64.tar.gz
42e46cbf73febb8e6ddf848765ce1c39573736383b132402cdc487eb6be3ad06 go1.23.5.openbsd-386.tar.gz 3c1cf6ab893657d0bf1942e40ce115acfd27cbce1ccb9bc88fd9cd21ca3d489f go1.23.6.openbsd-arm.tar.gz
f49e81fce17aab21800fab7c4b10c97ab02f8a9c807fdf8641ccf2f87d69289f go1.23.5.openbsd-amd64.tar.gz cc0875535d14001f2da23ae9af89025b28c466e8f4f4c63f991ebb6f4b02f66c go1.23.6.openbsd-arm64.tar.gz
d8bd7269d4670a46e702b64822254a654824347c35923ef1c444d2e8687381ea go1.23.5.openbsd-arm.tar.gz 64de80e29ca66cb566cbf8be030bf8599953af4e48402eab724cbe0a08b40602 go1.23.6.openbsd-ppc64.tar.gz
9cb259adff431d4d28b18e3348e26fe07ea10380675051dcfd740934b5e8b9f2 go1.23.5.openbsd-arm64.tar.gz c398a6b43c569f34bb4a2d16b52f8010eaac9a2a82ecac0602b4338e35cef377 go1.23.6.openbsd-riscv64.tar.gz
72a03223c98fcecfb06e57c3edd584f99fb7f6574a42f59348473f354be1f379 go1.23.5.openbsd-ppc64.tar.gz 10998b6b130bb7b542b407f0db42b86a913b111f8fa86d44394beaace4d45f01 go1.23.6.plan9-386.tar.gz
c06432b859afb36657207382b7bac03f961b8fafc18176b501d239575a9ace64 go1.23.5.openbsd-riscv64.tar.gz 9fbe8065436d8d12c02f19f64f51c9107da3a7a4ac46ab5777e182e9fe88c32f go1.23.6.plan9-amd64.tar.gz
b1f9b12b269ab5cd4aa7ae3dd3075c2407c1ea8bb1211e6835261f98931201cc go1.23.5.plan9-386.tar.gz 8e3c826b884daee2de37e3b070d7eac4cea5d68edab8db09910e22201c75db83 go1.23.6.plan9-arm.tar.gz
45b4026a103e2f6cd436e2b7ad24b24a40dd22c9903519b98b45c535574fa01a go1.23.5.plan9-amd64.tar.gz b619eff63fec86daaea92ca170559e448a58b8ba0b92eef1971bc14e92ea86a7 go1.23.6.solaris-amd64.tar.gz
6e28e26f8c1e8620006490260aa5743198843aa0003c400cb65cbf5e743b21c7 go1.23.5.plan9-arm.tar.gz 96820c0f5d464dd694543329e9b4d413b17c821c03a055717a29e6735b44c2d8 go1.23.6.windows-386.zip
0496c9969f208bd597f3e63fb27068ce1c7ed776618da1007fcc1c8be83ca413 go1.23.5.solaris-amd64.tar.gz 53fec1586850b2cf5ad6438341ff7adc5f6700dd3ec1cfa3f5e8b141df190243 go1.23.6.windows-amd64.zip
8441605a005ea74c28d8c02ca5f2708c17b4df7e91796148b9f8760caafb05c1 go1.23.5.windows-386.zip 22c2518c45c20018afa20d5376dc9fd7a7e74367240ed7b5209e79a30b5c4218 go1.23.6.windows-arm.zip
39962346d8d0cb0cc8716489ee33b08d7a220c24a9e45423487876dd4acbdac6 go1.23.5.windows-386.msi a2d2ec1b3759552bdd9cdf58858f91dfbfd6ab3a472f00b5255acbed30b1aa41 go1.23.6.windows-arm64.zip
96d74945d7daeeb98a7978d0cf099321d7eb821b45f5c510373d545162d39c20 go1.23.5.windows-amd64.zip
03e11a988a18ad7e3f9038cef836330af72ba0a454a502cda7b7faee07a0dd8a go1.23.5.windows-amd64.msi
0005b31dcf9732c280a5cceb6aa1c5ab8284bc2541d0256c221256080acf2a09 go1.23.5.windows-arm.zip
a8442de35cbac230db8c4b20e363055671f2295dc4d6b2b2dfec66b89a3c4bce go1.23.5.windows-arm.msi
4f20c2d8a5a387c227e3ef48c5506b22906139d8afd8d66a78ef3de8dda1d1c3 go1.23.5.windows-arm64.zip
6f54fb46b669345c734936c521f7e0f55555e63ed6e11efbbaaed06f9514773c go1.23.5.windows-arm64.msi
# version:golangci 1.63.4 # version:golangci 1.63.4
# https://github.com/golangci/golangci-lint/releases/ # https://github.com/golangci/golangci-lint/releases/

View file

@ -338,8 +338,8 @@ func downloadSpecTestFixtures(csdb *build.ChecksumDB, cachedir string) string {
log.Fatal(err) log.Fatal(err)
} }
ext := ".tar.gz" ext := ".tar.gz"
base := "fixtures_develop" // TODO(MariusVanDerWijden) rename once the version becomes part of the filename base := "fixtures_pectra-devnet-6" // TODO(s1na) rename once the version becomes part of the filename
url := fmt.Sprintf("https://github.com/ethereum/execution-spec-tests/releases/download/v%s/%s%s", executionSpecTestsVersion, base, ext) url := fmt.Sprintf("https://github.com/ethereum/execution-spec-tests/releases/download/%s/%s%s", executionSpecTestsVersion, base, ext)
archivePath := filepath.Join(cachedir, base+ext) archivePath := filepath.Join(cachedir, base+ext)
if err := csdb.DownloadFile(url, archivePath); err != nil { if err := csdb.DownloadFile(url, archivePath); err != nil {
log.Fatal(err) log.Fatal(err)

View file

@ -1,3 +1,19 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main package main
import ( import (

View file

@ -1,3 +1,19 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main package main
import ( import (

View file

@ -1,18 +1,18 @@
// Copyright 2022 The go-ethereum Authors // Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library. // This file is part of go-ethereum.
// //
// The go-ethereum library is free software: you can redistribute it and/or modify // go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by // it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or // the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version. // (at your option) any later version.
// //
// The go-ethereum library is distributed in the hope that it will be useful, // go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of // but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details. // GNU General Public License for more details.
// //
// You should have received a copy of the GNU Lesser General Public License // You should have received a copy of the GNU General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main package main

View file

@ -316,9 +316,6 @@ loop:
return fmt.Errorf("wrong head block in status, want: %#x (block %d) have %#x", return fmt.Errorf("wrong head block in status, want: %#x (block %d) have %#x",
want, chain.blocks[chain.Len()-1].NumberU64(), have) want, chain.blocks[chain.Len()-1].NumberU64(), have)
} }
if have, want := msg.TD.Cmp(chain.TD()), 0; have != want {
return fmt.Errorf("wrong TD in status: have %v want %v", have, want)
}
if have, want := msg.ForkID, chain.ForkID(); !reflect.DeepEqual(have, want) { if have, want := msg.ForkID, chain.ForkID(); !reflect.DeepEqual(have, want) {
return fmt.Errorf("wrong fork ID in status: have %v, want %v", have, want) return fmt.Errorf("wrong fork ID in status: have %v, want %v", have, want)
} }

View file

@ -743,7 +743,7 @@ func (s *Suite) makeBlobTxs(count, blobs int, discriminator byte) (txs types.Tra
GasTipCap: uint256.NewInt(1), GasTipCap: uint256.NewInt(1),
GasFeeCap: uint256.MustFromBig(s.chain.Head().BaseFee()), GasFeeCap: uint256.MustFromBig(s.chain.Head().BaseFee()),
Gas: 100000, Gas: 100000,
BlobFeeCap: uint256.MustFromBig(eip4844.CalcBlobFee(*s.chain.Head().ExcessBlobGas())), BlobFeeCap: uint256.MustFromBig(eip4844.CalcBlobFee(s.chain.config, s.chain.Head().Header())),
BlobHashes: makeSidecar(blobdata...).BlobHashes(), BlobHashes: makeSidecar(blobdata...).BlobHashes(),
Sidecar: makeSidecar(blobdata...), Sidecar: makeSidecar(blobdata...),
} }

View file

@ -18,7 +18,14 @@
"shanghaiTime": 780, "shanghaiTime": 780,
"cancunTime": 840, "cancunTime": 840,
"terminalTotalDifficulty": 9454784, "terminalTotalDifficulty": 9454784,
"ethash": {} "ethash": {},
"blobSchedule": {
"cancun": {
"target": 3,
"max": 6,
"baseFeeUpdateFraction": 3338477
}
}
}, },
"nonce": "0x0", "nonce": "0x0",
"timestamp": "0x0", "timestamp": "0x0",
@ -108,4 +115,4 @@
"baseFeePerGas": null, "baseFeePerGas": null,
"excessBlobGas": null, "excessBlobGas": null,
"blobGasUsed": null "blobGasUsed": null
} }

View file

@ -1,4 +1,4 @@
// Copyright 2023 The go-ethereum Authors // Copyright 2024 The go-ethereum Authors
// This file is part of go-ethereum. // This file is part of go-ethereum.
// //
// go-ethereum is free software: you can redistribute it and/or modify // go-ethereum is free software: you can redistribute it and/or modify

View file

@ -1,3 +1,19 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main package main
import ( import (

View file

@ -178,15 +178,28 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
var excessBlobGas uint64 var excessBlobGas uint64
if pre.Env.ExcessBlobGas != nil { if pre.Env.ExcessBlobGas != nil {
excessBlobGas = *pre.Env.ExcessBlobGas excessBlobGas = *pre.Env.ExcessBlobGas
vmContext.BlobBaseFee = eip4844.CalcBlobFee(excessBlobGas) header := &types.Header{
Time: pre.Env.Timestamp,
ExcessBlobGas: pre.Env.ExcessBlobGas,
}
vmContext.BlobBaseFee = eip4844.CalcBlobFee(chainConfig, header)
} else { } else {
// If it is not explicitly defined, but we have the parent values, we try // If it is not explicitly defined, but we have the parent values, we try
// to calculate it ourselves. // to calculate it ourselves.
parentExcessBlobGas := pre.Env.ParentExcessBlobGas parentExcessBlobGas := pre.Env.ParentExcessBlobGas
parentBlobGasUsed := pre.Env.ParentBlobGasUsed parentBlobGasUsed := pre.Env.ParentBlobGasUsed
if parentExcessBlobGas != nil && parentBlobGasUsed != nil { if parentExcessBlobGas != nil && parentBlobGasUsed != nil {
excessBlobGas = eip4844.CalcExcessBlobGas(*parentExcessBlobGas, *parentBlobGasUsed) parent := &types.Header{
vmContext.BlobBaseFee = eip4844.CalcBlobFee(excessBlobGas) Time: pre.Env.ParentTimestamp,
ExcessBlobGas: pre.Env.ParentExcessBlobGas,
BlobGasUsed: pre.Env.ParentBlobGasUsed,
}
header := &types.Header{
Time: pre.Env.Timestamp,
ExcessBlobGas: &excessBlobGas,
}
excessBlobGas = eip4844.CalcExcessBlobGas(chainConfig, parent, header.Time)
vmContext.BlobBaseFee = eip4844.CalcBlobFee(chainConfig, header)
} }
} }
// If DAO is supported/enabled, we need to handle it here. In geth 'proper', it's // If DAO is supported/enabled, we need to handle it here. In geth 'proper', it's
@ -229,7 +242,8 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
txBlobGas := uint64(0) txBlobGas := uint64(0)
if tx.Type() == types.BlobTxType { if tx.Type() == types.BlobTxType {
txBlobGas = uint64(params.BlobTxBlobGasPerBlob * len(tx.BlobHashes())) txBlobGas = uint64(params.BlobTxBlobGasPerBlob * len(tx.BlobHashes()))
if used, max := blobGasUsed+txBlobGas, uint64(params.MaxBlobGasPerBlock); used > max { max := eip4844.MaxBlobGasPerBlock(chainConfig, pre.Env.Timestamp)
if used := blobGasUsed + txBlobGas; used > max {
err := fmt.Errorf("blob gas (%d) would exceed maximum allowance %d", used, max) err := fmt.Errorf("blob gas (%d) would exceed maximum allowance %d", used, max)
log.Warn("rejected tx", "index", i, "err", err) log.Warn("rejected tx", "index", i, "err", err)
rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()}) rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()})
@ -405,7 +419,7 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB
statedb, _ := state.New(types.EmptyRootHash, sdb) statedb, _ := state.New(types.EmptyRootHash, sdb)
for addr, a := range accounts { for addr, a := range accounts {
statedb.SetCode(addr, a.Code) statedb.SetCode(addr, a.Code)
statedb.SetNonce(addr, a.Nonce) statedb.SetNonce(addr, a.Nonce, tracing.NonceChangeGenesis)
statedb.SetBalance(addr, uint256.MustFromBig(a.Balance), tracing.BalanceIncreaseGenesisBalance) statedb.SetBalance(addr, uint256.MustFromBig(a.Balance), tracing.BalanceIncreaseGenesisBalance)
for k, v := range a.Storage { for k, v := range a.Storage {
statedb.SetState(addr, k, v) statedb.SetState(addr, k, v)

View file

@ -1,18 +1,18 @@
// Copyright 2022 The go-ethereum Authors // Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library. // This file is part of go-ethereum.
// //
// The go-ethereum library is free software: you can redistribute it and/or modify // go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by // it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or // the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version. // (at your option) any later version.
// //
// The go-ethereum library is distributed in the hope that it will be useful, // go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of // but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details. // GNU General Public License for more details.
// //
// You should have received a copy of the GNU Lesser General Public License // You should have received a copy of the GNU General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main package main

View file

@ -1,5 +1,3 @@
//go:build integrationtests
// Copyright 2023 The go-ethereum Authors // Copyright 2023 The go-ethereum Authors
// This file is part of go-ethereum. // This file is part of go-ethereum.
// //
@ -16,6 +14,8 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>. // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
//go:build integrationtests
package main package main
import ( import (

View file

@ -1,5 +1,3 @@
//go:build integrationtests
// Copyright 2023 The go-ethereum Authors // Copyright 2023 The go-ethereum Authors
// This file is part of go-ethereum. // This file is part of go-ethereum.
// //
@ -16,6 +14,8 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>. // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
//go:build integrationtests
package main package main
import ( import (

View file

@ -1,5 +1,3 @@
//go:build !integrationtests
// Copyright 2023 The go-ethereum Authors // Copyright 2023 The go-ethereum Authors
// This file is part of go-ethereum. // This file is part of go-ethereum.
// //
@ -16,6 +14,8 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>. // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
//go:build !integrationtests
package main package main
import "github.com/urfave/cli/v2" import "github.com/urfave/cli/v2"

View file

@ -282,7 +282,7 @@ func (beacon *Beacon) verifyHeader(chain consensus.ChainHeaderReader, header, pa
if header.ParentBeaconRoot == nil { if header.ParentBeaconRoot == nil {
return errors.New("header is missing beaconRoot") return errors.New("header is missing beaconRoot")
} }
if err := eip4844.VerifyEIP4844Header(parent, header); err != nil { if err := eip4844.VerifyEIP4844Header(chain.Config(), parent, header); err != nil {
return err return err
} }
} }

View file

@ -23,17 +23,20 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/params/forks"
) )
var ( var (
minBlobGasPrice = big.NewInt(params.BlobTxMinBlobGasprice) minBlobGasPrice = big.NewInt(params.BlobTxMinBlobGasprice)
blobGaspriceUpdateFraction = big.NewInt(params.BlobTxBlobGaspriceUpdateFraction)
) )
// VerifyEIP4844Header verifies the presence of the excessBlobGas field and that // VerifyEIP4844Header verifies the presence of the excessBlobGas field and that
// if the current block contains no transactions, the excessBlobGas is updated // if the current block contains no transactions, the excessBlobGas is updated
// accordingly. // accordingly.
func VerifyEIP4844Header(parent, header *types.Header) error { func VerifyEIP4844Header(config *params.ChainConfig, parent, header *types.Header) error {
if header.Number.Uint64() != parent.Number.Uint64()+1 {
panic("bad header pair")
}
// Verify the header is not malformed // Verify the header is not malformed
if header.ExcessBlobGas == nil { if header.ExcessBlobGas == nil {
return errors.New("header is missing excessBlobGas") return errors.New("header is missing excessBlobGas")
@ -42,13 +45,24 @@ func VerifyEIP4844Header(parent, header *types.Header) error {
return errors.New("header is missing blobGasUsed") return errors.New("header is missing blobGasUsed")
} }
// Verify that the blob gas used remains within reasonable limits. // Verify that the blob gas used remains within reasonable limits.
if *header.BlobGasUsed > params.MaxBlobGasPerBlock { maxBlobGas := MaxBlobGasPerBlock(config, header.Time)
return fmt.Errorf("blob gas used %d exceeds maximum allowance %d", *header.BlobGasUsed, params.MaxBlobGasPerBlock) if *header.BlobGasUsed > maxBlobGas {
return fmt.Errorf("blob gas used %d exceeds maximum allowance %d", *header.BlobGasUsed, maxBlobGas)
} }
if *header.BlobGasUsed%params.BlobTxBlobGasPerBlob != 0 { if *header.BlobGasUsed%params.BlobTxBlobGasPerBlob != 0 {
return fmt.Errorf("blob gas used %d not a multiple of blob gas per blob %d", header.BlobGasUsed, params.BlobTxBlobGasPerBlob) return fmt.Errorf("blob gas used %d not a multiple of blob gas per blob %d", header.BlobGasUsed, params.BlobTxBlobGasPerBlob)
} }
// Verify the excessBlobGas is correct based on the parent header // Verify the excessBlobGas is correct based on the parent header
expectedExcessBlobGas := CalcExcessBlobGas(config, parent, header.Time)
if *header.ExcessBlobGas != expectedExcessBlobGas {
return fmt.Errorf("invalid excessBlobGas: have %d, want %d", *header.ExcessBlobGas, expectedExcessBlobGas)
}
return nil
}
// CalcExcessBlobGas calculates the excess blob gas after applying the set of
// blobs on top of the excess blob gas.
func CalcExcessBlobGas(config *params.ChainConfig, parent *types.Header, headTimestamp uint64) uint64 {
var ( var (
parentExcessBlobGas uint64 parentExcessBlobGas uint64
parentBlobGasUsed uint64 parentBlobGasUsed uint64
@ -57,27 +71,86 @@ func VerifyEIP4844Header(parent, header *types.Header) error {
parentExcessBlobGas = *parent.ExcessBlobGas parentExcessBlobGas = *parent.ExcessBlobGas
parentBlobGasUsed = *parent.BlobGasUsed parentBlobGasUsed = *parent.BlobGasUsed
} }
expectedExcessBlobGas := CalcExcessBlobGas(parentExcessBlobGas, parentBlobGasUsed)
if *header.ExcessBlobGas != expectedExcessBlobGas {
return fmt.Errorf("invalid excessBlobGas: have %d, want %d, parent excessBlobGas %d, parent blobDataUsed %d",
*header.ExcessBlobGas, expectedExcessBlobGas, parentExcessBlobGas, parentBlobGasUsed)
}
return nil
}
// CalcExcessBlobGas calculates the excess blob gas after applying the set of
// blobs on top of the excess blob gas.
func CalcExcessBlobGas(parentExcessBlobGas uint64, parentBlobGasUsed uint64) uint64 {
excessBlobGas := parentExcessBlobGas + parentBlobGasUsed excessBlobGas := parentExcessBlobGas + parentBlobGasUsed
if excessBlobGas < params.BlobTxTargetBlobGasPerBlock { targetGas := uint64(targetBlobsPerBlock(config, headTimestamp)) * params.BlobTxBlobGasPerBlob
if excessBlobGas < targetGas {
return 0 return 0
} }
return excessBlobGas - params.BlobTxTargetBlobGasPerBlock return excessBlobGas - targetGas
} }
// CalcBlobFee calculates the blobfee from the header's excess blob gas field. // CalcBlobFee calculates the blobfee from the header's excess blob gas field.
func CalcBlobFee(excessBlobGas uint64) *big.Int { func CalcBlobFee(config *params.ChainConfig, header *types.Header) *big.Int {
return fakeExponential(minBlobGasPrice, new(big.Int).SetUint64(excessBlobGas), blobGaspriceUpdateFraction) var frac uint64
switch config.LatestFork(header.Time) {
case forks.Prague:
frac = config.BlobScheduleConfig.Prague.UpdateFraction
case forks.Cancun:
frac = config.BlobScheduleConfig.Cancun.UpdateFraction
default:
panic("calculating blob fee on unsupported fork")
}
return fakeExponential(minBlobGasPrice, new(big.Int).SetUint64(*header.ExcessBlobGas), new(big.Int).SetUint64(frac))
}
// MaxBlobsPerBlock returns the max blobs per block for a block at the given timestamp.
func MaxBlobsPerBlock(cfg *params.ChainConfig, time uint64) int {
if cfg.BlobScheduleConfig == nil {
return 0
}
var (
london = cfg.LondonBlock
s = cfg.BlobScheduleConfig
)
switch {
case cfg.IsPrague(london, time) && s.Prague != nil:
return s.Prague.Max
case cfg.IsCancun(london, time) && s.Cancun != nil:
return s.Cancun.Max
default:
return 0
}
}
// MaxBlobsPerBlock returns the maximum blob gas that can be spent in a block at the given timestamp.
func MaxBlobGasPerBlock(cfg *params.ChainConfig, time uint64) uint64 {
return uint64(MaxBlobsPerBlock(cfg, time)) * params.BlobTxBlobGasPerBlob
}
// LatestMaxBlobsPerBlock returns the latest max blobs per block defined by the
// configuration, regardless of the currently active fork.
func LatestMaxBlobsPerBlock(cfg *params.ChainConfig) int {
s := cfg.BlobScheduleConfig
if s == nil {
return 0
}
switch {
case s.Prague != nil:
return s.Prague.Max
case s.Cancun != nil:
return s.Cancun.Max
default:
return 0
}
}
// targetBlobsPerBlock returns the target number of blobs in a block at the given timestamp.
func targetBlobsPerBlock(cfg *params.ChainConfig, time uint64) int {
if cfg.BlobScheduleConfig == nil {
return 0
}
var (
london = cfg.LondonBlock
s = cfg.BlobScheduleConfig
)
switch {
case cfg.IsPrague(london, time) && s.Prague != nil:
return s.Prague.Target
case cfg.IsCancun(london, time) && s.Cancun != nil:
return s.Cancun.Target
default:
return 0
}
} }
// fakeExponential approximates factor * e ** (numerator / denominator) using // fakeExponential approximates factor * e ** (numerator / denominator) using

View file

@ -21,36 +21,47 @@ import (
"math/big" "math/big"
"testing" "testing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
) )
func TestCalcExcessBlobGas(t *testing.T) { func TestCalcExcessBlobGas(t *testing.T) {
var (
config = params.MainnetChainConfig
targetBlobs = targetBlobsPerBlock(config, *config.CancunTime)
targetBlobGas = uint64(targetBlobs) * params.BlobTxBlobGasPerBlob
)
var tests = []struct { var tests = []struct {
excess uint64 excess uint64
blobs uint64 blobs int
want uint64 want uint64
}{ }{
// The excess blob gas should not increase from zero if the used blob // The excess blob gas should not increase from zero if the used blob
// slots are below - or equal - to the target. // slots are below - or equal - to the target.
{0, 0, 0}, {0, 0, 0},
{0, 1, 0}, {0, 1, 0},
{0, params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob, 0}, {0, targetBlobs, 0},
// If the target blob gas is exceeded, the excessBlobGas should increase // If the target blob gas is exceeded, the excessBlobGas should increase
// by however much it was overshot // by however much it was overshot
{0, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) + 1, params.BlobTxBlobGasPerBlob}, {0, targetBlobs + 1, params.BlobTxBlobGasPerBlob},
{1, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) + 1, params.BlobTxBlobGasPerBlob + 1}, {1, targetBlobs + 1, params.BlobTxBlobGasPerBlob + 1},
{1, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) + 2, 2*params.BlobTxBlobGasPerBlob + 1}, {1, targetBlobs + 2, 2*params.BlobTxBlobGasPerBlob + 1},
// The excess blob gas should decrease by however much the target was // The excess blob gas should decrease by however much the target was
// under-shot, capped at zero. // under-shot, capped at zero.
{params.BlobTxTargetBlobGasPerBlock, params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob, params.BlobTxTargetBlobGasPerBlock}, {targetBlobGas, targetBlobs, targetBlobGas},
{params.BlobTxTargetBlobGasPerBlock, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) - 1, params.BlobTxTargetBlobGasPerBlock - params.BlobTxBlobGasPerBlob}, {targetBlobGas, targetBlobs - 1, targetBlobGas - params.BlobTxBlobGasPerBlob},
{params.BlobTxTargetBlobGasPerBlock, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) - 2, params.BlobTxTargetBlobGasPerBlock - (2 * params.BlobTxBlobGasPerBlob)}, {targetBlobGas, targetBlobs - 2, targetBlobGas - (2 * params.BlobTxBlobGasPerBlob)},
{params.BlobTxBlobGasPerBlob - 1, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) - 1, 0}, {params.BlobTxBlobGasPerBlob - 1, targetBlobs - 1, 0},
} }
for i, tt := range tests { for i, tt := range tests {
result := CalcExcessBlobGas(tt.excess, tt.blobs*params.BlobTxBlobGasPerBlob) blobGasUsed := uint64(tt.blobs) * params.BlobTxBlobGasPerBlob
header := &types.Header{
ExcessBlobGas: &tt.excess,
BlobGasUsed: &blobGasUsed,
}
result := CalcExcessBlobGas(config, header, *config.CancunTime)
if result != tt.want { if result != tt.want {
t.Errorf("test %d: excess blob gas mismatch: have %v, want %v", i, result, tt.want) t.Errorf("test %d: excess blob gas mismatch: have %v, want %v", i, result, tt.want)
} }
@ -58,6 +69,8 @@ func TestCalcExcessBlobGas(t *testing.T) {
} }
func TestCalcBlobFee(t *testing.T) { func TestCalcBlobFee(t *testing.T) {
zero := uint64(0)
tests := []struct { tests := []struct {
excessBlobGas uint64 excessBlobGas uint64
blobfee int64 blobfee int64
@ -68,7 +81,9 @@ func TestCalcBlobFee(t *testing.T) {
{10 * 1024 * 1024, 23}, {10 * 1024 * 1024, 23},
} }
for i, tt := range tests { for i, tt := range tests {
have := CalcBlobFee(tt.excessBlobGas) config := &params.ChainConfig{LondonBlock: big.NewInt(0), CancunTime: &zero, BlobScheduleConfig: params.DefaultBlobSchedule}
header := &types.Header{ExcessBlobGas: &tt.excessBlobGas}
have := CalcBlobFee(config, header)
if have.Int64() != tt.blobfee { if have.Int64() != tt.blobfee {
t.Errorf("test %d: blobfee mismatch: have %v want %v", i, have, tt.blobfee) t.Errorf("test %d: blobfee mismatch: have %v want %v", i, have, tt.blobfee)
} }

View file

@ -113,23 +113,29 @@ const (
// * the `BlockNumber`, `TxHash`, `TxIndex`, `BlockHash` and `Index` fields of log are deleted // * the `BlockNumber`, `TxHash`, `TxIndex`, `BlockHash` and `Index` fields of log are deleted
// * the `Bloom` field of receipt is deleted // * the `Bloom` field of receipt is deleted
// * the `BlockIndex` and `TxIndex` fields of txlookup are deleted // * the `BlockIndex` and `TxIndex` fields of txlookup are deleted
//
// - Version 5 // - Version 5
// The following incompatible database changes were added: // The following incompatible database changes were added:
// * the `TxHash`, `GasCost`, and `ContractAddress` fields are no longer stored for a receipt // * the `TxHash`, `GasCost`, and `ContractAddress` fields are no longer stored for a receipt
// * the `TxHash`, `GasCost`, and `ContractAddress` fields are computed by looking up the // * the `TxHash`, `GasCost`, and `ContractAddress` fields are computed by looking up the
// receipts' corresponding block // receipts' corresponding block
//
// - Version 6 // - Version 6
// The following incompatible database changes were added: // The following incompatible database changes were added:
// * Transaction lookup information stores the corresponding block number instead of block hash // * Transaction lookup information stores the corresponding block number instead of block hash
//
// - Version 7 // - Version 7
// The following incompatible database changes were added: // The following incompatible database changes were added:
// * Use freezer as the ancient database to maintain all ancient data // * Use freezer as the ancient database to maintain all ancient data
//
// - Version 8 // - Version 8
// The following incompatible database changes were added: // The following incompatible database changes were added:
// * New scheme for contract code in order to separate the codes and trie nodes // * New scheme for contract code in order to separate the codes and trie nodes
//
// - Version 9 // - Version 9
// Total difficulty has been removed from both the key-value store and the // The following incompatible database changes were added:
// ancient store, the td freezer table has been deprecated since that. // * Total difficulty has been removed from both the key-value store and the ancient store.
// * The metadata structure of freezer is changed by adding 'flushOffset'
BlockChainVersion uint64 = 9 BlockChainVersion uint64 = 9
) )
@ -2133,9 +2139,8 @@ func (bc *BlockChain) recoverAncestors(block *types.Block, makeWitness bool) (co
// processing of a block. These logs are later announced as deleted or reborn. // processing of a block. These logs are later announced as deleted or reborn.
func (bc *BlockChain) collectLogs(b *types.Block, removed bool) []*types.Log { func (bc *BlockChain) collectLogs(b *types.Block, removed bool) []*types.Log {
var blobGasPrice *big.Int var blobGasPrice *big.Int
excessBlobGas := b.ExcessBlobGas() if b.ExcessBlobGas() != nil {
if excessBlobGas != nil { blobGasPrice = eip4844.CalcBlobFee(bc.chainConfig, b.Header())
blobGasPrice = eip4844.CalcBlobFee(*excessBlobGas)
} }
receipts := rawdb.ReadRawReceipts(bc.db, b.Hash(), b.NumberU64()) receipts := rawdb.ReadRawReceipts(bc.db, b.Hash(), b.NumberU64())
if err := receipts.DeriveFields(bc.chainConfig, b.Hash(), b.NumberU64(), b.Time(), b.BaseFee(), blobGasPrice, b.Transactions()); err != nil { if err := receipts.DeriveFields(bc.chainConfig, b.Hash(), b.NumberU64(), b.Time(), b.BaseFee(), blobGasPrice, b.Transactions()); err != nil {

View file

@ -143,7 +143,9 @@ func (b *BlockGen) addTx(bc *BlockChain, vmConfig vm.Config, tx *types.Transacti
// instruction will panic during execution if it attempts to access a block number outside // instruction will panic during execution if it attempts to access a block number outside
// of the range created by GenerateChain. // of the range created by GenerateChain.
func (b *BlockGen) AddTx(tx *types.Transaction) { func (b *BlockGen) AddTx(tx *types.Transaction) {
b.addTx(nil, vm.Config{}, tx) // Wrap the chain config in an empty BlockChain object to satisfy ChainContext.
bc := &BlockChain{chainConfig: b.cm.config}
b.addTx(bc, vm.Config{}, tx)
} }
// AddTxWithChain adds a transaction to the generated block. If no coinbase has // AddTxWithChain adds a transaction to the generated block. If no coinbase has
@ -445,7 +447,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
} }
var blobGasPrice *big.Int var blobGasPrice *big.Int
if block.ExcessBlobGas() != nil { if block.ExcessBlobGas() != nil {
blobGasPrice = eip4844.CalcBlobFee(*block.ExcessBlobGas()) blobGasPrice = eip4844.CalcBlobFee(cm.config, block.Header())
} }
if err := receipts.DeriveFields(config, block.Hash(), block.NumberU64(), block.Time(), block.BaseFee(), blobGasPrice, txs); err != nil { if err := receipts.DeriveFields(config, block.Hash(), block.NumberU64(), block.Time(), block.BaseFee(), blobGasPrice, txs); err != nil {
panic(err) panic(err)
@ -548,7 +550,7 @@ func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine
} }
var blobGasPrice *big.Int var blobGasPrice *big.Int
if block.ExcessBlobGas() != nil { if block.ExcessBlobGas() != nil {
blobGasPrice = eip4844.CalcBlobFee(*block.ExcessBlobGas()) blobGasPrice = eip4844.CalcBlobFee(cm.config, block.Header())
} }
if err := receipts.DeriveFields(config, block.Hash(), block.NumberU64(), block.Time(), block.BaseFee(), blobGasPrice, txs); err != nil { if err := receipts.DeriveFields(config, block.Hash(), block.NumberU64(), block.Time(), block.BaseFee(), blobGasPrice, txs); err != nil {
panic(err) panic(err)
@ -580,33 +582,26 @@ func GenerateVerkleChainWithGenesis(genesis *Genesis, engine consensus.Engine, n
func (cm *chainMaker) makeHeader(parent *types.Block, state *state.StateDB, engine consensus.Engine) *types.Header { func (cm *chainMaker) makeHeader(parent *types.Block, state *state.StateDB, engine consensus.Engine) *types.Header {
time := parent.Time() + 10 // block time is fixed at 10 seconds time := parent.Time() + 10 // block time is fixed at 10 seconds
parentHeader := parent.Header()
header := &types.Header{ header := &types.Header{
Root: state.IntermediateRoot(cm.config.IsEIP158(parent.Number())), Root: state.IntermediateRoot(cm.config.IsEIP158(parent.Number())),
ParentHash: parent.Hash(), ParentHash: parent.Hash(),
Coinbase: parent.Coinbase(), Coinbase: parent.Coinbase(),
Difficulty: engine.CalcDifficulty(cm, time, parent.Header()), Difficulty: engine.CalcDifficulty(cm, time, parentHeader),
GasLimit: parent.GasLimit(), GasLimit: parent.GasLimit(),
Number: new(big.Int).Add(parent.Number(), common.Big1), Number: new(big.Int).Add(parent.Number(), common.Big1),
Time: time, Time: time,
} }
if cm.config.IsLondon(header.Number) { if cm.config.IsLondon(header.Number) {
header.BaseFee = eip1559.CalcBaseFee(cm.config, parent.Header()) header.BaseFee = eip1559.CalcBaseFee(cm.config, parentHeader)
if !cm.config.IsLondon(parent.Number()) { if !cm.config.IsLondon(parent.Number()) {
parentGasLimit := parent.GasLimit() * cm.config.ElasticityMultiplier() parentGasLimit := parent.GasLimit() * cm.config.ElasticityMultiplier()
header.GasLimit = CalcGasLimit(parentGasLimit, parentGasLimit) header.GasLimit = CalcGasLimit(parentGasLimit, parentGasLimit)
} }
} }
if cm.config.IsCancun(header.Number, header.Time) { if cm.config.IsCancun(header.Number, header.Time) {
var ( excessBlobGas := eip4844.CalcExcessBlobGas(cm.config, parentHeader, time)
parentExcessBlobGas uint64
parentBlobGasUsed uint64
)
if parent.ExcessBlobGas() != nil {
parentExcessBlobGas = *parent.ExcessBlobGas()
parentBlobGasUsed = *parent.BlobGasUsed()
}
excessBlobGas := eip4844.CalcExcessBlobGas(parentExcessBlobGas, parentBlobGasUsed)
header.ExcessBlobGas = &excessBlobGas header.ExcessBlobGas = &excessBlobGas
header.BlobGasUsed = new(uint64) header.BlobGasUsed = new(uint64)
header.ParentBeaconRoot = new(common.Hash) header.ParentBeaconRoot = new(common.Hash)

View file

@ -42,7 +42,7 @@ func TestGeneratePOSChain(t *testing.T) {
aa = common.Address{0xaa} aa = common.Address{0xaa}
bb = common.Address{0xbb} bb = common.Address{0xbb}
funds = big.NewInt(0).Mul(big.NewInt(1337), big.NewInt(params.Ether)) funds = big.NewInt(0).Mul(big.NewInt(1337), big.NewInt(params.Ether))
config = *params.AllEthashProtocolChanges config = *params.MergedTestChainConfig
gspec = &Genesis{ gspec = &Genesis{
Config: &config, Config: &config,
Alloc: types.GenesisAlloc{ Alloc: types.GenesisAlloc{
@ -57,10 +57,6 @@ func TestGeneratePOSChain(t *testing.T) {
db = rawdb.NewMemoryDatabase() db = rawdb.NewMemoryDatabase()
) )
config.TerminalTotalDifficulty = common.Big0
config.ShanghaiTime = u64(0)
config.CancunTime = u64(0)
// init 0xaa with some storage elements // init 0xaa with some storage elements
storage := make(map[common.Hash]common.Hash) storage := make(map[common.Hash]common.Hash)
storage[common.Hash{0x00}] = common.Hash{0x00} storage[common.Hash{0x00}] = common.Hash{0x00}

View file

@ -25,6 +25,7 @@ import (
"github.com/ethereum/go-ethereum/core/tracing" "github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/params"
"github.com/holiman/uint256" "github.com/holiman/uint256"
) )
@ -36,6 +37,9 @@ type ChainContext interface {
// GetHeader returns the header corresponding to the hash/number argument pair. // GetHeader returns the header corresponding to the hash/number argument pair.
GetHeader(common.Hash, uint64) *types.Header GetHeader(common.Hash, uint64) *types.Header
// Config returns the chain's configuration.
Config() *params.ChainConfig
} }
// NewEVMBlockContext creates a new context for use in the EVM. // NewEVMBlockContext creates a new context for use in the EVM.
@ -57,7 +61,7 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
baseFee = new(big.Int).Set(header.BaseFee) baseFee = new(big.Int).Set(header.BaseFee)
} }
if header.ExcessBlobGas != nil { if header.ExcessBlobGas != nil {
blobBaseFee = eip4844.CalcBlobFee(*header.ExcessBlobGas) blobBaseFee = eip4844.CalcBlobFee(chain.Config(), header)
} }
if header.Difficulty.Sign() == 0 { if header.Difficulty.Sign() == 0 {
random = &header.MixDigest random = &header.MixDigest

View file

@ -75,6 +75,19 @@ type Genesis struct {
BlobGasUsed *uint64 `json:"blobGasUsed"` // EIP-4844 BlobGasUsed *uint64 `json:"blobGasUsed"` // EIP-4844
} }
// copy copies the genesis.
func (g *Genesis) copy() *Genesis {
if g != nil {
cpy := *g
if g.Config != nil {
conf := *g.Config
cpy.Config = &conf
}
return &cpy
}
return nil
}
func ReadGenesis(db ethdb.Database) (*Genesis, error) { func ReadGenesis(db ethdb.Database) (*Genesis, error) {
var genesis Genesis var genesis Genesis
stored := rawdb.ReadCanonicalHash(db, 0) stored := rawdb.ReadCanonicalHash(db, 0)
@ -141,7 +154,7 @@ func hashAlloc(ga *types.GenesisAlloc, isVerkle bool) (common.Hash, error) {
statedb.AddBalance(addr, uint256.MustFromBig(account.Balance), tracing.BalanceIncreaseGenesisBalance) statedb.AddBalance(addr, uint256.MustFromBig(account.Balance), tracing.BalanceIncreaseGenesisBalance)
} }
statedb.SetCode(addr, account.Code) statedb.SetCode(addr, account.Code)
statedb.SetNonce(addr, account.Nonce) statedb.SetNonce(addr, account.Nonce, tracing.NonceChangeGenesis)
for key, value := range account.Storage { for key, value := range account.Storage {
statedb.SetState(addr, key, value) statedb.SetState(addr, key, value)
} }
@ -167,7 +180,7 @@ func flushAlloc(ga *types.GenesisAlloc, triedb *triedb.Database) (common.Hash, e
statedb.AddBalance(addr, uint256.MustFromBig(account.Balance), tracing.BalanceIncreaseGenesisBalance) statedb.AddBalance(addr, uint256.MustFromBig(account.Balance), tracing.BalanceIncreaseGenesisBalance)
} }
statedb.SetCode(addr, account.Code) statedb.SetCode(addr, account.Code)
statedb.SetNonce(addr, account.Nonce) statedb.SetNonce(addr, account.Nonce, tracing.NonceChangeGenesis)
for key, value := range account.Storage { for key, value := range account.Storage {
statedb.SetState(addr, key, value) statedb.SetState(addr, key, value)
} }
@ -248,21 +261,17 @@ type ChainOverrides struct {
} }
// apply applies the chain overrides on the supplied chain config. // apply applies the chain overrides on the supplied chain config.
func (o *ChainOverrides) apply(cfg *params.ChainConfig) (*params.ChainConfig, error) { func (o *ChainOverrides) apply(cfg *params.ChainConfig) error {
if o == nil || cfg == nil { if o == nil || cfg == nil {
return cfg, nil return nil
} }
cpy := *cfg
if o.OverrideCancun != nil { if o.OverrideCancun != nil {
cpy.CancunTime = o.OverrideCancun cfg.CancunTime = o.OverrideCancun
} }
if o.OverrideVerkle != nil { if o.OverrideVerkle != nil {
cpy.VerkleTime = o.OverrideVerkle cfg.VerkleTime = o.OverrideVerkle
} }
if err := cpy.CheckConfigForkOrder(); err != nil { return cfg.CheckConfigForkOrder()
return nil, err
}
return &cpy, nil
} }
// SetupGenesisBlock writes or updates the genesis block in db. // SetupGenesisBlock writes or updates the genesis block in db.
@ -281,6 +290,8 @@ func SetupGenesisBlock(db ethdb.Database, triedb *triedb.Database, genesis *Gene
} }
func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *triedb.Database, genesis *Genesis, overrides *ChainOverrides) (*params.ChainConfig, common.Hash, *params.ConfigCompatError, error) { func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *triedb.Database, genesis *Genesis, overrides *ChainOverrides) (*params.ChainConfig, common.Hash, *params.ConfigCompatError, error) {
// Copy the genesis, so we can operate on a copy.
genesis = genesis.copy()
// Sanitize the supplied genesis, ensuring it has the associated chain // Sanitize the supplied genesis, ensuring it has the associated chain
// config attached. // config attached.
if genesis != nil && genesis.Config == nil { if genesis != nil && genesis.Config == nil {
@ -295,17 +306,15 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *triedb.Database, g
} else { } else {
log.Info("Writing custom genesis block") log.Info("Writing custom genesis block")
} }
chainCfg, err := overrides.apply(genesis.Config) if err := overrides.apply(genesis.Config); err != nil {
if err != nil {
return nil, common.Hash{}, nil, err return nil, common.Hash{}, nil, err
} }
genesis.Config = chainCfg
block, err := genesis.Commit(db, triedb) block, err := genesis.Commit(db, triedb)
if err != nil { if err != nil {
return nil, common.Hash{}, nil, err return nil, common.Hash{}, nil, err
} }
return chainCfg, block.Hash(), nil, nil return genesis.Config, block.Hash(), nil, nil
} }
// Commit the genesis if the genesis block exists in the ancient database // Commit the genesis if the genesis block exists in the ancient database
// but the key-value database is empty without initializing the genesis // but the key-value database is empty without initializing the genesis
@ -322,11 +331,9 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *triedb.Database, g
} else { } else {
log.Info("Writing custom genesis block") log.Info("Writing custom genesis block")
} }
chainCfg, err := overrides.apply(genesis.Config) if err := overrides.apply(genesis.Config); err != nil {
if err != nil {
return nil, common.Hash{}, nil, err return nil, common.Hash{}, nil, err
} }
genesis.Config = chainCfg
if hash := genesis.ToBlock().Hash(); hash != ghash { if hash := genesis.ToBlock().Hash(); hash != ghash {
return nil, common.Hash{}, nil, &GenesisMismatchError{ghash, hash} return nil, common.Hash{}, nil, &GenesisMismatchError{ghash, hash}
@ -335,17 +342,15 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *triedb.Database, g
if err != nil { if err != nil {
return nil, common.Hash{}, nil, err return nil, common.Hash{}, nil, err
} }
return chainCfg, block.Hash(), nil, nil return genesis.Config, block.Hash(), nil, nil
} }
// The genesis block has already been committed previously. Verify that the // The genesis block has already been committed previously. Verify that the
// provided genesis with chain overrides matches the existing one, and update // provided genesis with chain overrides matches the existing one, and update
// the stored chain config if necessary. // the stored chain config if necessary.
if genesis != nil { if genesis != nil {
chainCfg, err := overrides.apply(genesis.Config) if err := overrides.apply(genesis.Config); err != nil {
if err != nil {
return nil, common.Hash{}, nil, err return nil, common.Hash{}, nil, err
} }
genesis.Config = chainCfg
if hash := genesis.ToBlock().Hash(); hash != ghash { if hash := genesis.ToBlock().Hash(); hash != ghash {
return nil, common.Hash{}, nil, &GenesisMismatchError{ghash, hash} return nil, common.Hash{}, nil, &GenesisMismatchError{ghash, hash}
@ -459,8 +464,12 @@ func (g *Genesis) toBlockWithRoot(root common.Hash) *types.Block {
if g.GasLimit == 0 { if g.GasLimit == 0 {
head.GasLimit = params.GenesisGasLimit head.GasLimit = params.GenesisGasLimit
} }
if g.Difficulty == nil && g.Mixhash == (common.Hash{}) { if g.Difficulty == nil {
head.Difficulty = params.GenesisDifficulty if g.Config != nil && g.Config.Ethash == nil {
head.Difficulty = big.NewInt(0)
} else if g.Mixhash == (common.Hash{}) {
head.Difficulty = params.GenesisDifficulty
}
} }
if g.Config != nil && g.Config.IsLondon(common.Big0) { if g.Config != nil && g.Config.IsLondon(common.Big0) {
if g.BaseFee != nil { if g.BaseFee != nil {

View file

@ -44,14 +44,14 @@ func testSetupGenesis(t *testing.T, scheme string) {
var ( var (
customghash = common.HexToHash("0x89c99d90b79719238d2645c7642f2c9295246e80775b38cfd162b696817fbd50") customghash = common.HexToHash("0x89c99d90b79719238d2645c7642f2c9295246e80775b38cfd162b696817fbd50")
customg = Genesis{ customg = Genesis{
Config: &params.ChainConfig{HomesteadBlock: big.NewInt(3)}, Config: &params.ChainConfig{HomesteadBlock: big.NewInt(3), Ethash: &params.EthashConfig{}},
Alloc: types.GenesisAlloc{ Alloc: types.GenesisAlloc{
{1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}}, {1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}},
}, },
} }
oldcustomg = customg oldcustomg = customg
) )
oldcustomg.Config = &params.ChainConfig{HomesteadBlock: big.NewInt(2)} oldcustomg.Config = &params.ChainConfig{HomesteadBlock: big.NewInt(2), Ethash: &params.EthashConfig{}}
tests := []struct { tests := []struct {
name string name string
@ -272,11 +272,17 @@ func TestVerkleGenesisCommit(t *testing.T) {
ShanghaiTime: &verkleTime, ShanghaiTime: &verkleTime,
CancunTime: &verkleTime, CancunTime: &verkleTime,
PragueTime: &verkleTime, PragueTime: &verkleTime,
OsakaTime: &verkleTime,
VerkleTime: &verkleTime, VerkleTime: &verkleTime,
TerminalTotalDifficulty: big.NewInt(0), TerminalTotalDifficulty: big.NewInt(0),
EnableVerkleAtGenesis: true, EnableVerkleAtGenesis: true,
Ethash: nil, Ethash: nil,
Clique: nil, Clique: nil,
BlobScheduleConfig: &params.BlobScheduleConfig{
Cancun: params.DefaultCancunBlobConfig,
Prague: params.DefaultPragueBlobConfig,
Verkle: params.DefaultPragueBlobConfig,
},
} }
genesis := &Genesis{ genesis := &Genesis{

View file

@ -587,7 +587,7 @@ func ReadReceipts(db ethdb.Reader, hash common.Hash, number uint64, time uint64,
// Compute effective blob gas price. // Compute effective blob gas price.
var blobGasPrice *big.Int var blobGasPrice *big.Int
if header != nil && header.ExcessBlobGas != nil { if header != nil && header.ExcessBlobGas != nil {
blobGasPrice = eip4844.CalcBlobFee(*header.ExcessBlobGas) blobGasPrice = eip4844.CalcBlobFee(config, header)
} }
if err := receipts.DeriveFields(config, hash, number, time, baseFee, blobGasPrice, body.Transactions); err != nil { if err := receipts.DeriveFields(config, hash, number, time, baseFee, blobGasPrice, body.Transactions); err != nil {
log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err) log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err)

View file

@ -849,6 +849,7 @@ func TestHeadersRLPStorage(t *testing.T) {
t.Fatalf("failed to create database with ancient backend") t.Fatalf("failed to create database with ancient backend")
} }
defer db.Close() defer db.Close()
// Create blocks // Create blocks
var chain []*types.Block var chain []*types.Block
var pHash common.Hash var pHash common.Hash
@ -864,7 +865,7 @@ func TestHeadersRLPStorage(t *testing.T) {
chain = append(chain, block) chain = append(chain, block)
pHash = block.Hash() pHash = block.Hash()
} }
var receipts []types.Receipts = make([]types.Receipts, 100) receipts := make([]types.Receipts, 100)
// Write first half to ancients // Write first half to ancients
WriteAncientBlocks(db, chain[:50], receipts[:50]) WriteAncientBlocks(db, chain[:50], receipts[:50])
// Write second half to db // Write second half to db

View file

@ -1,4 +1,4 @@
// Copyright 2022 The go-ethereum Authors // Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library. // This file is part of the go-ethereum library.
// //
// The go-ethereum library is free software: you can redistribute it and/or modify // The go-ethereum library is free software: you can redistribute it and/or modify
@ -12,7 +12,7 @@
// GNU Lesser General Public License for more details. // GNU Lesser General Public License for more details.
// //
// You should have received a copy of the GNU Lesser General Public License // You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/> // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package rawdb package rawdb

View file

@ -58,6 +58,7 @@ const (
stateHistoryStorageData = "storage.data" stateHistoryStorageData = "storage.data"
) )
// stateFreezerNoSnappy configures whether compression is disabled for the state freezer.
var stateFreezerNoSnappy = map[string]bool{ var stateFreezerNoSnappy = map[string]bool{
stateHistoryMeta: true, stateHistoryMeta: true,
stateHistoryAccountIndex: false, stateHistoryAccountIndex: false,

View file

@ -19,6 +19,7 @@ package rawdb
import ( import (
"fmt" "fmt"
"math" "math"
"time"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/golang/snappy" "github.com/golang/snappy"
@ -188,9 +189,6 @@ func (batch *freezerTableBatch) commit() error {
if err != nil { if err != nil {
return err return err
} }
if err := batch.t.head.Sync(); err != nil {
return err
}
dataSize := int64(len(batch.dataBuffer)) dataSize := int64(len(batch.dataBuffer))
batch.dataBuffer = batch.dataBuffer[:0] batch.dataBuffer = batch.dataBuffer[:0]
@ -208,6 +206,12 @@ func (batch *freezerTableBatch) commit() error {
// Update metrics. // Update metrics.
batch.t.sizeGauge.Inc(dataSize + indexSize) batch.t.sizeGauge.Inc(dataSize + indexSize)
batch.t.writeMeter.Mark(dataSize + indexSize) batch.t.writeMeter.Mark(dataSize + indexSize)
// Periodically sync the table, todo (rjl493456442) make it configurable?
if time.Since(batch.t.lastSync) > 30*time.Second {
batch.t.lastSync = time.Now()
return batch.t.Sync()
}
return nil return nil
} }

View file

@ -17,93 +17,173 @@
package rawdb package rawdb
import ( import (
"errors"
"io" "io"
"math"
"os" "os"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
) )
const freezerVersion = 1 // The initial version tag of freezer table metadata const (
freezerTableV1 = 1 // Initial version of metadata struct
freezerTableV2 = 2 // Add field: 'flushOffset'
freezerVersion = freezerTableV2 // The current used version
)
// freezerTableMeta wraps all the metadata of the freezer table. // freezerTableMeta is a collection of additional properties that describe the
// freezer table. These properties are designed with error resilience, allowing
// them to be automatically corrected after an error occurs without significantly
// impacting overall correctness.
type freezerTableMeta struct { type freezerTableMeta struct {
// Version is the versioning descriptor of the freezer table. file *os.File // file handler of metadata
Version uint16 version uint16 // version descriptor of the freezer table
// VirtualTail indicates how many items have been marked as deleted. // virtualTail represents the number of items marked as deleted. It is
// Its value is equal to the number of items removed from the table // calculated as the sum of items removed from the table and the items
// plus the number of items hidden in the table, so it should never // hidden within the table, and should never be less than the "actual
// be lower than the "actual tail". // tail".
VirtualTail uint64 //
// If lost due to a crash or other reasons, it will be reset to the number
// of items deleted from the table, causing the previously hidden items
// to become visible, which is an acceptable consequence.
virtualTail uint64
// flushOffset represents the offset in the index file up to which the index
// items along with the corresponding data items in data files has been flushed
// (fsyncd) to disk. Beyond this offset, data integrity is not guaranteed,
// the extra index items along with the associated data items should be removed
// during the startup.
//
// The principle is that all data items above the flush offset are considered
// volatile and should be recoverable if they are discarded after the unclean
// shutdown. If data integrity is required, manually force a sync of the
// freezer before proceeding with further operations (e.g. do freezer.Sync()
// first and then write data to key value store in some circumstances).
//
// The offset could be moved forward by applying sync operation, or be moved
// backward in cases of head/tail truncation, etc.
flushOffset int64
} }
// newMetadata initializes the metadata object with the given virtual tail. // decodeV1 attempts to decode the metadata structure in v1 format. If fails or
func newMetadata(tail uint64) *freezerTableMeta { // the result is incompatible, nil is returned.
func decodeV1(file *os.File) *freezerTableMeta {
_, err := file.Seek(0, io.SeekStart)
if err != nil {
return nil
}
type obj struct {
Version uint16
Tail uint64
}
var o obj
if err := rlp.Decode(file, &o); err != nil {
return nil
}
if o.Version != freezerTableV1 {
return nil
}
return &freezerTableMeta{ return &freezerTableMeta{
Version: freezerVersion, file: file,
VirtualTail: tail, version: o.Version,
virtualTail: o.Tail,
} }
} }
// readMetadata reads the metadata of the freezer table from the // decodeV2 attempts to decode the metadata structure in v2 format. If fails or
// given metadata file. // the result is incompatible, nil is returned.
func readMetadata(file *os.File) (*freezerTableMeta, error) { func decodeV2(file *os.File) *freezerTableMeta {
_, err := file.Seek(0, io.SeekStart) _, err := file.Seek(0, io.SeekStart)
if err != nil { if err != nil {
return nil, err return nil
} }
var meta freezerTableMeta type obj struct {
if err := rlp.Decode(file, &meta); err != nil { Version uint16
return nil, err Tail uint64
Offset uint64
}
var o obj
if err := rlp.Decode(file, &o); err != nil {
return nil
}
if o.Version != freezerTableV2 {
return nil
}
if o.Offset > math.MaxInt64 {
log.Error("Invalid flushOffset %d in freezer metadata", o.Offset, "file", file.Name())
return nil
}
return &freezerTableMeta{
file: file,
version: freezerTableV2,
virtualTail: o.Tail,
flushOffset: int64(o.Offset),
} }
return &meta, nil
} }
// writeMetadata writes the metadata of the freezer table into the // newMetadata initializes the metadata object, either by loading it from the file
// given metadata file. // or by constructing a new one from scratch.
func writeMetadata(file *os.File, meta *freezerTableMeta) error { func newMetadata(file *os.File) (*freezerTableMeta, error) {
_, err := file.Seek(0, io.SeekStart)
if err != nil {
return err
}
return rlp.Encode(file, meta)
}
// loadMetadata loads the metadata from the given metadata file.
// Initializes the metadata file with the given "actual tail" if
// it's empty.
func loadMetadata(file *os.File, tail uint64) (*freezerTableMeta, error) {
stat, err := file.Stat() stat, err := file.Stat()
if err != nil { if err != nil {
return nil, err return nil, err
} }
// Write the metadata with the given actual tail into metadata file
// if it's non-existent. There are two possible scenarios here:
// - the freezer table is empty
// - the freezer table is legacy
// In both cases, write the meta into the file with the actual tail
// as the virtual tail.
if stat.Size() == 0 { if stat.Size() == 0 {
m := newMetadata(tail) m := &freezerTableMeta{
if err := writeMetadata(file, m); err != nil { file: file,
version: freezerTableV2,
virtualTail: 0,
flushOffset: 0,
}
if err := m.write(true); err != nil {
return nil, err return nil, err
} }
return m, nil return m, nil
} }
m, err := readMetadata(file) if m := decodeV2(file); m != nil {
if err != nil { return m, nil
return nil, err
} }
// Update the virtual tail with the given actual tail if it's even if m := decodeV1(file); m != nil {
// lower than it. Theoretically it shouldn't happen at all, print return m, nil // legacy metadata
// a warning here.
if m.VirtualTail < tail {
log.Warn("Updated virtual tail", "have", m.VirtualTail, "now", tail)
m.VirtualTail = tail
if err := writeMetadata(file, m); err != nil {
return nil, err
}
} }
return m, nil return nil, errors.New("failed to decode metadata")
}
// setVirtualTail sets the virtual tail and flushes the metadata if sync is true.
func (m *freezerTableMeta) setVirtualTail(tail uint64, sync bool) error {
m.virtualTail = tail
return m.write(sync)
}
// setFlushOffset sets the flush offset and flushes the metadata if sync is true.
func (m *freezerTableMeta) setFlushOffset(offset int64, sync bool) error {
m.flushOffset = offset
return m.write(sync)
}
// write flushes the content of metadata into file and performs a fsync if required.
func (m *freezerTableMeta) write(sync bool) error {
type obj struct {
Version uint16
Tail uint64
Offset uint64
}
var o obj
o.Version = freezerVersion // forcibly use the current version
o.Tail = m.virtualTail
o.Offset = uint64(m.flushOffset)
_, err := m.file.Seek(0, io.SeekStart)
if err != nil {
return err
}
if err := rlp.Encode(m.file, &o); err != nil {
return err
}
if !sync {
return nil
}
return m.file.Sync()
} }

View file

@ -19,6 +19,8 @@ package rawdb
import ( import (
"os" "os"
"testing" "testing"
"github.com/ethereum/go-ethereum/rlp"
) )
func TestReadWriteFreezerTableMeta(t *testing.T) { func TestReadWriteFreezerTableMeta(t *testing.T) {
@ -27,36 +29,98 @@ func TestReadWriteFreezerTableMeta(t *testing.T) {
t.Fatalf("Failed to create file %v", err) t.Fatalf("Failed to create file %v", err)
} }
defer f.Close() defer f.Close()
err = writeMetadata(f, newMetadata(100))
meta, err := newMetadata(f)
if err != nil { if err != nil {
t.Fatalf("Failed to write metadata %v", err) t.Fatalf("Failed to new metadata %v", err)
} }
meta, err := readMetadata(f) meta.setVirtualTail(100, false)
meta, err = newMetadata(f)
if err != nil { if err != nil {
t.Fatalf("Failed to read metadata %v", err) t.Fatalf("Failed to reload metadata %v", err)
} }
if meta.Version != freezerVersion { if meta.version != freezerTableV2 {
t.Fatalf("Unexpected version field") t.Fatalf("Unexpected version field")
} }
if meta.VirtualTail != uint64(100) { if meta.virtualTail != uint64(100) {
t.Fatalf("Unexpected virtual tail field") t.Fatalf("Unexpected virtual tail field")
} }
} }
func TestInitializeFreezerTableMeta(t *testing.T) { func TestUpgradeMetadata(t *testing.T) {
f, err := os.CreateTemp(t.TempDir(), "*") f, err := os.CreateTemp(t.TempDir(), "*")
if err != nil { if err != nil {
t.Fatalf("Failed to create file %v", err) t.Fatalf("Failed to create file %v", err)
} }
defer f.Close() defer f.Close()
meta, err := loadMetadata(f, uint64(100))
// Write legacy metadata into file
type obj struct {
Version uint16
Tail uint64
}
var o obj
o.Version = freezerTableV1
o.Tail = 100
if err := rlp.Encode(f, &o); err != nil {
t.Fatalf("Failed to encode %v", err)
}
// Reload the metadata, a silent upgrade is expected
meta, err := newMetadata(f)
if err != nil { if err != nil {
t.Fatalf("Failed to read metadata %v", err) t.Fatalf("Failed to read metadata %v", err)
} }
if meta.Version != freezerVersion { if meta.version != freezerTableV1 {
t.Fatalf("Unexpected version field") t.Fatal("Unexpected version field")
} }
if meta.VirtualTail != uint64(100) { if meta.virtualTail != uint64(100) {
t.Fatalf("Unexpected virtual tail field") t.Fatal("Unexpected virtual tail field")
}
if meta.flushOffset != 0 {
t.Fatal("Unexpected flush offset field")
}
meta.setFlushOffset(100, true)
meta, err = newMetadata(f)
if err != nil {
t.Fatalf("Failed to read metadata %v", err)
}
if meta.version != freezerTableV2 {
t.Fatal("Unexpected version field")
}
if meta.virtualTail != uint64(100) {
t.Fatal("Unexpected virtual tail field")
}
if meta.flushOffset != 100 {
t.Fatal("Unexpected flush offset field")
}
}
func TestInvalidMetadata(t *testing.T) {
f, err := os.CreateTemp(t.TempDir(), "*")
if err != nil {
t.Fatalf("Failed to create file %v", err)
}
defer f.Close()
// Write invalid legacy metadata into file
type obj struct {
Version uint16
Tail uint64
}
var o obj
o.Version = freezerTableV2 // -> invalid version tag
o.Tail = 100
if err := rlp.Encode(f, &o); err != nil {
t.Fatalf("Failed to encode %v", err)
}
_, err = newMetadata(f)
if err == nil {
t.Fatal("Unexpected success")
} }
} }

View file

@ -108,11 +108,13 @@ type freezerTable struct {
head *os.File // File descriptor for the data head of the table head *os.File // File descriptor for the data head of the table
index *os.File // File descriptor for the indexEntry file of the table index *os.File // File descriptor for the indexEntry file of the table
meta *os.File // File descriptor for metadata of the table
files map[uint32]*os.File // open files files map[uint32]*os.File // open files
headId uint32 // number of the currently active head file headId uint32 // number of the currently active head file
tailId uint32 // number of the earliest file tailId uint32 // number of the earliest file
metadata *freezerTableMeta // metadata of the table
lastSync time.Time // Timestamp when the last sync was performed
headBytes int64 // Number of bytes written to the head file headBytes int64 // Number of bytes written to the head file
readMeter *metrics.Meter // Meter for measuring the effective amount of data read readMeter *metrics.Meter // Meter for measuring the effective amount of data read
writeMeter *metrics.Meter // Meter for measuring the effective amount of data written writeMeter *metrics.Meter // Meter for measuring the effective amount of data written
@ -166,10 +168,17 @@ func newTable(path string, name string, readMeter, writeMeter *metrics.Meter, si
return nil, err return nil, err
} }
} }
// Load metadata from the file. The tag will be true if legacy metadata
// is detected.
metadata, err := newMetadata(meta)
if err != nil {
return nil, err
}
// Create the table and repair any past inconsistency // Create the table and repair any past inconsistency
tab := &freezerTable{ tab := &freezerTable{
index: index, index: index,
meta: meta, metadata: metadata,
lastSync: time.Now(),
files: make(map[uint32]*os.File), files: make(map[uint32]*os.File),
readMeter: readMeter, readMeter: readMeter,
writeMeter: writeMeter, writeMeter: writeMeter,
@ -221,13 +230,11 @@ func (t *freezerTable) repair() error {
return err return err
} // New file can't trigger this path } // New file can't trigger this path
} }
// Validate the index file as it might contain some garbage data after the
// power failures.
if err := t.repairIndex(); err != nil { if err := t.repairIndex(); err != nil {
return err return err
} }
// Retrieve the file sizes and prepare for truncation. Note the file size // Retrieve the file sizes and prepare for truncation. Note the file size
// might be changed after index validation. // might be changed after index repair.
if stat, err = t.index.Stat(); err != nil { if stat, err = t.index.Stat(); err != nil {
return err return err
} }
@ -253,12 +260,14 @@ func (t *freezerTable) repair() error {
t.tailId = firstIndex.filenum t.tailId = firstIndex.filenum
t.itemOffset.Store(uint64(firstIndex.offset)) t.itemOffset.Store(uint64(firstIndex.offset))
// Load metadata from the file // Adjust the number of hidden items if it is less than the number of items
meta, err := loadMetadata(t.meta, t.itemOffset.Load()) // being removed.
if err != nil { if t.itemOffset.Load() > t.metadata.virtualTail {
return err if err := t.metadata.setVirtualTail(t.itemOffset.Load(), true); err != nil {
return err
}
} }
t.itemHidden.Store(meta.VirtualTail) t.itemHidden.Store(t.metadata.virtualTail)
// Read the last index, use the default value in case the freezer is empty // Read the last index, use the default value in case the freezer is empty
if offsetsSize == indexEntrySize { if offsetsSize == indexEntrySize {
@ -267,12 +276,6 @@ func (t *freezerTable) repair() error {
t.index.ReadAt(buffer, offsetsSize-indexEntrySize) t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
lastIndex.unmarshalBinary(buffer) lastIndex.unmarshalBinary(buffer)
} }
// Print an error log if the index is corrupted due to an incorrect
// last index item. While it is theoretically possible to have a zero offset
// by storing all zero-size items, it is highly unlikely to occur in practice.
if lastIndex.offset == 0 && offsetsSize/indexEntrySize > 1 {
log.Error("Corrupted index file detected", "lastOffset", lastIndex.offset, "indexes", offsetsSize/indexEntrySize)
}
if t.readonly { if t.readonly {
t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForReadOnly) t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForReadOnly)
} else { } else {
@ -293,6 +296,7 @@ func (t *freezerTable) repair() error {
return fmt.Errorf("freezer table(path: %s, name: %s, num: %d) is corrupted", t.path, t.name, lastIndex.filenum) return fmt.Errorf("freezer table(path: %s, name: %s, num: %d) is corrupted", t.path, t.name, lastIndex.filenum)
} }
verbose = true verbose = true
// Truncate the head file to the last offset pointer // Truncate the head file to the last offset pointer
if contentExp < contentSize { if contentExp < contentSize {
t.logger.Warn("Truncating dangling head", "indexed", contentExp, "stored", contentSize) t.logger.Warn("Truncating dangling head", "indexed", contentExp, "stored", contentSize)
@ -304,11 +308,23 @@ func (t *freezerTable) repair() error {
// Truncate the index to point within the head file // Truncate the index to point within the head file
if contentExp > contentSize { if contentExp > contentSize {
t.logger.Warn("Truncating dangling indexes", "indexes", offsetsSize/indexEntrySize, "indexed", contentExp, "stored", contentSize) t.logger.Warn("Truncating dangling indexes", "indexes", offsetsSize/indexEntrySize, "indexed", contentExp, "stored", contentSize)
if err := truncateFreezerFile(t.index, offsetsSize-indexEntrySize); err != nil {
newOffset := offsetsSize - indexEntrySize
if err := truncateFreezerFile(t.index, newOffset); err != nil {
return err return err
} }
offsetsSize -= indexEntrySize offsetsSize -= indexEntrySize
// If the index file is truncated beyond the flush offset, move the flush
// offset back to the new end of the file. A crash may occur before the
// offset is updated, leaving a dangling reference that points to a position
// outside the file. If so, the offset will be reset to the new end of the
// file during the next run.
if t.metadata.flushOffset > newOffset {
if err := t.metadata.setFlushOffset(newOffset, true); err != nil {
return err
}
}
// Read the new head index, use the default value in case // Read the new head index, use the default value in case
// the freezer is already empty. // the freezer is already empty.
var newLastIndex indexEntry var newLastIndex indexEntry
@ -345,7 +361,7 @@ func (t *freezerTable) repair() error {
if err := t.head.Sync(); err != nil { if err := t.head.Sync(); err != nil {
return err return err
} }
if err := t.meta.Sync(); err != nil { if err := t.metadata.file.Sync(); err != nil {
return err return err
} }
} }
@ -372,7 +388,65 @@ func (t *freezerTable) repair() error {
return nil return nil
} }
// repairIndex validates the integrity of the index file. According to the design, func (t *freezerTable) repairIndex() error {
stat, err := t.index.Stat()
if err != nil {
return err
}
size := stat.Size()
// Validate the items in the index file to ensure the data integrity.
// It's possible some garbage data is retained in the index file after
// the power failures and should be truncated first.
size, err = t.checkIndex(size)
if err != nil {
return err
}
// If legacy metadata is detected, attempt to recover the offset from the
// index file to avoid clearing the entire table.
if t.metadata.version == freezerTableV1 {
t.logger.Info("Recovering freezer flushOffset for legacy table", "offset", size)
return t.metadata.setFlushOffset(size, true)
}
switch {
case size == indexEntrySize && t.metadata.flushOffset == 0:
// It's a new freezer table with no content.
// Move the flush offset to the end of the file.
return t.metadata.setFlushOffset(size, true)
case size == t.metadata.flushOffset:
// flushOffset is aligned with the index file, all is well.
return nil
case size > t.metadata.flushOffset:
// Extra index items have been detected beyond the flush offset. Since these
// entries correspond to data that has not been fully flushed to disk in the
// last run (because of unclean shutdown), their integrity cannot be guaranteed.
// To ensure consistency, these index items will be truncated, as there is no
// reliable way to validate or recover their associated data.
extraSize := size - t.metadata.flushOffset
if t.readonly {
return fmt.Errorf("index file(path: %s, name: %s) contains %d garbage data bytes", t.path, t.name, extraSize)
}
t.logger.Warn("Truncating freezer items after flushOffset", "size", extraSize)
return truncateFreezerFile(t.index, t.metadata.flushOffset)
default: // size < flushOffset
// Flush offset refers to a position larger than index file. The only
// possible scenario for this is: a power failure or system crash has occurred after
// truncating the segment in index file from head or tail, but without updating
// the flush offset. In this case, automatically reset the flush offset with
// the file size which implies the entire index file is complete.
if t.readonly {
return nil // do nothing in read only mode
}
t.logger.Warn("Rewinding freezer flushOffset", "old", t.metadata.flushOffset, "new", size)
return t.metadata.setFlushOffset(size, true)
}
}
// checkIndex validates the integrity of the index file. According to the design,
// the initial entry in the file denotes the earliest data file along with the // the initial entry in the file denotes the earliest data file along with the
// count of deleted items. Following this, all subsequent entries in the file must // count of deleted items. Following this, all subsequent entries in the file must
// be in order. This function identifies any corrupted entries and truncates items // be in order. This function identifies any corrupted entries and truncates items
@ -392,18 +466,11 @@ func (t *freezerTable) repair() error {
// leftover garbage or if all items in the table have zero size is impossible. // leftover garbage or if all items in the table have zero size is impossible.
// In such instances, the file will remain unchanged to prevent potential data // In such instances, the file will remain unchanged to prevent potential data
// loss or misinterpretation. // loss or misinterpretation.
func (t *freezerTable) repairIndex() error { func (t *freezerTable) checkIndex(size int64) (int64, error) {
// Retrieve the file sizes and prepare for validation
stat, err := t.index.Stat()
if err != nil {
return err
}
size := stat.Size()
// Move the read cursor to the beginning of the file // Move the read cursor to the beginning of the file
_, err = t.index.Seek(0, io.SeekStart) _, err := t.index.Seek(0, io.SeekStart)
if err != nil { if err != nil {
return err return 0, err
} }
fr := bufio.NewReader(t.index) fr := bufio.NewReader(t.index)
@ -425,21 +492,21 @@ func (t *freezerTable) repairIndex() error {
entry.unmarshalBinary(buff) entry.unmarshalBinary(buff)
return entry, nil return entry, nil
} }
truncate = func(offset int64) error { truncate = func(offset int64) (int64, error) {
if t.readonly { if t.readonly {
return fmt.Errorf("index file is corrupted at %d, size: %d", offset, size) return 0, fmt.Errorf("index file is corrupted at %d, size: %d", offset, size)
} }
if err := truncateFreezerFile(t.index, offset); err != nil { if err := truncateFreezerFile(t.index, offset); err != nil {
return err return 0, err
} }
log.Warn("Truncated index file", "offset", offset, "truncated", size-offset) log.Warn("Truncated index file", "offset", offset, "truncated", size-offset)
return nil return offset, nil
} }
) )
for offset := int64(0); offset < size; offset += indexEntrySize { for offset := int64(0); offset < size; offset += indexEntrySize {
entry, err := read() entry, err := read()
if err != nil { if err != nil {
return err return 0, err
} }
if offset == 0 { if offset == 0 {
head = entry head = entry
@ -468,10 +535,10 @@ func (t *freezerTable) repairIndex() error {
// the seek operation anyway as a precaution. // the seek operation anyway as a precaution.
_, err = t.index.Seek(0, io.SeekEnd) _, err = t.index.Seek(0, io.SeekEnd)
if err != nil { if err != nil {
return err return 0, err
} }
log.Debug("Verified index file", "items", size/indexEntrySize, "elapsed", common.PrettyDuration(time.Since(start))) log.Debug("Verified index file", "items", size/indexEntrySize, "elapsed", common.PrettyDuration(time.Since(start)))
return nil return size, nil
} }
// checkIndexItems validates the correctness of two consecutive index items based // checkIndexItems validates the correctness of two consecutive index items based
@ -550,12 +617,23 @@ func (t *freezerTable) truncateHead(items uint64) error {
// Truncate the index file first, the tail position is also considered // Truncate the index file first, the tail position is also considered
// when calculating the new freezer table length. // when calculating the new freezer table length.
length := items - t.itemOffset.Load() length := items - t.itemOffset.Load()
if err := truncateFreezerFile(t.index, int64(length+1)*indexEntrySize); err != nil { newOffset := (length + 1) * indexEntrySize
if err := truncateFreezerFile(t.index, int64(newOffset)); err != nil {
return err return err
} }
if err := t.index.Sync(); err != nil { if err := t.index.Sync(); err != nil {
return err return err
} }
// If the index file is truncated beyond the flush offset, move the flush
// offset back to the new end of the file. A crash may occur before the
// offset is updated, leaving a dangling reference that points to a position
// outside the file. If so, the offset will be reset to the new end of the
// file during the next run.
if t.metadata.flushOffset > int64(newOffset) {
if err := t.metadata.setFlushOffset(int64(newOffset), true); err != nil {
return err
}
}
// Calculate the new expected size of the data file and truncate it // Calculate the new expected size of the data file and truncate it
var expected indexEntry var expected indexEntry
if length == 0 { if length == 0 {
@ -652,7 +730,10 @@ func (t *freezerTable) truncateTail(items uint64) error {
} }
// Update the virtual tail marker and hidden these entries in table. // Update the virtual tail marker and hidden these entries in table.
t.itemHidden.Store(items) t.itemHidden.Store(items)
if err := writeMetadata(t.meta, newMetadata(items)); err != nil {
// Update the virtual tail without fsync, otherwise it will significantly
// impact the overall performance.
if err := t.metadata.setVirtualTail(items, false); err != nil {
return err return err
} }
// Hidden items still fall in the current tail file, no data file // Hidden items still fall in the current tail file, no data file
@ -664,6 +745,18 @@ func (t *freezerTable) truncateTail(items uint64) error {
if t.tailId > newTailId { if t.tailId > newTailId {
return fmt.Errorf("invalid index, tail-file %d, item-file %d", t.tailId, newTailId) return fmt.Errorf("invalid index, tail-file %d, item-file %d", t.tailId, newTailId)
} }
// Sync the table before performing the index tail truncation. A crash may
// occur after truncating the index file without updating the flush offset,
// leaving a dangling offset that points to a position outside the file.
// The offset will be rewound to the end of file during the next run
// automatically and implicitly assumes all the items within the file are
// complete.
//
// Therefore, forcibly flush everything above the offset to ensure this
// assumption is satisfied!
if err := t.doSync(); err != nil {
return err
}
// Count how many items can be deleted from the file. // Count how many items can be deleted from the file.
var ( var (
newDeleted = items newDeleted = items
@ -681,11 +774,6 @@ func (t *freezerTable) truncateTail(items uint64) error {
} }
newDeleted = current newDeleted = current
} }
// Commit the changes of metadata file first before manipulating
// the indexes file.
if err := t.meta.Sync(); err != nil {
return err
}
// Close the index file before shorten it. // Close the index file before shorten it.
if err := t.index.Close(); err != nil { if err := t.index.Close(); err != nil {
return err return err
@ -716,6 +804,21 @@ func (t *freezerTable) truncateTail(items uint64) error {
t.itemOffset.Store(newDeleted) t.itemOffset.Store(newDeleted)
t.releaseFilesBefore(t.tailId, true) t.releaseFilesBefore(t.tailId, true)
// Move the index flush offset backward due to the deletion of an index segment.
// A crash may occur before the offset is updated, leaving a dangling reference
// that points to a position outside the file. If so, the offset will be reset
// to the new end of the file during the next run.
//
// Note, both the index and head data file has been persisted before performing
// tail truncation and all the items in these files are regarded as complete.
shorten := indexEntrySize * int64(newDeleted-deleted)
if t.metadata.flushOffset <= shorten {
return fmt.Errorf("invalid index flush offset: %d, shorten: %d", t.metadata.flushOffset, shorten)
} else {
if err := t.metadata.setFlushOffset(t.metadata.flushOffset-shorten, true); err != nil {
return err
}
}
// Retrieve the new size and update the total size counter // Retrieve the new size and update the total size counter
newSize, err := t.sizeNolock() newSize, err := t.sizeNolock()
if err != nil { if err != nil {
@ -725,40 +828,30 @@ func (t *freezerTable) truncateTail(items uint64) error {
return nil return nil
} }
// Close closes all opened files. // Close closes all opened files and finalizes the freezer table for use.
// This operation must be completed before shutdown to prevent the loss of
// recent writes.
func (t *freezerTable) Close() error { func (t *freezerTable) Close() error {
t.lock.Lock() t.lock.Lock()
defer t.lock.Unlock() defer t.lock.Unlock()
if err := t.doSync(); err != nil {
return err
}
var errs []error var errs []error
doClose := func(f *os.File, sync bool, close bool) { doClose := func(f *os.File) {
if sync && !t.readonly { if err := f.Close(); err != nil {
if err := f.Sync(); err != nil { errs = append(errs, err)
errs = append(errs, err)
}
}
if close {
if err := f.Close(); err != nil {
errs = append(errs, err)
}
} }
} }
// Trying to fsync a file opened in rdonly causes "Access denied" doClose(t.index)
// error on Windows. doClose(t.metadata.file)
doClose(t.index, true, true)
doClose(t.meta, true, true)
// The preopened non-head data-files are all opened in readonly.
// The head is opened in rw-mode, so we sync it here - but since it's also
// part of t.files, it will be closed in the loop below.
doClose(t.head, true, false) // sync but do not close
for _, f := range t.files { for _, f := range t.files {
doClose(f, false, true) // close but do not sync doClose(f)
} }
t.index = nil t.index = nil
t.meta = nil
t.head = nil t.head = nil
t.metadata.file = nil
if errs != nil { if errs != nil {
return fmt.Errorf("%v", errs) return fmt.Errorf("%v", errs)
@ -917,7 +1010,7 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i
defer t.lock.RUnlock() defer t.lock.RUnlock()
// Ensure the table and the item are accessible // Ensure the table and the item are accessible
if t.index == nil || t.head == nil || t.meta == nil { if t.index == nil || t.head == nil || t.metadata.file == nil {
return nil, nil, errClosed return nil, nil, errClosed
} }
var ( var (
@ -1042,6 +1135,9 @@ func (t *freezerTable) advanceHead() error {
t.lock.Lock() t.lock.Lock()
defer t.lock.Unlock() defer t.lock.Unlock()
if err := t.doSync(); err != nil {
return err
}
// We open the next file in truncated mode -- if this file already // We open the next file in truncated mode -- if this file already
// exists, we need to start over from scratch on it. // exists, we need to start over from scratch on it.
nextID := t.headId + 1 nextID := t.headId + 1
@ -1069,7 +1165,18 @@ func (t *freezerTable) advanceHead() error {
func (t *freezerTable) Sync() error { func (t *freezerTable) Sync() error {
t.lock.Lock() t.lock.Lock()
defer t.lock.Unlock() defer t.lock.Unlock()
if t.index == nil || t.head == nil || t.meta == nil {
return t.doSync()
}
// doSync is the internal version of Sync which assumes the lock is already held.
func (t *freezerTable) doSync() error {
// Trying to fsync a file opened in rdonly causes "Access denied"
// error on Windows.
if t.readonly {
return nil
}
if t.index == nil || t.head == nil || t.metadata.file == nil {
return errClosed return errClosed
} }
var err error var err error
@ -1078,10 +1185,18 @@ func (t *freezerTable) Sync() error {
err = e err = e
} }
} }
trackError(t.index.Sync()) trackError(t.index.Sync())
trackError(t.meta.Sync())
trackError(t.head.Sync()) trackError(t.head.Sync())
// A crash may occur before the offset is updated, leaving the offset
// points to a old position. If so, the extra items above the offset
// will be truncated during the next run.
stat, err := t.index.Stat()
if err != nil {
return err
}
offset := stat.Size()
trackError(t.metadata.setFlushOffset(offset, true))
return err return err
} }
@ -1097,13 +1212,8 @@ func (t *freezerTable) dumpIndexString(start, stop int64) string {
} }
func (t *freezerTable) dumpIndex(w io.Writer, start, stop int64) { func (t *freezerTable) dumpIndex(w io.Writer, start, stop int64) {
meta, err := readMetadata(t.meta) fmt.Fprintf(w, "Version %d count %d, deleted %d, hidden %d\n",
if err != nil { t.metadata.version, t.items.Load(), t.itemOffset.Load(), t.itemHidden.Load())
fmt.Fprintf(w, "Failed to decode freezer table %v\n", err)
return
}
fmt.Fprintf(w, "Version %d count %d, deleted %d, hidden %d\n", meta.Version,
t.items.Load(), t.itemOffset.Load(), t.itemHidden.Load())
buf := make([]byte, indexEntrySize) buf := make([]byte, indexEntrySize)

View file

@ -262,18 +262,6 @@ func TestSnappyDetection(t *testing.T) {
f.Close() f.Close()
} }
// Open without snappy
{
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, false, false)
if err != nil {
t.Fatal(err)
}
if _, err = f.Retrieve(0); err == nil {
f.Close()
t.Fatalf("expected empty table")
}
}
// Open with snappy // Open with snappy
{ {
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false) f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
@ -286,6 +274,18 @@ func TestSnappyDetection(t *testing.T) {
t.Fatalf("expected no error, got %v", err) t.Fatalf("expected no error, got %v", err)
} }
} }
// Open without snappy
{
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, false, false)
if err != nil {
t.Fatal(err)
}
if _, err = f.Retrieve(0); err == nil {
f.Close()
t.Fatalf("expected empty table")
}
}
} }
func assertFileSize(f string, size int64) error { func assertFileSize(f string, size int64) error {
@ -521,93 +521,53 @@ func TestFreezerOffset(t *testing.T) {
fname := fmt.Sprintf("offset-%d", rand.Uint64()) fname := fmt.Sprintf("offset-%d", rand.Uint64())
// Fill table // Fill table
{ f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false) if err != nil {
if err != nil { t.Fatal(err)
t.Fatal(err)
}
// Write 6 x 20 bytes, splitting out into three files
batch := f.newBatch()
require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
require.NoError(t, batch.commit())
t.Log(f.dumpIndexString(0, 100))
f.Close()
} }
// Write 6 x 20 bytes, splitting out into three files
batch := f.newBatch()
require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
require.NoError(t, batch.commit())
t.Log(f.dumpIndexString(0, 100))
// Now crop it. // Now crop it.
{ f.truncateTail(4)
// delete files 0 and 1 f.Close()
for i := 0; i < 2; i++ {
p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.%04d.rdat", fname, i))
if err := os.Remove(p); err != nil {
t.Fatal(err)
}
}
// Read the index file
p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.ridx", fname))
indexFile, err := os.OpenFile(p, os.O_RDWR, 0644)
if err != nil {
t.Fatal(err)
}
indexBuf := make([]byte, 7*indexEntrySize)
indexFile.Read(indexBuf)
// Update the index file, so that we store
// [ file = 2, offset = 4 ] at index zero
zeroIndex := indexEntry{
filenum: uint32(2), // First file is 2
offset: uint32(4), // We have removed four items
}
buf := zeroIndex.append(nil)
// Overwrite index zero
copy(indexBuf, buf)
// Remove the four next indices by overwriting
copy(indexBuf[indexEntrySize:], indexBuf[indexEntrySize*5:])
indexFile.WriteAt(indexBuf, 0)
// Need to truncate the moved index items
indexFile.Truncate(indexEntrySize * (1 + 2))
indexFile.Close()
}
// Now open again // Now open again
{ f, err = newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false) if err != nil {
if err != nil { t.Fatal(err)
t.Fatal(err)
}
defer f.Close()
t.Log(f.dumpIndexString(0, 100))
// It should allow writing item 6.
batch := f.newBatch()
require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x99)))
require.NoError(t, batch.commit())
checkRetrieveError(t, f, map[uint64]error{
0: errOutOfBounds,
1: errOutOfBounds,
2: errOutOfBounds,
3: errOutOfBounds,
})
checkRetrieve(t, f, map[uint64][]byte{
4: getChunk(20, 0xbb),
5: getChunk(20, 0xaa),
6: getChunk(20, 0x99),
})
} }
t.Log(f.dumpIndexString(0, 100))
// It should allow writing item 6.
batch = f.newBatch()
require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x99)))
require.NoError(t, batch.commit())
checkRetrieveError(t, f, map[uint64]error{
0: errOutOfBounds,
1: errOutOfBounds,
2: errOutOfBounds,
3: errOutOfBounds,
})
checkRetrieve(t, f, map[uint64][]byte{
4: getChunk(20, 0xbb),
5: getChunk(20, 0xaa),
6: getChunk(20, 0x99),
})
f.Close()
// Edit the index again, with a much larger initial offset of 1M. // Edit the index again, with a much larger initial offset of 1M.
{ {
@ -1369,45 +1329,63 @@ func TestRandom(t *testing.T) {
} }
func TestIndexValidation(t *testing.T) { func TestIndexValidation(t *testing.T) {
const ( const dataSize = 10
items = 30
dataSize = 10
)
garbage := indexEntry{ garbage := indexEntry{
filenum: 100, filenum: 100,
offset: 200, offset: 200,
} }
var cases = []struct { var cases = []struct {
offset int64 write int
data []byte offset int64
expItems int data []byte
expItems int
hasCorruption bool
}{ }{
// extend index file with zero bytes at the end // extend index file with zero bytes at the end
{ {
offset: (items + 1) * indexEntrySize, write: 5,
offset: (5 + 1) * indexEntrySize,
data: make([]byte, indexEntrySize), data: make([]byte, indexEntrySize),
expItems: 30, expItems: 5,
},
// extend index file with unaligned zero bytes at the end
{
write: 5,
offset: (5 + 1) * indexEntrySize,
data: make([]byte, indexEntrySize*1.5),
expItems: 5,
}, },
// write garbage in the first non-head item // write garbage in the first non-head item
{ {
write: 5,
offset: indexEntrySize, offset: indexEntrySize,
data: garbage.append(nil), data: garbage.append(nil),
expItems: 0, expItems: 0,
}, },
// write garbage in the first non-head item // write garbage in the middle
{ {
offset: (items/2 + 1) * indexEntrySize, write: 5,
offset: 3 * indexEntrySize,
data: garbage.append(nil), data: garbage.append(nil),
expItems: items / 2, expItems: 2,
},
// fulfill the first data file (but not yet advanced), the zero bytes
// at tail should be truncated.
{
write: 10,
offset: 11 * indexEntrySize,
data: garbage.append(nil),
expItems: 10,
}, },
} }
for _, c := range cases { for _, c := range cases {
fn := fmt.Sprintf("t-%d", rand.Uint64()) fn := fmt.Sprintf("t-%d", rand.Uint64())
f, err := newTable(os.TempDir(), fn, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 100, true, false) f, err := newTable(os.TempDir(), fn, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 10*dataSize, true, false)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
writeChunks(t, f, items, dataSize) writeChunks(t, f, c.write, dataSize)
// write corrupted data // write corrupted data
f.index.WriteAt(c.data, c.offset) f.index.WriteAt(c.data, c.offset)
@ -1421,10 +1399,10 @@ func TestIndexValidation(t *testing.T) {
for i := 0; i < c.expItems; i++ { for i := 0; i < c.expItems; i++ {
exp := getChunk(10, i) exp := getChunk(10, i)
got, err := f.Retrieve(uint64(i)) got, err := f.Retrieve(uint64(i))
if err != nil { if err != nil && !c.hasCorruption {
t.Fatalf("Failed to read from table, %v", err) t.Fatalf("Failed to read from table, %v", err)
} }
if !bytes.Equal(exp, got) { if !bytes.Equal(exp, got) && !c.hasCorruption {
t.Fatalf("Unexpected item data, want: %v, got: %v", exp, got) t.Fatalf("Unexpected item data, want: %v, got: %v", exp, got)
} }
} }
@ -1433,3 +1411,163 @@ func TestIndexValidation(t *testing.T) {
} }
} }
} }
// TestFlushOffsetTracking tests the flush offset tracking. The offset moving
// in the test is mostly triggered by the advanceHead (new data file) and
// head/tail truncation.
func TestFlushOffsetTracking(t *testing.T) {
	const (
		items    = 35  // number of items written up front
		dataSize = 10  // size of each raw item in bytes
		fileSize = 100 // data file size limit, i.e. 10 items per file
	)
	fn := fmt.Sprintf("t-%d", rand.Uint64())
	f, err := newTable(os.TempDir(), fn, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), fileSize, true, false)
	if err != nil {
		t.Fatal(err)
	}
	// Data files:
	// F1(10 items) -> F2(10 items) -> F3(10 items) -> F4(5 items, non-full)
	writeChunks(t, f, items, dataSize)

	// Each case applies an operation to the shared table and states the index
	// flush offset expected afterwards. The cases are order-dependent: every
	// op builds on the table state left behind by the previous case.
	var cases = []struct {
		op     func(*freezerTable)
		offset int64
	}{
		{
			// Data files:
			// F1(10 items) -> F2(10 items) -> F3(10 items) -> F4(5 items, non-full)
			func(f *freezerTable) {}, // no-op
			31 * indexEntrySize,
		},
		{
			// Write more items to fulfill the newest data file, but the file advance
			// is not triggered.
			// Data files:
			// F1(10 items) -> F2(10 items) -> F3(10 items) -> F4(10 items, full)
			func(f *freezerTable) {
				batch := f.newBatch()
				for i := 0; i < 5; i++ {
					batch.AppendRaw(items+uint64(i), make([]byte, dataSize))
				}
				batch.commit()
			},
			31 * indexEntrySize,
		},
		{
			// Write more items to trigger the data file advance
			// Data files:
			// F1(10 items) -> F2(10 items) -> F3(10 items) -> F4(10 items) -> F5(1 item)
			func(f *freezerTable) {
				batch := f.newBatch()
				batch.AppendRaw(items+5, make([]byte, dataSize))
				batch.commit()
			},
			41 * indexEntrySize,
		},
		{
			// Head truncate
			// Data files:
			// F1(10 items) -> F2(10 items) -> F3(10 items) -> F4(10 items) -> F5(0 item)
			func(f *freezerTable) {
				f.truncateHead(items + 5)
			},
			41 * indexEntrySize,
		},
		{
			// Tail truncate
			// Data files:
			// F1(1 hidden, 9 visible) -> F2(10 items) -> F3(10 items) -> F4(10 items) -> F5(0 item)
			func(f *freezerTable) {
				f.truncateTail(1)
			},
			41 * indexEntrySize,
		},
		{
			// Tail truncate
			// Data files:
			// F2(10 items) -> F3(10 items) -> F4(10 items) -> F5(0 item)
			func(f *freezerTable) {
				f.truncateTail(10)
			},
			31 * indexEntrySize,
		},
		{
			// Tail truncate
			// Data files:
			// F4(10 items) -> F5(0 item)
			func(f *freezerTable) {
				f.truncateTail(30)
			},
			11 * indexEntrySize,
		},
		{
			// Head truncate
			// Data files:
			// F4(9 items)
			func(f *freezerTable) {
				f.truncateHead(items + 4)
			},
			10 * indexEntrySize,
		},
	}
	for _, c := range cases {
		c.op(f)
		if f.metadata.flushOffset != c.offset {
			t.Fatalf("Unexpected index flush offset, want: %d, got: %d", c.offset, f.metadata.flushOffset)
		}
	}
}
// TestTailTruncationCrash verifies that a stale index flush offset — as left
// behind by a crash between truncating the index file and persisting the new
// offset — is repaired when the table is reopened.
func TestTailTruncationCrash(t *testing.T) {
	const (
		items    = 35
		dataSize = 10
		fileSize = 100
	)
	fn := fmt.Sprintf("t-%d", rand.Uint64())
	f, err := newTable(os.TempDir(), fn, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), fileSize, true, false)
	if err != nil {
		t.Fatal(err)
	}
	// checkOffset asserts the current index flush offset of the table.
	// It reads f through the closure, so it also covers the reopened table.
	checkOffset := func(want int64) {
		if f.metadata.flushOffset != want {
			t.Fatalf("Unexpected index flush offset, want: %d, got: %d", want, f.metadata.flushOffset)
		}
	}
	// Data files:
	// F1(10 items) -> F2(10 items) -> F3(10 items) -> F4(5 items, non-full)
	writeChunks(t, f, items, dataSize)

	// The latest 5 items are not persisted yet
	checkOffset(31 * indexEntrySize)

	f.truncateTail(5)
	checkOffset(31 * indexEntrySize)

	// Truncate the first 10 items which results in the first data file
	// being removed. The offset should be moved to 26*indexEntrySize.
	f.truncateTail(10)
	checkOffset(26 * indexEntrySize)

	// Write the offset back to 31*indexEntrySize to simulate a crash
	// which occurs after truncating the index file without updating
	// the offset
	f.metadata.setFlushOffset(31*indexEntrySize, true)

	// Reopening the table must detect the stale offset and repair it.
	f, err = newTable(os.TempDir(), fn, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), fileSize, true, false)
	if err != nil {
		t.Fatal(err)
	}
	checkOffset(26 * indexEntrySize)
}

View file

@ -432,7 +432,7 @@ func (s *StateDB) SetBalance(addr common.Address, amount *uint256.Int, reason tr
} }
} }
func (s *StateDB) SetNonce(addr common.Address, nonce uint64) { func (s *StateDB) SetNonce(addr common.Address, nonce uint64, reason tracing.NonceChangeReason) {
stateObject := s.getOrNewStateObject(addr) stateObject := s.getOrNewStateObject(addr)
if stateObject != nil { if stateObject != nil {
stateObject.SetNonce(nonce) stateObject.SetNonce(nonce)

View file

@ -12,7 +12,7 @@
// GNU Lesser General Public License for more details. // GNU Lesser General Public License for more details.
// //
// You should have received a copy of the GNU Lesser General Public License // You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/> // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package state package state
@ -69,7 +69,7 @@ func newStateTestAction(addr common.Address, r *rand.Rand, index int) testAction
{ {
name: "SetNonce", name: "SetNonce",
fn: func(a testAction, s *StateDB) { fn: func(a testAction, s *StateDB) {
s.SetNonce(addr, uint64(a.args[0])) s.SetNonce(addr, uint64(a.args[0]), tracing.NonceChangeUnspecified)
}, },
args: make([]int64, 1), args: make([]int64, 1),
}, },

View file

@ -179,10 +179,13 @@ func (s *hookedStateDB) AddBalance(addr common.Address, amount *uint256.Int, rea
return prev return prev
} }
func (s *hookedStateDB) SetNonce(address common.Address, nonce uint64) { func (s *hookedStateDB) SetNonce(address common.Address, nonce uint64, reason tracing.NonceChangeReason) {
s.inner.SetNonce(address, nonce) prev := s.inner.GetNonce(address)
if s.hooks.OnNonceChange != nil { s.inner.SetNonce(address, nonce, reason)
s.hooks.OnNonceChange(address, nonce-1, nonce) if s.hooks.OnNonceChangeV2 != nil {
s.hooks.OnNonceChangeV2(address, prev, nonce, reason)
} else if s.hooks.OnNonceChange != nil {
s.hooks.OnNonceChange(address, prev, nonce)
} }
} }

View file

@ -85,7 +85,7 @@ func TestHooks(t *testing.T) {
var wants = []string{ var wants = []string{
"0xaa00000000000000000000000000000000000000.balance: 0->100 (BalanceChangeUnspecified)", "0xaa00000000000000000000000000000000000000.balance: 0->100 (BalanceChangeUnspecified)",
"0xaa00000000000000000000000000000000000000.balance: 100->50 (BalanceChangeTransfer)", "0xaa00000000000000000000000000000000000000.balance: 100->50 (BalanceChangeTransfer)",
"0xaa00000000000000000000000000000000000000.nonce: 1336->1337", "0xaa00000000000000000000000000000000000000.nonce: 0->1337",
"0xaa00000000000000000000000000000000000000.code: (0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470) ->0x1325 (0xa12ae05590de0c93a00bc7ac773c2fdb621e44f814985e72194f921c0050f728)", "0xaa00000000000000000000000000000000000000.code: (0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470) ->0x1325 (0xa12ae05590de0c93a00bc7ac773c2fdb621e44f814985e72194f921c0050f728)",
"0xaa00000000000000000000000000000000000000.storage slot 0x0000000000000000000000000000000000000000000000000000000000000001: 0x0000000000000000000000000000000000000000000000000000000000000000 ->0x0000000000000000000000000000000000000000000000000000000000000011", "0xaa00000000000000000000000000000000000000.storage slot 0x0000000000000000000000000000000000000000000000000000000000000001: 0x0000000000000000000000000000000000000000000000000000000000000000 ->0x0000000000000000000000000000000000000000000000000000000000000011",
"0xaa00000000000000000000000000000000000000.storage slot 0x0000000000000000000000000000000000000000000000000000000000000001: 0x0000000000000000000000000000000000000000000000000000000000000011 ->0x0000000000000000000000000000000000000000000000000000000000000022", "0xaa00000000000000000000000000000000000000.storage slot 0x0000000000000000000000000000000000000000000000000000000000000001: 0x0000000000000000000000000000000000000000000000000000000000000011 ->0x0000000000000000000000000000000000000000000000000000000000000022",
@ -113,7 +113,7 @@ func TestHooks(t *testing.T) {
}) })
sdb.AddBalance(common.Address{0xaa}, uint256.NewInt(100), tracing.BalanceChangeUnspecified) sdb.AddBalance(common.Address{0xaa}, uint256.NewInt(100), tracing.BalanceChangeUnspecified)
sdb.SubBalance(common.Address{0xaa}, uint256.NewInt(50), tracing.BalanceChangeTransfer) sdb.SubBalance(common.Address{0xaa}, uint256.NewInt(50), tracing.BalanceChangeTransfer)
sdb.SetNonce(common.Address{0xaa}, 1337) sdb.SetNonce(common.Address{0xaa}, 1337, tracing.NonceChangeGenesis)
sdb.SetCode(common.Address{0xaa}, []byte{0x13, 37}) sdb.SetCode(common.Address{0xaa}, []byte{0x13, 37})
sdb.SetState(common.Address{0xaa}, common.HexToHash("0x01"), common.HexToHash("0x11")) sdb.SetState(common.Address{0xaa}, common.HexToHash("0x01"), common.HexToHash("0x11"))
sdb.SetState(common.Address{0xaa}, common.HexToHash("0x01"), common.HexToHash("0x22")) sdb.SetState(common.Address{0xaa}, common.HexToHash("0x01"), common.HexToHash("0x22"))

View file

@ -60,7 +60,7 @@ func TestUpdateLeaks(t *testing.T) {
for i := byte(0); i < 255; i++ { for i := byte(0); i < 255; i++ {
addr := common.BytesToAddress([]byte{i}) addr := common.BytesToAddress([]byte{i})
state.AddBalance(addr, uint256.NewInt(uint64(11*i)), tracing.BalanceChangeUnspecified) state.AddBalance(addr, uint256.NewInt(uint64(11*i)), tracing.BalanceChangeUnspecified)
state.SetNonce(addr, uint64(42*i)) state.SetNonce(addr, uint64(42*i), tracing.NonceChangeUnspecified)
if i%2 == 0 { if i%2 == 0 {
state.SetState(addr, common.BytesToHash([]byte{i, i, i}), common.BytesToHash([]byte{i, i, i, i})) state.SetState(addr, common.BytesToHash([]byte{i, i, i}), common.BytesToHash([]byte{i, i, i, i}))
} }
@ -95,7 +95,7 @@ func TestIntermediateLeaks(t *testing.T) {
modify := func(state *StateDB, addr common.Address, i, tweak byte) { modify := func(state *StateDB, addr common.Address, i, tweak byte) {
state.SetBalance(addr, uint256.NewInt(uint64(11*i)+uint64(tweak)), tracing.BalanceChangeUnspecified) state.SetBalance(addr, uint256.NewInt(uint64(11*i)+uint64(tweak)), tracing.BalanceChangeUnspecified)
state.SetNonce(addr, uint64(42*i+tweak)) state.SetNonce(addr, uint64(42*i+tweak), tracing.NonceChangeUnspecified)
if i%2 == 0 { if i%2 == 0 {
state.SetState(addr, common.Hash{i, i, i, 0}, common.Hash{}) state.SetState(addr, common.Hash{i, i, i, 0}, common.Hash{})
state.SetState(addr, common.Hash{i, i, i, tweak}, common.Hash{i, i, i, i, tweak}) state.SetState(addr, common.Hash{i, i, i, tweak}, common.Hash{i, i, i, i, tweak})
@ -357,7 +357,7 @@ func newTestAction(addr common.Address, r *rand.Rand) testAction {
{ {
name: "SetNonce", name: "SetNonce",
fn: func(a testAction, s *StateDB) { fn: func(a testAction, s *StateDB) {
s.SetNonce(addr, uint64(a.args[0])) s.SetNonce(addr, uint64(a.args[0]), tracing.NonceChangeUnspecified)
}, },
args: make([]int64, 1), args: make([]int64, 1),
}, },

View file

@ -46,25 +46,7 @@ func u64(val uint64) *uint64 { return &val }
// contain invalid transactions // contain invalid transactions
func TestStateProcessorErrors(t *testing.T) { func TestStateProcessorErrors(t *testing.T) {
var ( var (
config = &params.ChainConfig{ config = params.MergedTestChainConfig
ChainID: big.NewInt(1),
HomesteadBlock: big.NewInt(0),
EIP150Block: big.NewInt(0),
EIP155Block: big.NewInt(0),
EIP158Block: big.NewInt(0),
ByzantiumBlock: big.NewInt(0),
ConstantinopleBlock: big.NewInt(0),
PetersburgBlock: big.NewInt(0),
IstanbulBlock: big.NewInt(0),
MuirGlacierBlock: big.NewInt(0),
BerlinBlock: big.NewInt(0),
LondonBlock: big.NewInt(0),
Ethash: new(params.EthashConfig),
TerminalTotalDifficulty: big.NewInt(0),
ShanghaiTime: new(uint64),
CancunTime: new(uint64),
PragueTime: new(uint64),
}
signer = types.LatestSigner(config) signer = types.LatestSigner(config)
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
key2, _ = crypto.HexToECDSA("0202020202020202020202020202020202020202020202020202002020202020") key2, _ = crypto.HexToECDSA("0202020202020202020202020202020202020202020202020202002020202020")
@ -425,12 +407,7 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr
} }
header.Root = common.BytesToHash(hasher.Sum(nil)) header.Root = common.BytesToHash(hasher.Sum(nil))
if config.IsCancun(header.Number, header.Time) { if config.IsCancun(header.Number, header.Time) {
var pExcess, pUsed = uint64(0), uint64(0) excess := eip4844.CalcExcessBlobGas(config, parent.Header(), header.Time)
if parent.ExcessBlobGas() != nil {
pExcess = *parent.ExcessBlobGas()
pUsed = *parent.BlobGasUsed()
}
excess := eip4844.CalcExcessBlobGas(pExcess, pUsed)
used := uint64(nBlobs * params.BlobTxBlobGasPerBlob) used := uint64(nBlobs * params.BlobTxBlobGasPerBlob)
header.ExcessBlobGas = &excess header.ExcessBlobGas = &excess
header.BlobGasUsed = &used header.BlobGasUsed = &used

View file

@ -495,7 +495,7 @@ func (st *stateTransition) execute() (*ExecutionResult, error) {
} }
} else { } else {
// Increment the nonce for the next transaction. // Increment the nonce for the next transaction.
st.state.SetNonce(msg.From, st.state.GetNonce(msg.From)+1) st.state.SetNonce(msg.From, st.state.GetNonce(msg.From)+1, tracing.NonceChangeEoACall)
// Apply EIP-7702 authorizations. // Apply EIP-7702 authorizations.
if msg.SetCodeAuthorizations != nil { if msg.SetCodeAuthorizations != nil {
@ -610,7 +610,7 @@ func (st *stateTransition) applyAuthorization(auth *types.SetCodeAuthorization)
} }
// Update nonce and account code. // Update nonce and account code.
st.state.SetNonce(authority, auth.Nonce+1) st.state.SetNonce(authority, auth.Nonce+1, tracing.NonceChangeAuthorization)
if auth.Address == (common.Address{}) { if auth.Address == (common.Address{}) {
// Delegation to zero address means clear. // Delegation to zero address means clear.
st.state.SetCode(authority, nil) st.state.SetCode(authority, nil)

View file

@ -4,6 +4,53 @@ All notable changes to the tracing interface will be documented in this file.
## [Unreleased] ## [Unreleased]
The tracing interface has been extended with backwards-compatible changes to support more use-cases and simplify tracer code. The most notable change is a state journaling library which emits reverse events when a call is reverted.
### Deprecated methods
- `OnSystemCallStart()`: This hook is deprecated in favor of `OnSystemCallStartV2(vm *VMContext)`.
- `OnNonceChange(addr common.Address, prev, new uint64)`: This hook is deprecated in favor of `OnNonceChangeV2(addr common.Address, prev, new uint64, reason NonceChangeReason)`.
### New methods
- `OnBlockHashRead(blockNum uint64, hash common.Hash)`: This hook is called when a block hash is read by EVM.
- `OnSystemCallStartV2(vm *VMContext)`. This allows access to EVM context during system calls. It is a successor to `OnSystemCallStart`.
- `OnNonceChangeV2(addr common.Address, prev, new uint64, reason NonceChangeReason)`: This hook is called when a nonce change occurs. It is a successor to `OnNonceChange`.
### New types
- `NonceChangeReason` is a new type used to provide a reason for nonce changes. Notably it includes `NonceChangeRevert` which will be emitted by the state journaling library when a nonce change is due to a revert.
### Modified types
- `VMContext.StateDB` has been extended with the `GetCodeHash(addr common.Address) common.Hash` method used to retrieve the code hash of an account.
- `BalanceChangeReason` has been extended with the `BalanceChangeRevert` reason. More on that below.
### State journaling
Tracers receive state changes events from the node. The tracer was so far expected to keep track of modified accounts and slots and revert those changes when a call frame failed. Now a utility tracer wrapper is provided which will emit "reverse change" events when a call frame fails. To use this feature the hooks have to be wrapped prior to registering the tracer. The following example demonstrates how to use the state journaling library:
```go
func init() {
tracers.LiveDirectory.Register("test", func (cfg json.RawMessage) (*tracing.Hooks, error) {
hooks, err := newTestTracer(cfg)
if err != nil {
return nil, err
}
return tracing.WrapWithJournal(hooks)
})
}
```
The state changes that are covered by the journaling library are:
- `OnBalanceChange`. Note that `OnBalanceChange` will carry the `BalanceChangeRevert` reason.
- `OnNonceChange`, `OnNonceChangeV2`
- `OnCodeChange`
- `OnStorageChange`
## [v1.14.9](https://github.com/ethereum/go-ethereum/releases/tag/v1.14.9)
### Modified types ### Modified types
- `GasChangeReason` has been extended with the following reasons which will be enabled only post-Verkle. There shouldn't be any gas changes with those reasons prior to the fork. - `GasChangeReason` has been extended with the following reasons which will be enabled only post-Verkle. There shouldn't be any gas changes with those reasons prior to the fork.

View file

@ -23,11 +23,12 @@ func _() {
_ = x[BalanceIncreaseSelfdestruct-12] _ = x[BalanceIncreaseSelfdestruct-12]
_ = x[BalanceDecreaseSelfdestruct-13] _ = x[BalanceDecreaseSelfdestruct-13]
_ = x[BalanceDecreaseSelfdestructBurn-14] _ = x[BalanceDecreaseSelfdestructBurn-14]
_ = x[BalanceChangeRevert-15]
} }
const _BalanceChangeReason_name = "BalanceChangeUnspecifiedBalanceIncreaseRewardMineUncleBalanceIncreaseRewardMineBlockBalanceIncreaseWithdrawalBalanceIncreaseGenesisBalanceBalanceIncreaseRewardTransactionFeeBalanceDecreaseGasBuyBalanceIncreaseGasReturnBalanceIncreaseDaoContractBalanceDecreaseDaoAccountBalanceChangeTransferBalanceChangeTouchAccountBalanceIncreaseSelfdestructBalanceDecreaseSelfdestructBalanceDecreaseSelfdestructBurn" const _BalanceChangeReason_name = "BalanceChangeUnspecifiedBalanceIncreaseRewardMineUncleBalanceIncreaseRewardMineBlockBalanceIncreaseWithdrawalBalanceIncreaseGenesisBalanceBalanceIncreaseRewardTransactionFeeBalanceDecreaseGasBuyBalanceIncreaseGasReturnBalanceIncreaseDaoContractBalanceDecreaseDaoAccountBalanceChangeTransferBalanceChangeTouchAccountBalanceIncreaseSelfdestructBalanceDecreaseSelfdestructBalanceDecreaseSelfdestructBurnBalanceChangeRevert"
var _BalanceChangeReason_index = [...]uint16{0, 24, 54, 84, 109, 138, 173, 194, 218, 244, 269, 290, 315, 342, 369, 400} var _BalanceChangeReason_index = [...]uint16{0, 24, 54, 84, 109, 138, 173, 194, 218, 244, 269, 290, 315, 342, 369, 400, 419}
func (i BalanceChangeReason) String() string { func (i BalanceChangeReason) String() string {
if i >= BalanceChangeReason(len(_BalanceChangeReason_index)-1) { if i >= BalanceChangeReason(len(_BalanceChangeReason_index)-1) {

View file

@ -14,6 +14,14 @@
// You should have received a copy of the GNU Lesser General Public License // You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package tracing defines hooks for 'live tracing' of block processing and transaction
// execution. Here we define the low-level [Hooks] object that carries hooks which are
// invoked by the go-ethereum core at various points in the state transition.
//
// To create a tracer that can be invoked with Geth, you need to register it using
// [github.com/ethereum/go-ethereum/eth/tracers.LiveDirectory.Register].
//
// See https://geth.ethereum.org/docs/developers/evm-tracing/live-tracing for a tutorial.
package tracing package tracing
import ( import (
@ -163,6 +171,9 @@ type (
// NonceChangeHook is called when the nonce of an account changes. // NonceChangeHook is called when the nonce of an account changes.
NonceChangeHook = func(addr common.Address, prev, new uint64) NonceChangeHook = func(addr common.Address, prev, new uint64)
// NonceChangeHookV2 is called when the nonce of an account changes.
NonceChangeHookV2 = func(addr common.Address, prev, new uint64, reason NonceChangeReason)
// CodeChangeHook is called when the code of an account changes. // CodeChangeHook is called when the code of an account changes.
CodeChangeHook = func(addr common.Address, prevCodeHash common.Hash, prevCode []byte, codeHash common.Hash, code []byte) CodeChangeHook = func(addr common.Address, prevCodeHash common.Hash, prevCode []byte, codeHash common.Hash, code []byte)
@ -171,6 +182,9 @@ type (
// LogHook is called when a log is emitted. // LogHook is called when a log is emitted.
LogHook = func(log *types.Log) LogHook = func(log *types.Log)
// BlockHashReadHook is called when EVM reads the blockhash of a block.
BlockHashReadHook = func(blockNumber uint64, hash common.Hash)
) )
type Hooks struct { type Hooks struct {
@ -195,9 +209,12 @@ type Hooks struct {
// State events // State events
OnBalanceChange BalanceChangeHook OnBalanceChange BalanceChangeHook
OnNonceChange NonceChangeHook OnNonceChange NonceChangeHook
OnNonceChangeV2 NonceChangeHookV2
OnCodeChange CodeChangeHook OnCodeChange CodeChangeHook
OnStorageChange StorageChangeHook OnStorageChange StorageChangeHook
OnLog LogHook OnLog LogHook
// Block hash read
OnBlockHashRead BlockHashReadHook
} }
// BalanceChangeReason is used to indicate the reason for a balance change, useful // BalanceChangeReason is used to indicate the reason for a balance change, useful
@ -249,6 +266,10 @@ const (
// account within the same tx (captured at end of tx). // account within the same tx (captured at end of tx).
// Note it doesn't account for a self-destruct which appoints itself as recipient. // Note it doesn't account for a self-destruct which appoints itself as recipient.
BalanceDecreaseSelfdestructBurn BalanceChangeReason = 14 BalanceDecreaseSelfdestructBurn BalanceChangeReason = 14
// BalanceChangeRevert is emitted when the balance is reverted back to a previous value due to call failure.
// It is only emitted when the tracer has opted in to use the journaling wrapper (WrapWithJournal).
BalanceChangeRevert BalanceChangeReason = 15
) )
// GasChangeReason is used to indicate the reason for a gas change, useful // GasChangeReason is used to indicate the reason for a gas change, useful
@ -321,3 +342,29 @@ const (
// it will be "manually" tracked by a direct emit of the gas change event. // it will be "manually" tracked by a direct emit of the gas change event.
GasChangeIgnored GasChangeReason = 0xFF GasChangeIgnored GasChangeReason = 0xFF
) )
// NonceChangeReason is used to indicate the reason for a nonce change.
type NonceChangeReason byte
const (
NonceChangeUnspecified NonceChangeReason = 0
// NonceChangeGenesis is the nonce allocated to accounts at genesis.
NonceChangeGenesis NonceChangeReason = 1
// NonceChangeEoACall is the nonce change due to an EoA call.
NonceChangeEoACall NonceChangeReason = 2
// NonceChangeContractCreator is the nonce change of an account creating a contract.
NonceChangeContractCreator NonceChangeReason = 3
// NonceChangeNewContract is the nonce change of a newly created contract.
NonceChangeNewContract NonceChangeReason = 4
// NonceChangeAuthorization is the nonce change due to an EIP-7702 authorization.
NonceChangeAuthorization NonceChangeReason = 5
// NonceChangeRevert is emitted when the nonce is reverted back to a previous value due to call failure.
// It is only emitted when the tracer has opted in to use the journaling wrapper (WrapWithJournal).
NonceChangeRevert NonceChangeReason = 6
)

237
core/tracing/journal.go Normal file
View file

@ -0,0 +1,237 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package tracing
import (
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
)
// journal is a state change journal to be wrapped around a tracer.
// It will emit the state change hooks with reverse values when a call reverts.
type journal struct {
	hooks     *Hooks  // wrapped tracer hooks receiving both forward and reverse events
	entries   []entry // recorded state changes, oldest first
	revisions []int   // stack of journal lengths, one marker per entered call frame
}

// entry is a single recorded state change that knows how to emit its own
// reverse event against a set of tracer hooks.
type entry interface {
	revert(tracer *Hooks)
}
// WrapWithJournal wraps the given tracer hooks with a journaling layer that
// emits reverse state-change events whenever a call frame reverts. The given
// hooks are not modified; a wrapped copy is returned.
func WrapWithJournal(hooks *Hooks) (*Hooks, error) {
	if hooks == nil {
		return nil, fmt.Errorf("wrapping nil tracer")
	}
	// If the tracer subscribes to no state-change events at all, journaling
	// adds nothing: hand back the hooks untouched.
	tracksState := hooks.OnBalanceChange != nil || hooks.OnNonceChange != nil ||
		hooks.OnNonceChangeV2 != nil || hooks.OnCodeChange != nil || hooks.OnStorageChange != nil
	if !tracksState {
		return hooks, nil
	}
	if hooks.OnNonceChange != nil && hooks.OnNonceChangeV2 != nil {
		return nil, fmt.Errorf("cannot have both OnNonceChange and OnNonceChangeV2")
	}
	j := &journal{hooks: hooks}
	// Shallow-copy all hooks, then re-route the scope hooks through the
	// journal so it can snapshot and unwind per call frame.
	wrapped := *hooks
	wrapped.OnTxEnd = j.OnTxEnd
	wrapped.OnEnter = j.OnEnter
	wrapped.OnExit = j.OnExit
	// Route each state-change hook the tracer actually implements through
	// the journal as well.
	if hooks.OnBalanceChange != nil {
		wrapped.OnBalanceChange = j.OnBalanceChange
	}
	if hooks.OnNonceChange != nil || hooks.OnNonceChangeV2 != nil {
		// Whichever nonce hook version the tracer uses, the journal captures
		// changes via the V2 entry point so the change reason is retained.
		wrapped.OnNonceChangeV2 = j.OnNonceChangeV2
		// Precaution: make sure the EVM can never fire both versions.
		wrapped.OnNonceChange = nil
	}
	if hooks.OnCodeChange != nil {
		wrapped.OnCodeChange = j.OnCodeChange
	}
	if hooks.OnStorageChange != nil {
		wrapped.OnStorageChange = j.OnStorageChange
	}
	return &wrapped, nil
}
// reset empties the journal so it can be reused for the next transaction.
// Semantically the same as allocating a fresh journal, but the backing
// arrays of both slices are kept for reuse.
func (j *journal) reset() {
	j.entries, j.revisions = j.entries[:0], j.revisions[:0]
}
// snapshot pushes the current journal length onto the revision stack,
// marking the point to unwind to if the current frame reverts.
func (j *journal) snapshot() {
	j.revisions = append(j.revisions, len(j.entries))
}
// revert undoes every state change recorded since the last snapshot by
// emitting reverse events through the given hooks, then discards both the
// reverted entries and the revision marker.
func (j *journal) revert(hooks *Hooks) {
	marker := j.revisions[len(j.revisions)-1]
	// Unwind newest-first so nested changes are rolled back in order.
	undo := j.entries[marker:]
	for i := len(undo) - 1; i >= 0; i-- {
		undo[i].revert(hooks)
	}
	j.entries = j.entries[:marker]
	j.popRevision()
}
// popRevision drops the most recent snapshot marker without reverting
// anything, effectively merging the frame's changes into its parent.
func (j *journal) popRevision() {
	last := len(j.revisions) - 1
	j.revisions = j.revisions[:last]
}
// OnTxEnd clears the journal — each transaction owns its own EVM call
// stack — and then forwards the event to the wrapped tracer, if subscribed.
func (j *journal) OnTxEnd(receipt *types.Receipt, err error) {
	j.reset()
	if inner := j.hooks.OnTxEnd; inner != nil {
		inner(receipt, err)
	}
}
// OnEnter records a journal revision for the new EVM call frame, then
// forwards the event to the wrapped tracer, if subscribed.
func (j *journal) OnEnter(depth int, typ byte, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
	j.snapshot()
	if inner := j.hooks.OnEnter; inner != nil {
		inner(depth, typ, from, to, input, gas, value)
	}
}
// OnExit closes out the frame's revision: on revert, all state changes made
// within the frame are replayed in reverse; otherwise the revision marker is
// simply discarded. The event is then forwarded to the wrapped tracer.
func (j *journal) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) {
	if !reverted {
		j.popRevision()
	} else {
		j.revert(j.hooks)
	}
	if inner := j.hooks.OnExit; inner != nil {
		inner(depth, output, gasUsed, err, reverted)
	}
}
// OnBalanceChange journals the balance transition for possible rollback and
// forwards the event to the wrapped tracer.
func (j *journal) OnBalanceChange(addr common.Address, prev, new *big.Int, reason BalanceChangeReason) {
	change := balanceChange{addr: addr, prev: prev, new: new}
	j.entries = append(j.entries, change)
	if inner := j.hooks.OnBalanceChange; inner != nil {
		inner(addr, prev, new, reason)
	}
}
// OnNonceChangeV2 journals the nonce transition for possible rollback and
// forwards the event to whichever nonce hook the wrapped tracer implements.
func (j *journal) OnNonceChangeV2(addr common.Address, prev, new uint64, reason NonceChangeReason) {
	// A contract creation bumps the creator's nonce, and that bump is not
	// undone when the creation fails — so it is deliberately not journaled.
	if reason != NonceChangeContractCreator {
		change := nonceChange{addr: addr, prev: prev, new: new}
		j.entries = append(j.entries, change)
	}
	switch {
	case j.hooks.OnNonceChangeV2 != nil:
		j.hooks.OnNonceChangeV2(addr, prev, new, reason)
	case j.hooks.OnNonceChange != nil:
		j.hooks.OnNonceChange(addr, prev, new)
	}
}
// OnCodeChange journals the code transition for possible rollback and
// forwards the event to the wrapped tracer.
func (j *journal) OnCodeChange(addr common.Address, prevCodeHash common.Hash, prevCode []byte, codeHash common.Hash, code []byte) {
	change := codeChange{
		addr:         addr,
		prevCodeHash: prevCodeHash,
		prevCode:     prevCode,
		newCodeHash:  codeHash,
		newCode:      code,
	}
	j.entries = append(j.entries, change)
	if inner := j.hooks.OnCodeChange; inner != nil {
		inner(addr, prevCodeHash, prevCode, codeHash, code)
	}
}
// OnStorageChange journals the storage slot transition for possible rollback
// and forwards the event to the wrapped tracer.
func (j *journal) OnStorageChange(addr common.Address, slot common.Hash, prev, new common.Hash) {
	change := storageChange{addr: addr, slot: slot, prev: prev, new: new}
	j.entries = append(j.entries, change)
	if inner := j.hooks.OnStorageChange; inner != nil {
		inner(addr, slot, prev, new)
	}
}
// Journal entry types. Each records enough of the pre-change state to emit
// the inverse hook event when the enclosing call frame reverts.
type (
	// balanceChange records a balance transition for addr.
	balanceChange struct {
		addr common.Address
		prev *big.Int
		new  *big.Int
	}
	// nonceChange records a nonce transition for addr.
	nonceChange struct {
		addr common.Address
		prev uint64
		new  uint64
	}
	// codeChange records a code transition for addr, keeping both the old
	// and new code (and their hashes) so the change can be fully undone.
	codeChange struct {
		addr         common.Address
		prevCodeHash common.Hash
		prevCode     []byte
		newCodeHash  common.Hash
		newCode      []byte
	}
	// storageChange records a single storage-slot transition for addr.
	storageChange struct {
		addr common.Address
		slot common.Hash
		prev common.Hash
		new  common.Hash
	}
)
// revert emits the inverse balance event (new -> prev) with the Revert reason.
func (b balanceChange) revert(hooks *Hooks) {
	if hook := hooks.OnBalanceChange; hook != nil {
		hook(b.addr, b.new, b.prev, BalanceChangeRevert)
	}
}
// revert emits the inverse nonce event (new -> prev), preferring the V2 hook
// and falling back to the legacy OnNonceChange.
func (n nonceChange) revert(hooks *Hooks) {
	switch {
	case hooks.OnNonceChangeV2 != nil:
		hooks.OnNonceChangeV2(n.addr, n.new, n.prev, NonceChangeRevert)
	case hooks.OnNonceChange != nil:
		hooks.OnNonceChange(n.addr, n.new, n.prev)
	}
}
// revert emits the inverse code event, restoring the previous code and hash.
func (c codeChange) revert(hooks *Hooks) {
	if hook := hooks.OnCodeChange; hook != nil {
		hook(c.addr, c.newCodeHash, c.newCode, c.prevCodeHash, c.prevCode)
	}
}
// revert emits the inverse storage event (new -> prev) for the slot.
func (s storageChange) revert(hooks *Hooks) {
	if hook := hooks.OnStorageChange; hook != nil {
		hook(s.addr, s.slot, s.new, s.prev)
	}
}

View file

@ -0,0 +1,335 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package tracing
import (
"errors"
"math/big"
"reflect"
"testing"
"github.com/ethereum/go-ethereum/common"
)
// testTracer records the state communicated through the tracing hooks so
// tests can assert on the final, post-revert view. The tests drive a single
// address, so no per-address keying is needed.
type testTracer struct {
	t       *testing.T
	bal     *big.Int
	nonce   uint64
	code    []byte
	storage map[common.Hash]common.Hash
}
// OnBalanceChange stores the new balance and checks that the reported
// previous balance matches the one the tracer last observed.
func (t *testTracer) OnBalanceChange(addr common.Address, prev *big.Int, new *big.Int, reason BalanceChangeReason) {
	t.t.Logf("OnBalanceChange(%v, %v -> %v, %v)", addr, prev, new, reason)
	if known := t.bal; known != nil && known.Cmp(prev) != 0 {
		t.t.Errorf(" !! wrong prev balance (expected %v)", known)
	}
	t.bal = new
}
// OnNonceChange (legacy hook) records the latest observed nonce.
func (t *testTracer) OnNonceChange(addr common.Address, prev uint64, new uint64) {
	t.t.Logf("OnNonceChange(%v, %v -> %v)", addr, prev, new)
	nonce := new
	t.nonce = nonce
}
// OnNonceChangeV2 records the latest observed nonce (reason is only logged).
func (t *testTracer) OnNonceChangeV2(addr common.Address, prev uint64, new uint64, reason NonceChangeReason) {
	t.t.Logf("OnNonceChangeV2(%v, %v -> %v, %v)", addr, prev, new, reason)
	nonce := new
	t.nonce = nonce
}
// OnCodeChange records the latest observed contract code.
func (t *testTracer) OnCodeChange(addr common.Address, prevCodeHash common.Hash, prevCode []byte, codeHash common.Hash, code []byte) {
	t.t.Logf("OnCodeChange(%v, %v -> %v)", addr, prevCodeHash, codeHash)
	newCode := code
	t.code = newCode
}
// OnStorageChange mirrors storage updates into the tracer's shadow map so
// tests can inspect the post-journal state. Writing the zero hash deletes
// the slot, matching state-clearing semantics.
//
// Fix: the log line previously read "OnStorageCodeChange", naming a hook
// that does not exist; it now reports the actual hook name.
func (t *testTracer) OnStorageChange(addr common.Address, slot common.Hash, prev common.Hash, new common.Hash) {
	t.t.Logf("OnStorageChange(%v, %v, %v -> %v)", addr, slot, prev, new)
	if t.storage == nil {
		// Lazily allocate so the zero-value tracer works out of the box.
		t.storage = make(map[common.Hash]common.Hash)
	}
	if new == (common.Hash{}) {
		// A zero new value means the slot was cleared.
		delete(t.storage, slot)
	} else {
		t.storage[slot] = new
	}
}
// TestJournalIntegration exercises the journal end to end: an inner call
// frame makes balance/nonce/storage changes and reverts, while the outer
// frame commits. Only the outer frame's effects must remain visible to the
// wrapped tracer afterwards.
func TestJournalIntegration(t *testing.T) {
	tr := &testTracer{t: t}
	wr, err := WrapWithJournal(&Hooks{OnBalanceChange: tr.OnBalanceChange, OnNonceChange: tr.OnNonceChange, OnCodeChange: tr.OnCodeChange, OnStorageChange: tr.OnStorageChange})
	if err != nil {
		t.Fatalf("failed to wrap test tracer: %v", err)
	}
	addr := common.HexToAddress("0x1234")
	{
		// Outer frame (depth 0): these changes are committed below.
		wr.OnEnter(0, 0, addr, addr, nil, 1000, big.NewInt(0))
		wr.OnBalanceChange(addr, nil, big.NewInt(100), BalanceChangeUnspecified)
		wr.OnCodeChange(addr, common.Hash{}, nil, common.Hash{}, []byte{1, 2, 3})
		wr.OnStorageChange(addr, common.Hash{1}, common.Hash{}, common.Hash{2})
		{
			// Inner frame (depth 1): everything here is reverted on exit.
			wr.OnEnter(1, 0, addr, addr, nil, 1000, big.NewInt(0))
			wr.OnNonceChangeV2(addr, 0, 1, NonceChangeUnspecified)
			wr.OnBalanceChange(addr, big.NewInt(100), big.NewInt(200), BalanceChangeUnspecified)
			wr.OnBalanceChange(addr, big.NewInt(200), big.NewInt(250), BalanceChangeUnspecified)
			wr.OnStorageChange(addr, common.Hash{1}, common.Hash{2}, common.Hash{3})
			wr.OnStorageChange(addr, common.Hash{2}, common.Hash{}, common.Hash{4})
			wr.OnExit(1, nil, 100, errors.New("revert"), true)
		}
		wr.OnExit(0, nil, 150, nil, false)
	}
	// Expect only the outer-frame state: balance 100, untouched nonce, the
	// committed code, and a single storage slot {1} -> {2}.
	if tr.bal.Cmp(big.NewInt(100)) != 0 {
		t.Fatalf("unexpected balance: %v", tr.bal)
	}
	if tr.nonce != 0 {
		t.Fatalf("unexpected nonce: %v", tr.nonce)
	}
	if len(tr.code) != 3 {
		t.Fatalf("unexpected code: %v", tr.code)
	}
	if len(tr.storage) != 1 {
		t.Fatalf("unexpected storage len. want %d, have %d", 1, len(tr.storage))
	}
	if tr.storage[common.Hash{1}] != (common.Hash{2}) {
		t.Fatalf("unexpected storage. want %v, have %v", common.Hash{2}, tr.storage[common.Hash{1}])
	}
}
// TestJournalTopRevert checks that when the outermost frame itself reverts,
// every change — including those made directly in the top frame — is undone,
// leaving the tracer at its initial view.
func TestJournalTopRevert(t *testing.T) {
	tr := &testTracer{t: t}
	wr, err := WrapWithJournal(&Hooks{OnBalanceChange: tr.OnBalanceChange, OnNonceChange: tr.OnNonceChange})
	if err != nil {
		t.Fatalf("failed to wrap test tracer: %v", err)
	}
	addr := common.HexToAddress("0x1234")
	{
		// Top frame: makes a balance change, then reverts at the end.
		wr.OnEnter(0, 0, addr, addr, nil, 1000, big.NewInt(0))
		wr.OnBalanceChange(addr, big.NewInt(0), big.NewInt(100), BalanceChangeUnspecified)
		{
			// Nested frame also reverts; its changes are undone first.
			wr.OnEnter(1, 0, addr, addr, nil, 1000, big.NewInt(0))
			wr.OnNonceChangeV2(addr, 0, 1, NonceChangeUnspecified)
			wr.OnBalanceChange(addr, big.NewInt(100), big.NewInt(200), BalanceChangeUnspecified)
			wr.OnBalanceChange(addr, big.NewInt(200), big.NewInt(250), BalanceChangeUnspecified)
			wr.OnExit(1, nil, 100, errors.New("revert"), true)
		}
		wr.OnExit(0, nil, 150, errors.New("revert"), true)
	}
	// Everything reverted: balance back to 0, nonce untouched.
	if tr.bal.Cmp(big.NewInt(0)) != 0 {
		t.Fatalf("unexpected balance: %v", tr.bal)
	}
	if tr.nonce != 0 {
		t.Fatalf("unexpected nonce: %v", tr.nonce)
	}
}
// This test checks that changes in nested calls are reverted properly: a
// mix of committing and reverting sibling frames at depth 2 runs inside a
// depth-1 frame that ultimately reverts, which must roll the balance all
// the way back to the value set by the (committing) top frame.
func TestJournalNestedCalls(t *testing.T) {
	tr := &testTracer{t: t}
	wr, err := WrapWithJournal(&Hooks{OnBalanceChange: tr.OnBalanceChange, OnNonceChange: tr.OnNonceChange})
	if err != nil {
		t.Fatalf("failed to wrap test tracer: %v", err)
	}
	addr := common.HexToAddress("0x1234")
	{
		wr.OnEnter(0, 0, addr, addr, nil, 1000, big.NewInt(0))
		wr.OnBalanceChange(addr, big.NewInt(0), big.NewInt(100), BalanceChangeUnspecified)
		{
			// Depth-1 frame reverts at the end, undoing everything below.
			wr.OnEnter(1, 0, addr, addr, nil, 1000, big.NewInt(0))
			wr.OnBalanceChange(addr, big.NewInt(100), big.NewInt(200), BalanceChangeUnspecified)
			{
				// Empty committing frame.
				wr.OnEnter(2, 0, addr, addr, nil, 1000, big.NewInt(0))
				wr.OnExit(2, nil, 100, nil, false)
			}
			{
				// Committing frame with a balance change (200 -> 300).
				wr.OnEnter(2, 0, addr, addr, nil, 1000, big.NewInt(0))
				wr.OnBalanceChange(addr, big.NewInt(200), big.NewInt(300), BalanceChangeUnspecified)
				wr.OnExit(2, nil, 100, nil, false)
			}
			{
				// Another empty committing frame.
				wr.OnEnter(2, 0, addr, addr, nil, 1000, big.NewInt(0))
				wr.OnExit(2, nil, 100, nil, false)
			}
			wr.OnBalanceChange(addr, big.NewInt(300), big.NewInt(400), BalanceChangeUnspecified)
			{
				// Reverting frame: its 400 -> 500 change is undone immediately.
				wr.OnEnter(2, 0, addr, addr, nil, 1000, big.NewInt(0))
				wr.OnBalanceChange(addr, big.NewInt(400), big.NewInt(500), BalanceChangeUnspecified)
				wr.OnExit(2, nil, 100, errors.New("revert"), true)
			}
			{
				// Empty reverting frame.
				wr.OnEnter(2, 0, addr, addr, nil, 1000, big.NewInt(0))
				wr.OnExit(2, nil, 100, errors.New("revert"), true)
			}
			{
				// Committing frame (400 -> 600) — still undone by the parent's revert.
				wr.OnEnter(2, 0, addr, addr, nil, 1000, big.NewInt(0))
				wr.OnBalanceChange(addr, big.NewInt(400), big.NewInt(600), BalanceChangeUnspecified)
				wr.OnExit(2, nil, 100, nil, false)
			}
			wr.OnExit(1, nil, 100, errors.New("revert"), true)
		}
		wr.OnExit(0, nil, 150, nil, false)
	}
	// Only the top frame's 0 -> 100 change survives.
	if tr.bal.Uint64() != 100 {
		t.Fatalf("unexpected balance: %v", tr.bal)
	}
}
// TestNonceIncOnCreate verifies that the creator's nonce bump performed for
// a CREATE is NOT journaled: even though the creation frame reverts, the
// nonce increment must persist (matching consensus behavior).
func TestNonceIncOnCreate(t *testing.T) {
	const opCREATE = 0xf0
	tr := &testTracer{t: t}
	wr, err := WrapWithJournal(&Hooks{OnNonceChange: tr.OnNonceChange})
	if err != nil {
		t.Fatalf("failed to wrap test tracer: %v", err)
	}
	addr := common.HexToAddress("0x1234")
	{
		wr.OnEnter(0, opCREATE, addr, addr, nil, 1000, big.NewInt(0))
		wr.OnNonceChangeV2(addr, 0, 1, NonceChangeContractCreator)
		wr.OnExit(0, nil, 100, errors.New("revert"), true)
	}
	// The creator-nonce change survives the revert.
	if tr.nonce != 1 {
		t.Fatalf("unexpected nonce: %v", tr.nonce)
	}
}
// TestOnNonceChangeV2 checks that a tracer installing only the V2 nonce hook
// receives journaled nonce changes and their reverts: the 0 -> 1 change in a
// reverted frame must be undone, leaving the observed nonce at 0.
func TestOnNonceChangeV2(t *testing.T) {
	tr := &testTracer{t: t}
	wr, err := WrapWithJournal(&Hooks{OnNonceChangeV2: tr.OnNonceChangeV2})
	if err != nil {
		t.Fatalf("failed to wrap test tracer: %v", err)
	}
	addr := common.HexToAddress("0x1234")
	{
		wr.OnEnter(2, 0, addr, addr, nil, 1000, big.NewInt(0))
		wr.OnNonceChangeV2(addr, 0, 1, NonceChangeEoACall)
		wr.OnExit(2, nil, 100, nil, true)
	}
	if tr.nonce != 0 {
		t.Fatalf("unexpected nonce: %v", tr.nonce)
	}
}
// TestAllHooksCalled uses reflection to invoke every hook exposed by the
// journal wrapper with zero-valued arguments, and verifies each one is
// forwarded to the underlying tracer. This guards against the wrapper
// silently dropping a newly added hook.
func TestAllHooksCalled(t *testing.T) {
	tracer := newTracerAllHooks()
	hooks := tracer.hooks()
	wrapped, err := WrapWithJournal(hooks)
	if err != nil {
		t.Fatalf("failed to wrap hooks with journal: %v", err)
	}
	// Get the underlying value of the wrapped hooks
	wrappedValue := reflect.ValueOf(wrapped).Elem()
	wrappedType := wrappedValue.Type()
	// Iterate over all fields of the wrapped hooks
	for i := 0; i < wrappedType.NumField(); i++ {
		field := wrappedType.Field(i)
		// Skip fields that are not function types
		if field.Type.Kind() != reflect.Func {
			continue
		}
		// Skip non-hooks, i.e. Copy
		if field.Name == "copy" {
			continue
		}
		// Skip if field is not set
		if wrappedValue.Field(i).IsNil() {
			continue
		}
		// Get the method
		method := wrappedValue.Field(i)
		// Call the method with zero values
		params := make([]reflect.Value, method.Type().NumIn())
		for j := 0; j < method.Type().NumIn(); j++ {
			params[j] = reflect.Zero(method.Type().In(j))
		}
		method.Call(params)
	}
	// Check if all hooks were called
	if tracer.numCalled() != tracer.hooksCount() {
		t.Errorf("Not all hooks were called. Expected %d, got %d", tracer.hooksCount(), tracer.numCalled())
	}
	// Report exactly which hooks (if any) were missed, for easier debugging.
	for hookName, called := range tracer.hooksCalled {
		if !called {
			t.Errorf("Hook %s was not called", hookName)
		}
	}
}
// tracerAllHooks tracks, by field name, which Hooks callbacks have fired.
// It is used to verify that the journal wrapper forwards every hook.
type tracerAllHooks struct {
	hooksCalled map[string]bool
}
// newTracerAllHooks builds a tracer whose hooksCalled map holds one entry
// per Hooks field, all initialised to false — the map size thereby doubles
// as the total hook count. The legacy OnNonceChange entry is removed, since
// the journal wrapper only installs its V2 replacement.
func newTracerAllHooks() *tracerAllHooks {
	hooksType := reflect.TypeOf((*Hooks)(nil)).Elem()
	called := make(map[string]bool, hooksType.NumField())
	for i := 0; i < hooksType.NumField(); i++ {
		called[hooksType.Field(i).Name] = false
	}
	delete(called, "OnNonceChange")
	return &tracerAllHooks{hooksCalled: called}
}
// hooksCount reports how many distinct hooks the tracer expects to observe.
func (t *tracerAllHooks) hooksCount() int {
	total := len(t.hooksCalled)
	return total
}
// numCalled reports how many hooks have fired so far.
func (t *tracerAllHooks) numCalled() int {
	total := 0
	for _, fired := range t.hooksCalled {
		if fired {
			total++
		}
	}
	return total
}
// hooks constructs a Hooks value in which every field is a reflect-generated
// function that marks the corresponding name in t.hooksCalled when invoked.
func (t *tracerAllHooks) hooks() *Hooks {
	h := &Hooks{}
	// Create a function for each hook that sets the
	// corresponding hooksCalled field to true.
	hooksValue := reflect.ValueOf(h).Elem()
	for i := 0; i < hooksValue.NumField(); i++ {
		// field is declared inside the loop, so each closure below captures
		// its own copy rather than the loop's final value.
		field := hooksValue.Type().Field(i)
		// Leave the legacy nonce hook nil; see newTracerAllHooks.
		if field.Name == "OnNonceChange" {
			continue
		}
		hookMethod := reflect.MakeFunc(field.Type, func(args []reflect.Value) []reflect.Value {
			t.hooksCalled[field.Name] = true
			return nil
		})
		hooksValue.Field(i).Set(hookMethod)
	}
	return h
}

View file

@ -12,7 +12,7 @@
// GNU Lesser General Public License for more details. // GNU Lesser General Public License for more details.
// //
// You should have received a copy of the GNU Lesser General Public License // You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/> // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core package core

View file

@ -12,7 +12,7 @@
// GNU Lesser General Public License for more details. // GNU Lesser General Public License for more details.
// //
// You should have received a copy of the GNU Lesser General Public License // You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/> // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core package core

View file

@ -51,11 +51,6 @@ const (
// transaction. There can be multiple of these embedded into a single tx. // transaction. There can be multiple of these embedded into a single tx.
blobSize = params.BlobTxFieldElementsPerBlob * params.BlobTxBytesPerFieldElement blobSize = params.BlobTxFieldElementsPerBlob * params.BlobTxBytesPerFieldElement
// maxBlobsPerTransaction is the maximum number of blobs a single transaction
// is allowed to contain. Whilst the spec states it's unlimited, the block
// data slots are protocol bound, which implicitly also limit this.
maxBlobsPerTransaction = params.MaxBlobGasPerBlock / params.BlobTxBlobGasPerBlob
// txAvgSize is an approximate byte size of a transaction metadata to avoid // txAvgSize is an approximate byte size of a transaction metadata to avoid
// tiny overflows causing all txs to move a shelf higher, wasting disk space. // tiny overflows causing all txs to move a shelf higher, wasting disk space.
txAvgSize = 4 * 1024 txAvgSize = 4 * 1024
@ -223,6 +218,11 @@ func newBlobTxMeta(id uint64, size uint32, tx *types.Transaction) *blobTxMeta {
// very relaxed ones can be included even if the fees go up, when the closer // very relaxed ones can be included even if the fees go up, when the closer
// ones could already be invalid. // ones could already be invalid.
// //
// - Because the maximum number of blobs allowed in a block can change per
// fork, the pool is designed to handle the maximum number of blobs allowed
// in the chain's latest defined fork -- even if it isn't active. This
// avoids needing to upgrade the database around the fork boundary.
//
// When the pool eventually reaches saturation, some old transactions - that may // When the pool eventually reaches saturation, some old transactions - that may
// never execute - will need to be evicted in favor of newer ones. The eviction // never execute - will need to be evicted in favor of newer ones. The eviction
// strategy is quite complex: // strategy is quite complex:
@ -387,7 +387,8 @@ func (p *BlobPool) Init(gasTip uint64, head *types.Header, reserve txpool.Addres
fails = append(fails, id) fails = append(fails, id)
} }
} }
store, err := billy.Open(billy.Options{Path: queuedir, Repair: true}, newSlotter(), index) slotter := newSlotter(eip4844.LatestMaxBlobsPerBlock(p.chain.Config()))
store, err := billy.Open(billy.Options{Path: queuedir, Repair: true}, slotter, index)
if err != nil { if err != nil {
return err return err
} }
@ -414,13 +415,13 @@ func (p *BlobPool) Init(gasTip uint64, head *types.Header, reserve txpool.Addres
blobfee = uint256.NewInt(params.BlobTxMinBlobGasprice) blobfee = uint256.NewInt(params.BlobTxMinBlobGasprice)
) )
if p.head.ExcessBlobGas != nil { if p.head.ExcessBlobGas != nil {
blobfee = uint256.MustFromBig(eip4844.CalcBlobFee(*p.head.ExcessBlobGas)) blobfee = uint256.MustFromBig(eip4844.CalcBlobFee(p.chain.Config(), p.head))
} }
p.evict = newPriceHeap(basefee, blobfee, p.index) p.evict = newPriceHeap(basefee, blobfee, p.index)
// Pool initialized, attach the blob limbo to it to track blobs included // Pool initialized, attach the blob limbo to it to track blobs included
// recently but not yet finalized // recently but not yet finalized
p.limbo, err = newLimbo(limbodir) p.limbo, err = newLimbo(limbodir, eip4844.LatestMaxBlobsPerBlock(p.chain.Config()))
if err != nil { if err != nil {
p.Close() p.Close()
return err return err
@ -834,7 +835,7 @@ func (p *BlobPool) Reset(oldHead, newHead *types.Header) {
blobfee = uint256.MustFromBig(big.NewInt(params.BlobTxMinBlobGasprice)) blobfee = uint256.MustFromBig(big.NewInt(params.BlobTxMinBlobGasprice))
) )
if newHead.ExcessBlobGas != nil { if newHead.ExcessBlobGas != nil {
blobfee = uint256.MustFromBig(eip4844.CalcBlobFee(*newHead.ExcessBlobGas)) blobfee = uint256.MustFromBig(eip4844.CalcBlobFee(p.chain.Config(), newHead))
} }
p.evict.reinit(basefee, blobfee, false) p.evict.reinit(basefee, blobfee, false)
@ -1268,7 +1269,7 @@ func (p *BlobPool) GetBlobs(vhashes []common.Hash) ([]*kzg4844.Blob, []*kzg4844.
// Add inserts a set of blob transactions into the pool if they pass validation (both // Add inserts a set of blob transactions into the pool if they pass validation (both
// consensus validity and pool restrictions). // consensus validity and pool restrictions).
func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool) []error { func (p *BlobPool) Add(txs []*types.Transaction, sync bool) []error {
var ( var (
adds = make([]*types.Transaction, 0, len(txs)) adds = make([]*types.Transaction, 0, len(txs))
errs = make([]error, len(txs)) errs = make([]error, len(txs))
@ -1598,7 +1599,8 @@ func (p *BlobPool) updateStorageMetrics() {
metrics.GetOrRegisterGauge(fmt.Sprintf(shelfSlotusedGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.FilledSlots)) metrics.GetOrRegisterGauge(fmt.Sprintf(shelfSlotusedGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.FilledSlots))
metrics.GetOrRegisterGauge(fmt.Sprintf(shelfSlotgapsGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.GappedSlots)) metrics.GetOrRegisterGauge(fmt.Sprintf(shelfSlotgapsGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.GappedSlots))
if shelf.SlotSize/blobSize > maxBlobsPerTransaction { maxBlobs := eip4844.LatestMaxBlobsPerBlock(p.chain.Config())
if shelf.SlotSize/blobSize > uint32(maxBlobs) {
oversizedDataused += slotDataused oversizedDataused += slotDataused
oversizedDatagaps += slotDatagaps oversizedDatagaps += slotDatagaps
oversizedSlotused += shelf.FilledSlots oversizedSlotused += shelf.FilledSlots
@ -1699,13 +1701,6 @@ func (p *BlobPool) ContentFrom(addr common.Address) ([]*types.Transaction, []*ty
return []*types.Transaction{}, []*types.Transaction{} return []*types.Transaction{}, []*types.Transaction{}
} }
// Locals retrieves the accounts currently considered local by the pool.
//
// There is no notion of local accounts in the blob pool.
func (p *BlobPool) Locals() []common.Address {
return []common.Address{}
}
// Status returns the known status (unknown/pending/queued) of a transaction // Status returns the known status (unknown/pending/queued) of a transaction
// identified by their hashes. // identified by their hashes.
func (p *BlobPool) Status(hash common.Hash) txpool.TxStatus { func (p *BlobPool) Status(hash common.Hash) txpool.TxStatus {

View file

@ -51,8 +51,10 @@ var (
testBlobVHashes [][32]byte testBlobVHashes [][32]byte
) )
const testMaxBlobsPerBlock = 6
func init() { func init() {
for i := 0; i < 10; i++ { for i := 0; i < 24; i++ {
testBlob := &kzg4844.Blob{byte(i)} testBlob := &kzg4844.Blob{byte(i)}
testBlobs = append(testBlobs, testBlob) testBlobs = append(testBlobs, testBlob)
@ -121,7 +123,12 @@ func (bc *testBlockChain) CurrentBlock() *types.Header {
mid := new(big.Int).Add(lo, hi) mid := new(big.Int).Add(lo, hi)
mid.Div(mid, big.NewInt(2)) mid.Div(mid, big.NewInt(2))
if eip4844.CalcBlobFee(mid.Uint64()).Cmp(bc.blobfee.ToBig()) > 0 { tmp := mid.Uint64()
if eip4844.CalcBlobFee(bc.Config(), &types.Header{
Number: blockNumber,
Time: blockTime,
ExcessBlobGas: &tmp,
}).Cmp(bc.blobfee.ToBig()) > 0 {
hi = mid hi = mid
} else { } else {
lo = mid lo = mid
@ -194,10 +201,43 @@ func makeTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64,
return types.MustSignNewTx(key, types.LatestSigner(params.MainnetChainConfig), blobtx) return types.MustSignNewTx(key, types.LatestSigner(params.MainnetChainConfig), blobtx)
} }
// makeMultiBlobTx is a utility method to construct a random blob tx with a
// certain number of blobs in its sidecar. The blobs, commitments, proofs and
// versioned hashes are taken from the shared test fixtures, so blobCount
// must not exceed their length.
func makeMultiBlobTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64, blobCount int, key *ecdsa.PrivateKey) *types.Transaction {
	var (
		blobs       []kzg4844.Blob
		blobHashes  []common.Hash
		commitments []kzg4844.Commitment
		proofs      []kzg4844.Proof
	)
	for i := 0; i < blobCount; i++ {
		blobs = append(blobs, *testBlobs[i])
		commitments = append(commitments, testBlobCommits[i])
		proofs = append(proofs, testBlobProofs[i])
		blobHashes = append(blobHashes, testBlobVHashes[i])
	}
	blobtx := &types.BlobTx{
		ChainID:    uint256.MustFromBig(params.MainnetChainConfig.ChainID),
		Nonce:      nonce,
		GasTipCap:  uint256.NewInt(gasTipCap),
		GasFeeCap:  uint256.NewInt(gasFeeCap),
		Gas:        21000,
		BlobFeeCap: uint256.NewInt(blobFeeCap),
		BlobHashes: blobHashes,
		Value:      uint256.NewInt(100),
		Sidecar: &types.BlobTxSidecar{
			Blobs:       blobs,
			Commitments: commitments,
			Proofs:      proofs,
		},
	}
	return types.MustSignNewTx(key, types.LatestSigner(params.MainnetChainConfig), blobtx)
}
// makeUnsignedTx is a utility method to construct a random blob transaction // makeUnsignedTx is a utility method to construct a random blob transaction
// without signing it. // without signing it.
func makeUnsignedTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64) *types.BlobTx { func makeUnsignedTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64) *types.BlobTx {
return makeUnsignedTxWithTestBlob(nonce, gasTipCap, gasFeeCap, blobFeeCap, rand.Intn(len(testBlobs))) return makeUnsignedTxWithTestBlob(nonce, gasTipCap, gasFeeCap, blobFeeCap, rnd.Intn(len(testBlobs)))
} }
// makeUnsignedTx is a utility method to construct a random blob transaction // makeUnsignedTx is a utility method to construct a random blob transaction
@ -415,7 +455,7 @@ func TestOpenDrops(t *testing.T) {
defer os.RemoveAll(storage) defer os.RemoveAll(storage)
os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700) os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil) store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(testMaxBlobsPerBlock), nil)
// Insert a malformed transaction to verify that decoding errors (or format // Insert a malformed transaction to verify that decoding errors (or format
// changes) are handled gracefully (case 1) // changes) are handled gracefully (case 1)
@ -640,9 +680,9 @@ func TestOpenDrops(t *testing.T) {
statedb.AddBalance(crypto.PubkeyToAddress(gapper.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified) statedb.AddBalance(crypto.PubkeyToAddress(gapper.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified)
statedb.AddBalance(crypto.PubkeyToAddress(dangler.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified) statedb.AddBalance(crypto.PubkeyToAddress(dangler.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified)
statedb.AddBalance(crypto.PubkeyToAddress(filler.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified) statedb.AddBalance(crypto.PubkeyToAddress(filler.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified)
statedb.SetNonce(crypto.PubkeyToAddress(filler.PublicKey), 3) statedb.SetNonce(crypto.PubkeyToAddress(filler.PublicKey), 3, tracing.NonceChangeUnspecified)
statedb.AddBalance(crypto.PubkeyToAddress(overlapper.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified) statedb.AddBalance(crypto.PubkeyToAddress(overlapper.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified)
statedb.SetNonce(crypto.PubkeyToAddress(overlapper.PublicKey), 2) statedb.SetNonce(crypto.PubkeyToAddress(overlapper.PublicKey), 2, tracing.NonceChangeUnspecified)
statedb.AddBalance(crypto.PubkeyToAddress(underpayer.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified) statedb.AddBalance(crypto.PubkeyToAddress(underpayer.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified)
statedb.AddBalance(crypto.PubkeyToAddress(outpricer.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified) statedb.AddBalance(crypto.PubkeyToAddress(outpricer.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified)
statedb.AddBalance(crypto.PubkeyToAddress(exceeder.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified) statedb.AddBalance(crypto.PubkeyToAddress(exceeder.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified)
@ -738,7 +778,7 @@ func TestOpenIndex(t *testing.T) {
defer os.RemoveAll(storage) defer os.RemoveAll(storage)
os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700) os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil) store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(testMaxBlobsPerBlock), nil)
// Insert a sequence of transactions with varying price points to check that // Insert a sequence of transactions with varying price points to check that
// the cumulative minimum will be maintained. // the cumulative minimum will be maintained.
@ -827,7 +867,7 @@ func TestOpenHeap(t *testing.T) {
defer os.RemoveAll(storage) defer os.RemoveAll(storage)
os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700) os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil) store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(testMaxBlobsPerBlock), nil)
// Insert a few transactions from a few accounts. To remove randomness from // Insert a few transactions from a few accounts. To remove randomness from
// the heap initialization, use a deterministic account/tx/priority ordering. // the heap initialization, use a deterministic account/tx/priority ordering.
@ -914,7 +954,7 @@ func TestOpenCap(t *testing.T) {
defer os.RemoveAll(storage) defer os.RemoveAll(storage)
os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700) os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil) store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(testMaxBlobsPerBlock), nil)
// Insert a few transactions from a few accounts // Insert a few transactions from a few accounts
var ( var (
@ -992,6 +1032,108 @@ func TestOpenCap(t *testing.T) {
} }
} }
// TestChangingSlotterSize attempts to mimic a scenario where the max blob count
// of the pool is increased. This would happen during a client release where a
// new fork is added with a max blob count higher than the previous fork. We
// want to make sure transactions are persisted between those runs.
//
// Fix: the success-branch condition compared against maxBlobs == 10, a value
// the loop never produces (it iterates over {6, 24}), so an unexpected
// rejection of the oversized tx in the second iteration went undetected.
// It now checks maxBlobs == 24.
func TestChangingSlotterSize(t *testing.T) {
	//log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))

	// Create a temporary folder for the persistent backend
	storage, _ := os.MkdirTemp("", "blobpool-")
	defer os.RemoveAll(storage)

	os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
	store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(6), nil)

	// Create transactions from a few accounts.
	var (
		key1, _ = crypto.GenerateKey()
		key2, _ = crypto.GenerateKey()
		key3, _ = crypto.GenerateKey()

		addr1 = crypto.PubkeyToAddress(key1.PublicKey)
		addr2 = crypto.PubkeyToAddress(key2.PublicKey)
		addr3 = crypto.PubkeyToAddress(key3.PublicKey)

		tx1 = makeMultiBlobTx(0, 1, 1000, 100, 6, key1)
		tx2 = makeMultiBlobTx(0, 1, 800, 70, 6, key2)
		tx3 = makeMultiBlobTx(0, 1, 800, 110, 24, key3)

		blob1, _ = rlp.EncodeToBytes(tx1)
		blob2, _ = rlp.EncodeToBytes(tx2)
	)
	// Write the two safely sized txs to store. note: although the store is
	// configured for a blob count of 6, it can also support around ~1mb of call
	// data - all this to say that we aren't using the absolute largest shelf
	// available.
	store.Put(blob1)
	store.Put(blob2)
	store.Close()

	// Mimic a blobpool with max blob count of 6 upgrading to a max blob count of 24.
	for _, maxBlobs := range []int{6, 24} {
		statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
		statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
		statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
		statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
		statedb.Commit(0, true, false)

		// Make custom chain config where the max blob count changes based on the loop variable.
		cancunTime := uint64(0)
		config := &params.ChainConfig{
			ChainID:     big.NewInt(1),
			LondonBlock: big.NewInt(0),
			BerlinBlock: big.NewInt(0),
			CancunTime:  &cancunTime,
			BlobScheduleConfig: &params.BlobScheduleConfig{
				Cancun: &params.BlobConfig{
					Target:         maxBlobs / 2,
					Max:            maxBlobs,
					UpdateFraction: params.DefaultCancunBlobConfig.UpdateFraction,
				},
			},
		}
		chain := &testBlockChain{
			config:  config,
			basefee: uint256.NewInt(1050),
			blobfee: uint256.NewInt(105),
			statedb: statedb,
		}
		pool := New(Config{Datadir: storage}, chain)
		if err := pool.Init(1, chain.CurrentBlock(), makeAddressReserver()); err != nil {
			t.Fatalf("failed to create blob pool: %v", err)
		}

		// Try to add the big blob tx. In the initial iteration it should overflow
		// the pool. On the subsequent iteration it should be accepted.
		errs := pool.Add([]*types.Transaction{tx3}, true)
		if _, ok := pool.index[addr3]; ok && maxBlobs == 6 {
			t.Errorf("expected insert of oversized blob tx to fail: blobs=24, maxBlobs=%d, err=%v", maxBlobs, errs[0])
		} else if !ok && maxBlobs == 24 {
			t.Errorf("expected insert of oversized blob tx to succeed: blobs=24, maxBlobs=%d, err=%v", maxBlobs, errs[0])
		}

		// Verify the regular two txs are always available.
		if got := pool.Get(tx1.Hash()); got == nil {
			t.Errorf("expected tx %s from %s in pool", tx1.Hash(), addr1)
		}
		if got := pool.Get(tx2.Hash()); got == nil {
			t.Errorf("expected tx %s from %s in pool", tx2.Hash(), addr2)
		}

		// Verify all the calculated pool internals. Interestingly, this is **not**
		// a duplication of the above checks, this actually validates the verifier
		// using the above already hard coded checks.
		//
		// Do not remove this, nor alter the above to be generic.
		verifyPoolInternals(t, pool)
		pool.Close()
	}
}
// Tests that adding transaction will correctly store it in the persistent store // Tests that adding transaction will correctly store it in the persistent store
// and update all the indices. // and update all the indices.
// //
@ -1369,7 +1511,7 @@ func TestAdd(t *testing.T) {
defer os.RemoveAll(storage) // late defer, still ok defer os.RemoveAll(storage) // late defer, still ok
os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700) os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil) store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(testMaxBlobsPerBlock), nil)
// Insert the seed transactions for the pool startup // Insert the seed transactions for the pool startup
var ( var (
@ -1384,7 +1526,7 @@ func TestAdd(t *testing.T) {
// Seed the state database with this account // Seed the state database with this account
statedb.AddBalance(addrs[acc], new(uint256.Int).SetUint64(seed.balance), tracing.BalanceChangeUnspecified) statedb.AddBalance(addrs[acc], new(uint256.Int).SetUint64(seed.balance), tracing.BalanceChangeUnspecified)
statedb.SetNonce(addrs[acc], seed.nonce) statedb.SetNonce(addrs[acc], seed.nonce, tracing.NonceChangeUnspecified)
// Sign the seed transactions and store them in the data store // Sign the seed transactions and store them in the data store
for _, tx := range seed.txs { for _, tx := range seed.txs {
@ -1439,7 +1581,7 @@ func TestAdd(t *testing.T) {
// Apply the nonce updates to the state db // Apply the nonce updates to the state db
for _, tx := range txs { for _, tx := range txs {
sender, _ := types.Sender(types.LatestSigner(params.MainnetChainConfig), tx) sender, _ := types.Sender(types.LatestSigner(params.MainnetChainConfig), tx)
chain.statedb.SetNonce(sender, tx.Nonce()+1) chain.statedb.SetNonce(sender, tx.Nonce()+1, tracing.NonceChangeUnspecified)
} }
pool.Reset(chain.CurrentBlock(), header) pool.Reset(chain.CurrentBlock(), header)
verifyPoolInternals(t, pool) verifyPoolInternals(t, pool)

View file

@ -26,7 +26,7 @@ import (
"github.com/holiman/uint256" "github.com/holiman/uint256"
) )
var rand = mrand.New(mrand.NewSource(1)) var rnd = mrand.New(mrand.NewSource(1))
// verifyHeapInternals verifies that all accounts present in the index are also // verifyHeapInternals verifies that all accounts present in the index are also
// present in the heap and internals are consistent across various indices. // present in the heap and internals are consistent across various indices.
@ -193,12 +193,12 @@ func benchmarkPriceHeapReinit(b *testing.B, datacap uint64) {
index := make(map[common.Address][]*blobTxMeta) index := make(map[common.Address][]*blobTxMeta)
for i := 0; i < int(blobs); i++ { for i := 0; i < int(blobs); i++ {
var addr common.Address var addr common.Address
rand.Read(addr[:]) rnd.Read(addr[:])
var ( var (
execTip = uint256.NewInt(rand.Uint64()) execTip = uint256.NewInt(rnd.Uint64())
execFee = uint256.NewInt(rand.Uint64()) execFee = uint256.NewInt(rnd.Uint64())
blobFee = uint256.NewInt(rand.Uint64()) blobFee = uint256.NewInt(rnd.Uint64())
basefeeJumps = dynamicFeeJumps(execFee) basefeeJumps = dynamicFeeJumps(execFee)
blobfeeJumps = dynamicFeeJumps(blobFee) blobfeeJumps = dynamicFeeJumps(blobFee)
@ -218,13 +218,13 @@ func benchmarkPriceHeapReinit(b *testing.B, datacap uint64) {
}} }}
} }
// Create a price heap and reinit it over and over // Create a price heap and reinit it over and over
heap := newPriceHeap(uint256.NewInt(rand.Uint64()), uint256.NewInt(rand.Uint64()), index) heap := newPriceHeap(uint256.NewInt(rnd.Uint64()), uint256.NewInt(rnd.Uint64()), index)
basefees := make([]*uint256.Int, b.N) basefees := make([]*uint256.Int, b.N)
blobfees := make([]*uint256.Int, b.N) blobfees := make([]*uint256.Int, b.N)
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
basefees[i] = uint256.NewInt(rand.Uint64()) basefees[i] = uint256.NewInt(rnd.Uint64())
blobfees[i] = uint256.NewInt(rand.Uint64()) blobfees[i] = uint256.NewInt(rnd.Uint64())
} }
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
@ -269,12 +269,12 @@ func benchmarkPriceHeapOverflow(b *testing.B, datacap uint64) {
index := make(map[common.Address][]*blobTxMeta) index := make(map[common.Address][]*blobTxMeta)
for i := 0; i < int(blobs); i++ { for i := 0; i < int(blobs); i++ {
var addr common.Address var addr common.Address
rand.Read(addr[:]) rnd.Read(addr[:])
var ( var (
execTip = uint256.NewInt(rand.Uint64()) execTip = uint256.NewInt(rnd.Uint64())
execFee = uint256.NewInt(rand.Uint64()) execFee = uint256.NewInt(rnd.Uint64())
blobFee = uint256.NewInt(rand.Uint64()) blobFee = uint256.NewInt(rnd.Uint64())
basefeeJumps = dynamicFeeJumps(execFee) basefeeJumps = dynamicFeeJumps(execFee)
blobfeeJumps = dynamicFeeJumps(blobFee) blobfeeJumps = dynamicFeeJumps(blobFee)
@ -294,18 +294,18 @@ func benchmarkPriceHeapOverflow(b *testing.B, datacap uint64) {
}} }}
} }
// Create a price heap and overflow it over and over // Create a price heap and overflow it over and over
evict := newPriceHeap(uint256.NewInt(rand.Uint64()), uint256.NewInt(rand.Uint64()), index) evict := newPriceHeap(uint256.NewInt(rnd.Uint64()), uint256.NewInt(rnd.Uint64()), index)
var ( var (
addrs = make([]common.Address, b.N) addrs = make([]common.Address, b.N)
metas = make([]*blobTxMeta, b.N) metas = make([]*blobTxMeta, b.N)
) )
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
rand.Read(addrs[i][:]) rnd.Read(addrs[i][:])
var ( var (
execTip = uint256.NewInt(rand.Uint64()) execTip = uint256.NewInt(rnd.Uint64())
execFee = uint256.NewInt(rand.Uint64()) execFee = uint256.NewInt(rnd.Uint64())
blobFee = uint256.NewInt(rand.Uint64()) blobFee = uint256.NewInt(rnd.Uint64())
basefeeJumps = dynamicFeeJumps(execFee) basefeeJumps = dynamicFeeJumps(execFee)
blobfeeJumps = dynamicFeeJumps(blobFee) blobfeeJumps = dynamicFeeJumps(blobFee)

View file

@ -48,7 +48,7 @@ type limbo struct {
} }
// newLimbo opens and indexes a set of limboed blob transactions. // newLimbo opens and indexes a set of limboed blob transactions.
func newLimbo(datadir string) (*limbo, error) { func newLimbo(datadir string, maxBlobsPerTransaction int) (*limbo, error) {
l := &limbo{ l := &limbo{
index: make(map[common.Hash]uint64), index: make(map[common.Hash]uint64),
groups: make(map[uint64]map[uint64]common.Hash), groups: make(map[uint64]map[uint64]common.Hash),
@ -60,7 +60,7 @@ func newLimbo(datadir string) (*limbo, error) {
fails = append(fails, id) fails = append(fails, id)
} }
} }
store, err := billy.Open(billy.Options{Path: datadir, Repair: true}, newSlotter(), index) store, err := billy.Open(billy.Options{Path: datadir, Repair: true}, newSlotter(maxBlobsPerTransaction), index)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View file

@ -1,4 +1,4 @@
// Copyright 2022 The go-ethereum Authors // Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library. // This file is part of the go-ethereum library.
// //
// The go-ethereum library is free software: you can redistribute it and/or modify // The go-ethereum library is free software: you can redistribute it and/or modify

View file

@ -52,7 +52,7 @@ func TestPriorityCalculation(t *testing.T) {
func BenchmarkDynamicFeeJumpCalculation(b *testing.B) { func BenchmarkDynamicFeeJumpCalculation(b *testing.B) {
fees := make([]*uint256.Int, b.N) fees := make([]*uint256.Int, b.N)
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
fees[i] = uint256.NewInt(rand.Uint64()) fees[i] = uint256.NewInt(rnd.Uint64())
} }
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
@ -76,8 +76,8 @@ func BenchmarkPriorityCalculation(b *testing.B) {
txBasefeeJumps := make([]float64, b.N) txBasefeeJumps := make([]float64, b.N)
txBlobfeeJumps := make([]float64, b.N) txBlobfeeJumps := make([]float64, b.N)
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
txBasefeeJumps[i] = dynamicFeeJumps(uint256.NewInt(rand.Uint64())) txBasefeeJumps[i] = dynamicFeeJumps(uint256.NewInt(rnd.Uint64()))
txBlobfeeJumps[i] = dynamicFeeJumps(uint256.NewInt(rand.Uint64())) txBlobfeeJumps[i] = dynamicFeeJumps(uint256.NewInt(rnd.Uint64()))
} }
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()

View file

@ -25,13 +25,13 @@ package blobpool
// The slotter also creates a shelf for 0-blob transactions. Whilst those are not // The slotter also creates a shelf for 0-blob transactions. Whilst those are not
// allowed in the current protocol, having an empty shelf is not a relevant use // allowed in the current protocol, having an empty shelf is not a relevant use
// of resources, but it makes stress testing with junk transactions simpler. // of resources, but it makes stress testing with junk transactions simpler.
func newSlotter() func() (uint32, bool) { func newSlotter(maxBlobsPerTransaction int) func() (uint32, bool) {
slotsize := uint32(txAvgSize) slotsize := uint32(txAvgSize)
slotsize -= uint32(blobSize) // underflows, it's ok, will overflow back in the first return slotsize -= uint32(blobSize) // underflows, it's ok, will overflow back in the first return
return func() (size uint32, done bool) { return func() (size uint32, done bool) {
slotsize += blobSize slotsize += blobSize
finished := slotsize > maxBlobsPerTransaction*blobSize+txMaxSize finished := slotsize > uint32(maxBlobsPerTransaction)*blobSize+txMaxSize
return slotsize, finished return slotsize, finished
} }

View file

@ -21,7 +21,7 @@ import "testing"
// Tests that the slotter creates the expected database shelves. // Tests that the slotter creates the expected database shelves.
func TestNewSlotter(t *testing.T) { func TestNewSlotter(t *testing.T) {
// Generate the database shelve sizes // Generate the database shelve sizes
slotter := newSlotter() slotter := newSlotter(6)
var shelves []uint32 var shelves []uint32
for { for {

View file

@ -99,7 +99,6 @@ var (
pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil) pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
queuedGauge = metrics.NewRegisteredGauge("txpool/queued", nil) queuedGauge = metrics.NewRegisteredGauge("txpool/queued", nil)
localGauge = metrics.NewRegisteredGauge("txpool/local", nil)
slotsGauge = metrics.NewRegisteredGauge("txpool/slots", nil) slotsGauge = metrics.NewRegisteredGauge("txpool/slots", nil)
reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil) reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil)
@ -159,10 +158,6 @@ var DefaultConfig = Config{
// unreasonable or unworkable. // unreasonable or unworkable.
func (config *Config) sanitize() Config { func (config *Config) sanitize() Config {
conf := *config conf := *config
if conf.Rejournal < time.Second {
log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second)
conf.Rejournal = time.Second
}
if conf.PriceLimit < 1 { if conf.PriceLimit < 1 {
log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultConfig.PriceLimit) log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultConfig.PriceLimit)
conf.PriceLimit = DefaultConfig.PriceLimit conf.PriceLimit = DefaultConfig.PriceLimit
@ -214,9 +209,6 @@ type LegacyPool struct {
currentState *state.StateDB // Current state in the blockchain head currentState *state.StateDB // Current state in the blockchain head
pendingNonces *noncer // Pending state tracking virtual nonces pendingNonces *noncer // Pending state tracking virtual nonces
locals *accountSet // Set of local transaction to exempt from eviction rules
journal *journal // Journal of local transaction to back up to disk
reserve txpool.AddressReserver // Address reserver to ensure exclusivity across subpools reserve txpool.AddressReserver // Address reserver to ensure exclusivity across subpools
pending map[common.Address]*list // All currently processable transactions pending map[common.Address]*list // All currently processable transactions
queue map[common.Address]*list // Queued but non-processable transactions queue map[common.Address]*list // Queued but non-processable transactions
@ -262,16 +254,8 @@ func New(config Config, chain BlockChain) *LegacyPool {
reorgShutdownCh: make(chan struct{}), reorgShutdownCh: make(chan struct{}),
initDoneCh: make(chan struct{}), initDoneCh: make(chan struct{}),
} }
pool.locals = newAccountSet(pool.signer)
for _, addr := range config.Locals {
log.Info("Setting new local account", "address", addr)
pool.locals.add(addr)
}
pool.priced = newPricedList(pool.all) pool.priced = newPricedList(pool.all)
if !config.NoLocals && config.Journal != "" {
pool.journal = newTxJournal(config.Journal)
}
return pool return pool
} }
@ -287,8 +271,7 @@ func (pool *LegacyPool) Filter(tx *types.Transaction) bool {
} }
// Init sets the gas price needed to keep a transaction in the pool and the chain // Init sets the gas price needed to keep a transaction in the pool and the chain
// head to allow balance / nonce checks. The transaction journal will be loaded // head to allow balance / nonce checks. The internal
// from disk and filtered based on the provided starting settings. The internal
// goroutines will be spun up and the pool deemed operational afterwards. // goroutines will be spun up and the pool deemed operational afterwards.
func (pool *LegacyPool) Init(gasTip uint64, head *types.Header, reserve txpool.AddressReserver) error { func (pool *LegacyPool) Init(gasTip uint64, head *types.Header, reserve txpool.AddressReserver) error {
// Set the address reserver to request exclusive access to pooled accounts // Set the address reserver to request exclusive access to pooled accounts
@ -311,20 +294,9 @@ func (pool *LegacyPool) Init(gasTip uint64, head *types.Header, reserve txpool.A
pool.currentState = statedb pool.currentState = statedb
pool.pendingNonces = newNoncer(statedb) pool.pendingNonces = newNoncer(statedb)
// Start the reorg loop early, so it can handle requests generated during
// journal loading.
pool.wg.Add(1) pool.wg.Add(1)
go pool.scheduleReorgLoop() go pool.scheduleReorgLoop()
// If local transactions and journaling is enabled, load from disk
if pool.journal != nil {
if err := pool.journal.load(pool.addLocals); err != nil {
log.Warn("Failed to load transaction journal", "err", err)
}
if err := pool.journal.rotate(pool.local()); err != nil {
log.Warn("Failed to rotate transaction journal", "err", err)
}
}
pool.wg.Add(1) pool.wg.Add(1)
go pool.loop() go pool.loop()
return nil return nil
@ -340,13 +312,11 @@ func (pool *LegacyPool) loop() {
prevPending, prevQueued, prevStales int prevPending, prevQueued, prevStales int
// Start the stats reporting and transaction eviction tickers // Start the stats reporting and transaction eviction tickers
report = time.NewTicker(statsReportInterval) report = time.NewTicker(statsReportInterval)
evict = time.NewTicker(evictionInterval) evict = time.NewTicker(evictionInterval)
journal = time.NewTicker(pool.config.Rejournal)
) )
defer report.Stop() defer report.Stop()
defer evict.Stop() defer evict.Stop()
defer journal.Stop()
// Notify tests that the init phase is done // Notify tests that the init phase is done
close(pool.initDoneCh) close(pool.initDoneCh)
@ -372,11 +342,7 @@ func (pool *LegacyPool) loop() {
case <-evict.C: case <-evict.C:
pool.mu.Lock() pool.mu.Lock()
for addr := range pool.queue { for addr := range pool.queue {
// Skip local transactions from the eviction mechanism // Any old enough should be removed
if pool.locals.contains(addr) {
continue
}
// Any non-locals old enough should be removed
if time.Since(pool.beats[addr]) > pool.config.Lifetime { if time.Since(pool.beats[addr]) > pool.config.Lifetime {
list := pool.queue[addr].Flatten() list := pool.queue[addr].Flatten()
for _, tx := range list { for _, tx := range list {
@ -386,16 +352,6 @@ func (pool *LegacyPool) loop() {
} }
} }
pool.mu.Unlock() pool.mu.Unlock()
// Handle local transaction journal rotation
case <-journal.C:
if pool.journal != nil {
pool.mu.Lock()
if err := pool.journal.rotate(pool.local()); err != nil {
log.Warn("Failed to rotate local tx journal", "err", err)
}
pool.mu.Unlock()
}
} }
} }
} }
@ -406,9 +362,6 @@ func (pool *LegacyPool) Close() error {
close(pool.reorgShutdownCh) close(pool.reorgShutdownCh)
pool.wg.Wait() pool.wg.Wait()
if pool.journal != nil {
pool.journal.close()
}
log.Info("Transaction pool stopped") log.Info("Transaction pool stopped")
return nil return nil
} }
@ -444,7 +397,7 @@ func (pool *LegacyPool) SetGasTip(tip *big.Int) {
// If the min miner fee increased, remove transactions below the new threshold // If the min miner fee increased, remove transactions below the new threshold
if newTip.Cmp(old) > 0 { if newTip.Cmp(old) > 0 {
// pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead // pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead
drop := pool.all.RemotesBelowTip(tip) drop := pool.all.TxsBelowTip(tip)
for _, tx := range drop { for _, tx := range drop {
pool.removeTx(tx.Hash(), false, true) pool.removeTx(tx.Hash(), false, true)
} }
@ -549,7 +502,7 @@ func (pool *LegacyPool) Pending(filter txpool.PendingFilter) map[common.Address]
txs := list.Flatten() txs := list.Flatten()
// If the miner requests tip enforcement, cap the lists now // If the miner requests tip enforcement, cap the lists now
if minTipBig != nil && !pool.locals.contains(addr) { if minTipBig != nil {
for i, tx := range txs { for i, tx := range txs {
if tx.EffectiveGasTipIntCmp(minTipBig, baseFeeBig) < 0 { if tx.EffectiveGasTipIntCmp(minTipBig, baseFeeBig) < 0 {
txs = txs[:i] txs = txs[:i]
@ -577,35 +530,11 @@ func (pool *LegacyPool) Pending(filter txpool.PendingFilter) map[common.Address]
return pending return pending
} }
// Locals retrieves the accounts currently considered local by the pool.
func (pool *LegacyPool) Locals() []common.Address {
pool.mu.Lock()
defer pool.mu.Unlock()
return pool.locals.flatten()
}
// local retrieves all currently known local transactions, grouped by origin
// account and sorted by nonce. The returned transaction set is a copy and can be
// freely modified by calling code.
func (pool *LegacyPool) local() map[common.Address]types.Transactions {
txs := make(map[common.Address]types.Transactions)
for addr := range pool.locals.accounts {
if pending := pool.pending[addr]; pending != nil {
txs[addr] = append(txs[addr], pending.Flatten()...)
}
if queued := pool.queue[addr]; queued != nil {
txs[addr] = append(txs[addr], queued.Flatten()...)
}
}
return txs
}
// validateTxBasics checks whether a transaction is valid according to the consensus // validateTxBasics checks whether a transaction is valid according to the consensus
// rules, but does not check state-dependent validation such as sufficient balance. // rules, but does not check state-dependent validation such as sufficient balance.
// This check is meant as an early check which only needs to be performed once, // This check is meant as an early check which only needs to be performed once,
// and does not require the pool mutex to be held. // and does not require the pool mutex to be held.
func (pool *LegacyPool) validateTxBasics(tx *types.Transaction, local bool) error { func (pool *LegacyPool) validateTxBasics(tx *types.Transaction) error {
opts := &txpool.ValidationOptions{ opts := &txpool.ValidationOptions{
Config: pool.chainconfig, Config: pool.chainconfig,
Accept: 0 | Accept: 0 |
@ -615,9 +544,6 @@ func (pool *LegacyPool) validateTxBasics(tx *types.Transaction, local bool) erro
MaxSize: txMaxSize, MaxSize: txMaxSize,
MinTip: pool.gasTip.Load().ToBig(), MinTip: pool.gasTip.Load().ToBig(),
} }
if local {
opts.MinTip = new(big.Int)
}
if err := txpool.ValidateTransaction(tx, pool.currentHead.Load(), pool.signer, opts); err != nil { if err := txpool.ValidateTransaction(tx, pool.currentHead.Load(), pool.signer, opts); err != nil {
return err return err
} }
@ -665,11 +591,7 @@ func (pool *LegacyPool) validateTx(tx *types.Transaction) error {
// add validates a transaction and inserts it into the non-executable queue for later // add validates a transaction and inserts it into the non-executable queue for later
// pending promotion and execution. If the transaction is a replacement for an already // pending promotion and execution. If the transaction is a replacement for an already
// pending or queued one, it overwrites the previous transaction if its price is higher. // pending or queued one, it overwrites the previous transaction if its price is higher.
// func (pool *LegacyPool) add(tx *types.Transaction) (replaced bool, err error) {
// If a newly added transaction is marked as local, its sending account will be
// added to the allowlist, preventing any associated transaction from being dropped
// out of the pool due to pricing constraints.
func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, err error) {
// If the transaction is already known, discard it // If the transaction is already known, discard it
hash := tx.Hash() hash := tx.Hash()
if pool.all.Get(hash) != nil { if pool.all.Get(hash) != nil {
@ -677,9 +599,6 @@ func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, e
knownTxMeter.Mark(1) knownTxMeter.Mark(1)
return false, txpool.ErrAlreadyKnown return false, txpool.ErrAlreadyKnown
} }
// Make the local flag. If it's from local source or it's from the network but
// the sender is marked as local previously, treat it as the local transaction.
isLocal := local || pool.locals.containsTx(tx)
// If the transaction fails basic validation, discard it // If the transaction fails basic validation, discard it
if err := pool.validateTx(tx); err != nil { if err := pool.validateTx(tx); err != nil {
@ -715,7 +634,7 @@ func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, e
// If the transaction pool is full, discard underpriced transactions // If the transaction pool is full, discard underpriced transactions
if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue { if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue {
// If the new transaction is underpriced, don't accept it // If the new transaction is underpriced, don't accept it
if !isLocal && pool.priced.Underpriced(tx) { if pool.priced.Underpriced(tx) {
log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap()) log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
underpricedTxMeter.Mark(1) underpricedTxMeter.Mark(1)
return false, txpool.ErrUnderpriced return false, txpool.ErrUnderpriced
@ -731,19 +650,18 @@ func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, e
} }
// New transaction is better than our worse ones, make room for it. // New transaction is better than our worse ones, make room for it.
// If it's a local transaction, forcibly discard all available transactions. // If we can't make enough room for new one, abort the operation.
// Otherwise if we can't make enough room for new one, abort the operation. drop, success := pool.priced.Discard(pool.all.Slots() - int(pool.config.GlobalSlots+pool.config.GlobalQueue) + numSlots(tx))
drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal)
// Special case, we still can't make the room for the new remote one. // Special case, we still can't make the room for the new remote one.
if !isLocal && !success { if !success {
log.Trace("Discarding overflown transaction", "hash", hash) log.Trace("Discarding overflown transaction", "hash", hash)
overflowedTxMeter.Mark(1) overflowedTxMeter.Mark(1)
return false, ErrTxPoolOverflow return false, ErrTxPoolOverflow
} }
// If the new transaction is a future transaction it should never churn pending transactions // If the new transaction is a future transaction it should never churn pending transactions
if !isLocal && pool.isGapped(from, tx) { if pool.isGapped(from, tx) {
var replacesPending bool var replacesPending bool
for _, dropTx := range drop { for _, dropTx := range drop {
dropSender, _ := types.Sender(pool.signer, dropTx) dropSender, _ := types.Sender(pool.signer, dropTx)
@ -755,7 +673,7 @@ func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, e
// Add all transactions back to the priced queue // Add all transactions back to the priced queue
if replacesPending { if replacesPending {
for _, dropTx := range drop { for _, dropTx := range drop {
pool.priced.Put(dropTx, false) pool.priced.Put(dropTx)
} }
log.Trace("Discarding future transaction replacing pending tx", "hash", hash) log.Trace("Discarding future transaction replacing pending tx", "hash", hash)
return false, txpool.ErrFutureReplacePending return false, txpool.ErrFutureReplacePending
@ -788,9 +706,8 @@ func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, e
pool.priced.Removed(1) pool.priced.Removed(1)
pendingReplaceMeter.Mark(1) pendingReplaceMeter.Mark(1)
} }
pool.all.Add(tx, isLocal) pool.all.Add(tx)
pool.priced.Put(tx, isLocal) pool.priced.Put(tx)
pool.journalTx(from, tx)
pool.queueTxEvent(tx) pool.queueTxEvent(tx)
log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To()) log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
@ -799,20 +716,10 @@ func (pool *LegacyPool) add(tx *types.Transaction, local bool) (replaced bool, e
return old != nil, nil return old != nil, nil
} }
// New transaction isn't replacing a pending one, push into queue // New transaction isn't replacing a pending one, push into queue
replaced, err = pool.enqueueTx(hash, tx, isLocal, true) replaced, err = pool.enqueueTx(hash, tx, true)
if err != nil { if err != nil {
return false, err return false, err
} }
// Mark local addresses and journal local transactions
if local && !pool.locals.contains(from) {
log.Info("Setting new local account", "address", from)
pool.locals.add(from)
pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local first time.
}
if isLocal {
localGauge.Inc(1)
}
pool.journalTx(from, tx)
log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To()) log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
return replaced, nil return replaced, nil
@ -845,7 +752,7 @@ func (pool *LegacyPool) isGapped(from common.Address, tx *types.Transaction) boo
// enqueueTx inserts a new transaction into the non-executable transaction queue. // enqueueTx inserts a new transaction into the non-executable transaction queue.
// //
// Note, this method assumes the pool lock is held! // Note, this method assumes the pool lock is held!
func (pool *LegacyPool) enqueueTx(hash common.Hash, tx *types.Transaction, local bool, addAll bool) (bool, error) { func (pool *LegacyPool) enqueueTx(hash common.Hash, tx *types.Transaction, addAll bool) (bool, error) {
// Try to insert the transaction into the future queue // Try to insert the transaction into the future queue
from, _ := types.Sender(pool.signer, tx) // already validated from, _ := types.Sender(pool.signer, tx) // already validated
if pool.queue[from] == nil { if pool.queue[from] == nil {
@ -872,8 +779,8 @@ func (pool *LegacyPool) enqueueTx(hash common.Hash, tx *types.Transaction, local
log.Error("Missing transaction in lookup set, please report the issue", "hash", hash) log.Error("Missing transaction in lookup set, please report the issue", "hash", hash)
} }
if addAll { if addAll {
pool.all.Add(tx, local) pool.all.Add(tx)
pool.priced.Put(tx, local) pool.priced.Put(tx)
} }
// If we never record the heartbeat, do it right now. // If we never record the heartbeat, do it right now.
if _, exist := pool.beats[from]; !exist { if _, exist := pool.beats[from]; !exist {
@ -882,18 +789,6 @@ func (pool *LegacyPool) enqueueTx(hash common.Hash, tx *types.Transaction, local
return old != nil, nil return old != nil, nil
} }
// journalTx adds the specified transaction to the local disk journal if it is
// deemed to have been sent from a local account.
func (pool *LegacyPool) journalTx(from common.Address, tx *types.Transaction) {
// Only journal if it's enabled and the transaction is local
if pool.journal == nil || !pool.locals.contains(from) {
return
}
if err := pool.journal.insert(tx); err != nil {
log.Warn("Failed to journal local transaction", "err", err)
}
}
// promoteTx adds a transaction to the pending (processable) list of transactions // promoteTx adds a transaction to the pending (processable) list of transactions
// and returns whether it was inserted or an older was better. // and returns whether it was inserted or an older was better.
// //
@ -930,28 +825,13 @@ func (pool *LegacyPool) promoteTx(addr common.Address, hash common.Hash, tx *typ
return true return true
} }
// addLocals enqueues a batch of transactions into the pool if they are valid, marking the // addRemotes enqueues a batch of transactions into the pool if they are valid.
// senders as local ones, ensuring they go around the local pricing constraints. // Full pricing constraints will apply.
//
// This method is used to add transactions from the RPC API and performs synchronous pool
// reorganization and event propagation.
func (pool *LegacyPool) addLocals(txs []*types.Transaction) []error {
return pool.Add(txs, !pool.config.NoLocals, true)
}
// addLocal enqueues a single local transaction into the pool if it is valid. This is
// a convenience wrapper around addLocals.
func (pool *LegacyPool) addLocal(tx *types.Transaction) error {
return pool.addLocals([]*types.Transaction{tx})[0]
}
// addRemotes enqueues a batch of transactions into the pool if they are valid. If the
// senders are not among the locally tracked ones, full pricing constraints will apply.
// //
// This method is used to add transactions from the p2p network and does not wait for pool // This method is used to add transactions from the p2p network and does not wait for pool
// reorganization and internal event propagation. // reorganization and internal event propagation.
func (pool *LegacyPool) addRemotes(txs []*types.Transaction) []error { func (pool *LegacyPool) addRemotes(txs []*types.Transaction) []error {
return pool.Add(txs, false, false) return pool.Add(txs, false)
} }
// addRemote enqueues a single transaction into the pool if it is valid. This is a convenience // addRemote enqueues a single transaction into the pool if it is valid. This is a convenience
@ -962,23 +842,19 @@ func (pool *LegacyPool) addRemote(tx *types.Transaction) error {
// addRemotesSync is like addRemotes, but waits for pool reorganization. Tests use this method. // addRemotesSync is like addRemotes, but waits for pool reorganization. Tests use this method.
func (pool *LegacyPool) addRemotesSync(txs []*types.Transaction) []error { func (pool *LegacyPool) addRemotesSync(txs []*types.Transaction) []error {
return pool.Add(txs, false, true) return pool.Add(txs, true)
} }
// This is like addRemotes with a single transaction, but waits for pool reorganization. Tests use this method. // This is like addRemotes with a single transaction, but waits for pool reorganization. Tests use this method.
func (pool *LegacyPool) addRemoteSync(tx *types.Transaction) error { func (pool *LegacyPool) addRemoteSync(tx *types.Transaction) error {
return pool.Add([]*types.Transaction{tx}, false, true)[0] return pool.Add([]*types.Transaction{tx}, true)[0]
} }
// Add enqueues a batch of transactions into the pool if they are valid. Depending // Add enqueues a batch of transactions into the pool if they are valid.
// on the local flag, full pricing constraints will or will not be applied.
// //
// If sync is set, the method will block until all internal maintenance related // If sync is set, the method will block until all internal maintenance related
// to the add is finished. Only use this during tests for determinism! // to the add is finished. Only use this during tests for determinism!
func (pool *LegacyPool) Add(txs []*types.Transaction, local, sync bool) []error { func (pool *LegacyPool) Add(txs []*types.Transaction, sync bool) []error {
// Do not treat as local if local transactions have been disabled
local = local && !pool.config.NoLocals
// Filter out known ones without obtaining the pool lock or recovering signatures // Filter out known ones without obtaining the pool lock or recovering signatures
var ( var (
errs = make([]error, len(txs)) errs = make([]error, len(txs))
@ -994,7 +870,7 @@ func (pool *LegacyPool) Add(txs []*types.Transaction, local, sync bool) []error
// Exclude transactions with basic errors, e.g invalid signatures and // Exclude transactions with basic errors, e.g invalid signatures and
// insufficient intrinsic gas as soon as possible and cache senders // insufficient intrinsic gas as soon as possible and cache senders
// in transactions before obtaining lock // in transactions before obtaining lock
if err := pool.validateTxBasics(tx, local); err != nil { if err := pool.validateTxBasics(tx); err != nil {
errs[i] = err errs[i] = err
log.Trace("Discarding invalid transaction", "hash", tx.Hash(), "err", err) log.Trace("Discarding invalid transaction", "hash", tx.Hash(), "err", err)
invalidTxMeter.Mark(1) invalidTxMeter.Mark(1)
@ -1009,7 +885,7 @@ func (pool *LegacyPool) Add(txs []*types.Transaction, local, sync bool) []error
// Process all the new transaction and merge any errors into the original slice // Process all the new transaction and merge any errors into the original slice
pool.mu.Lock() pool.mu.Lock()
newErrs, dirtyAddrs := pool.addTxsLocked(news, local) newErrs, dirtyAddrs := pool.addTxsLocked(news)
pool.mu.Unlock() pool.mu.Unlock()
var nilSlot = 0 var nilSlot = 0
@ -1030,11 +906,11 @@ func (pool *LegacyPool) Add(txs []*types.Transaction, local, sync bool) []error
// addTxsLocked attempts to queue a batch of transactions if they are valid. // addTxsLocked attempts to queue a batch of transactions if they are valid.
// The transaction pool lock must be held. // The transaction pool lock must be held.
func (pool *LegacyPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) { func (pool *LegacyPool) addTxsLocked(txs []*types.Transaction) ([]error, *accountSet) {
dirty := newAccountSet(pool.signer) dirty := newAccountSet(pool.signer)
errs := make([]error, len(txs)) errs := make([]error, len(txs))
for i, tx := range txs { for i, tx := range txs {
replaced, err := pool.add(tx, local) replaced, err := pool.add(tx)
errs[i] = err errs[i] = err
if err == nil && !replaced { if err == nil && !replaced {
dirty.addTx(tx) dirty.addTx(tx)
@ -1126,9 +1002,6 @@ func (pool *LegacyPool) removeTx(hash common.Hash, outofbound bool, unreserve bo
if outofbound { if outofbound {
pool.priced.Removed(1) pool.priced.Removed(1)
} }
if pool.locals.contains(addr) {
localGauge.Dec(1)
}
// Remove the transaction from the pending lists and reset the account nonce // Remove the transaction from the pending lists and reset the account nonce
if pending := pool.pending[addr]; pending != nil { if pending := pool.pending[addr]; pending != nil {
if removed, invalids := pending.Remove(tx); removed { if removed, invalids := pending.Remove(tx); removed {
@ -1139,7 +1012,7 @@ func (pool *LegacyPool) removeTx(hash common.Hash, outofbound bool, unreserve bo
// Postpone any invalidated transactions // Postpone any invalidated transactions
for _, tx := range invalids { for _, tx := range invalids {
// Internal shuffle shouldn't touch the lookup set. // Internal shuffle shouldn't touch the lookup set.
pool.enqueueTx(tx.Hash(), tx, false, false) pool.enqueueTx(tx.Hash(), tx, false)
} }
// Update the account nonce if needed // Update the account nonce if needed
pool.pendingNonces.setIfLower(addr, tx.Nonce()) pool.pendingNonces.setIfLower(addr, tx.Nonce())
@ -1204,7 +1077,7 @@ func (pool *LegacyPool) scheduleReorgLoop() {
launchNextRun bool launchNextRun bool
reset *txpoolResetRequest reset *txpoolResetRequest
dirtyAccounts *accountSet dirtyAccounts *accountSet
queuedEvents = make(map[common.Address]*sortedMap) queuedEvents = make(map[common.Address]*SortedMap)
) )
for { for {
// Launch next background reorg if needed // Launch next background reorg if needed
@ -1217,7 +1090,7 @@ func (pool *LegacyPool) scheduleReorgLoop() {
launchNextRun = false launchNextRun = false
reset, dirtyAccounts = nil, nil reset, dirtyAccounts = nil, nil
queuedEvents = make(map[common.Address]*sortedMap) queuedEvents = make(map[common.Address]*SortedMap)
} }
select { select {
@ -1246,7 +1119,7 @@ func (pool *LegacyPool) scheduleReorgLoop() {
// request one later if they want the events sent. // request one later if they want the events sent.
addr, _ := types.Sender(pool.signer, tx) addr, _ := types.Sender(pool.signer, tx)
if _, ok := queuedEvents[addr]; !ok { if _, ok := queuedEvents[addr]; !ok {
queuedEvents[addr] = newSortedMap() queuedEvents[addr] = NewSortedMap()
} }
queuedEvents[addr].Put(tx) queuedEvents[addr].Put(tx)
@ -1265,7 +1138,7 @@ func (pool *LegacyPool) scheduleReorgLoop() {
} }
// runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop. // runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop.
func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*sortedMap) { func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*SortedMap) {
defer func(t0 time.Time) { defer func(t0 time.Time) {
reorgDurationTimer.Update(time.Since(t0)) reorgDurationTimer.Update(time.Since(t0))
}(time.Now()) }(time.Now())
@ -1332,7 +1205,7 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest,
for _, tx := range promoted { for _, tx := range promoted {
addr, _ := types.Sender(pool.signer, tx) addr, _ := types.Sender(pool.signer, tx)
if _, ok := events[addr]; !ok { if _, ok := events[addr]; !ok {
events[addr] = newSortedMap() events[addr] = NewSortedMap()
} }
events[addr].Put(tx) events[addr].Put(tx)
} }
@ -1441,7 +1314,7 @@ func (pool *LegacyPool) reset(oldHead, newHead *types.Header) {
// Inject any transactions discarded due to reorgs // Inject any transactions discarded due to reorgs
log.Debug("Reinjecting stale transactions", "count", len(reinject)) log.Debug("Reinjecting stale transactions", "count", len(reinject))
core.SenderCacher().Recover(pool.signer, reinject) core.SenderCacher().Recover(pool.signer, reinject)
pool.addTxsLocked(reinject, false) pool.addTxsLocked(reinject)
} }
// promoteExecutables moves transactions that have become processable from the // promoteExecutables moves transactions that have become processable from the
@ -1486,22 +1359,17 @@ func (pool *LegacyPool) promoteExecutables(accounts []common.Address) []*types.T
queuedGauge.Dec(int64(len(readies))) queuedGauge.Dec(int64(len(readies)))
// Drop all transactions over the allowed limit // Drop all transactions over the allowed limit
var caps types.Transactions var caps = list.Cap(int(pool.config.AccountQueue))
if !pool.locals.contains(addr) { for _, tx := range caps {
caps = list.Cap(int(pool.config.AccountQueue)) hash := tx.Hash()
for _, tx := range caps { pool.all.Remove(hash)
hash := tx.Hash() log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
pool.all.Remove(hash)
log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
}
queuedRateLimitMeter.Mark(int64(len(caps)))
} }
queuedRateLimitMeter.Mark(int64(len(caps)))
// Mark all the items dropped as removed // Mark all the items dropped as removed
pool.priced.Removed(len(forwards) + len(drops) + len(caps)) pool.priced.Removed(len(forwards) + len(drops) + len(caps))
queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps))) queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
if pool.locals.contains(addr) {
localGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
}
// Delete the entire queue entry if it became empty. // Delete the entire queue entry if it became empty.
if list.Empty() { if list.Empty() {
delete(pool.queue, addr) delete(pool.queue, addr)
@ -1531,14 +1399,14 @@ func (pool *LegacyPool) truncatePending() {
spammers := prque.New[int64, common.Address](nil) spammers := prque.New[int64, common.Address](nil)
for addr, list := range pool.pending { for addr, list := range pool.pending {
// Only evict transactions from high rollers // Only evict transactions from high rollers
if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots { if uint64(list.Len()) > pool.config.AccountSlots {
spammers.Push(addr, int64(list.Len())) spammers.Push(addr, int64(list.Len()))
} }
} }
// Gradually drop transactions from offenders // Gradually drop transactions from offenders
offenders := []common.Address{} offenders := []common.Address{}
for pending > pool.config.GlobalSlots && !spammers.Empty() { for pending > pool.config.GlobalSlots && !spammers.Empty() {
// Retrieve the next offender if not local address // Retrieve the next offender
offender, _ := spammers.Pop() offender, _ := spammers.Pop()
offenders = append(offenders, offender) offenders = append(offenders, offender)
@ -1564,9 +1432,7 @@ func (pool *LegacyPool) truncatePending() {
} }
pool.priced.Removed(len(caps)) pool.priced.Removed(len(caps))
pendingGauge.Dec(int64(len(caps))) pendingGauge.Dec(int64(len(caps)))
if pool.locals.contains(offenders[i]) {
localGauge.Dec(int64(len(caps)))
}
pending-- pending--
} }
} }
@ -1591,9 +1457,6 @@ func (pool *LegacyPool) truncatePending() {
} }
pool.priced.Removed(len(caps)) pool.priced.Removed(len(caps))
pendingGauge.Dec(int64(len(caps))) pendingGauge.Dec(int64(len(caps)))
if pool.locals.contains(addr) {
localGauge.Dec(int64(len(caps)))
}
pending-- pending--
} }
} }
@ -1614,13 +1477,11 @@ func (pool *LegacyPool) truncateQueue() {
// Sort all accounts with queued transactions by heartbeat // Sort all accounts with queued transactions by heartbeat
addresses := make(addressesByHeartbeat, 0, len(pool.queue)) addresses := make(addressesByHeartbeat, 0, len(pool.queue))
for addr := range pool.queue { for addr := range pool.queue {
if !pool.locals.contains(addr) { // don't drop locals addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
}
} }
sort.Sort(sort.Reverse(addresses)) sort.Sort(sort.Reverse(addresses))
// Drop transactions until the total is below the limit or only locals remain // Drop transactions until the total is below the limit
for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; { for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
addr := addresses[len(addresses)-1] addr := addresses[len(addresses)-1]
list := pool.queue[addr.address] list := pool.queue[addr.address]
@ -1680,12 +1541,10 @@ func (pool *LegacyPool) demoteUnexecutables() {
log.Trace("Demoting pending transaction", "hash", hash) log.Trace("Demoting pending transaction", "hash", hash)
// Internal shuffle shouldn't touch the lookup set. // Internal shuffle shouldn't touch the lookup set.
pool.enqueueTx(hash, tx, false, false) pool.enqueueTx(hash, tx, false)
} }
pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids))) pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
if pool.locals.contains(addr) {
localGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
}
// If there's a gap in front, alert (should never happen) and postpone all transactions // If there's a gap in front, alert (should never happen) and postpone all transactions
if list.Len() > 0 && list.txs.Get(nonce) == nil { if list.Len() > 0 && list.txs.Get(nonce) == nil {
gapped := list.Cap(0) gapped := list.Cap(0)
@ -1694,7 +1553,7 @@ func (pool *LegacyPool) demoteUnexecutables() {
log.Error("Demoting invalidated transaction", "hash", hash) log.Error("Demoting invalidated transaction", "hash", hash)
// Internal shuffle shouldn't touch the lookup set. // Internal shuffle shouldn't touch the lookup set.
pool.enqueueTx(hash, tx, false, false) pool.enqueueTx(hash, tx, false)
} }
pendingGauge.Dec(int64(len(gapped))) pendingGauge.Dec(int64(len(gapped)))
} }
@ -1741,21 +1600,6 @@ func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet {
return as return as
} }
// contains checks if a given address is contained within the set.
func (as *accountSet) contains(addr common.Address) bool {
_, exist := as.accounts[addr]
return exist
}
// containsTx checks if the sender of a given tx is within the set. If the sender
// cannot be derived, this method returns false.
func (as *accountSet) containsTx(tx *types.Transaction) bool {
if addr, err := types.Sender(as.signer, tx); err == nil {
return as.contains(addr)
}
return false
}
// add inserts a new address into the set to track. // add inserts a new address into the set to track.
func (as *accountSet) add(addr common.Address) { func (as *accountSet) add(addr common.Address) {
as.accounts[addr] = struct{}{} as.accounts[addr] = struct{}{}
@ -1793,43 +1637,29 @@ func (as *accountSet) merge(other *accountSet) {
// internal mechanisms. The sole purpose of the type is to permit out-of-bound // internal mechanisms. The sole purpose of the type is to permit out-of-bound
// peeking into the pool in LegacyPool.Get without having to acquire the widely scoped // peeking into the pool in LegacyPool.Get without having to acquire the widely scoped
// LegacyPool.mu mutex. // LegacyPool.mu mutex.
//
// This lookup set combines the notion of "local transactions", which is useful
// to build upper-level structure.
type lookup struct { type lookup struct {
slots int slots int
lock sync.RWMutex lock sync.RWMutex
locals map[common.Hash]*types.Transaction txs map[common.Hash]*types.Transaction
remotes map[common.Hash]*types.Transaction
} }
// newLookup returns a new lookup structure. // newLookup returns a new lookup structure.
func newLookup() *lookup { func newLookup() *lookup {
return &lookup{ return &lookup{
locals: make(map[common.Hash]*types.Transaction), txs: make(map[common.Hash]*types.Transaction),
remotes: make(map[common.Hash]*types.Transaction),
} }
} }
// Range calls f on each key and value present in the map. The callback passed // Range calls f on each key and value present in the map. The callback passed
// should return the indicator whether the iteration needs to be continued. // should return the indicator whether the iteration needs to be continued.
// Callers need to specify which set (or both) to be iterated. // Callers need to specify which set (or both) to be iterated.
func (t *lookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) { func (t *lookup) Range(f func(hash common.Hash, tx *types.Transaction) bool) {
t.lock.RLock() t.lock.RLock()
defer t.lock.RUnlock() defer t.lock.RUnlock()
if local { for key, value := range t.txs {
for key, value := range t.locals { if !f(key, value) {
if !f(key, value, true) { return
return
}
}
}
if remote {
for key, value := range t.remotes {
if !f(key, value, false) {
return
}
} }
} }
} }
@ -1839,26 +1669,7 @@ func (t *lookup) Get(hash common.Hash) *types.Transaction {
t.lock.RLock() t.lock.RLock()
defer t.lock.RUnlock() defer t.lock.RUnlock()
if tx := t.locals[hash]; tx != nil { return t.txs[hash]
return tx
}
return t.remotes[hash]
}
// GetLocal returns a transaction if it exists in the lookup, or nil if not found.
func (t *lookup) GetLocal(hash common.Hash) *types.Transaction {
t.lock.RLock()
defer t.lock.RUnlock()
return t.locals[hash]
}
// GetRemote returns a transaction if it exists in the lookup, or nil if not found.
func (t *lookup) GetRemote(hash common.Hash) *types.Transaction {
t.lock.RLock()
defer t.lock.RUnlock()
return t.remotes[hash]
} }
// Count returns the current number of transactions in the lookup. // Count returns the current number of transactions in the lookup.
@ -1866,23 +1677,7 @@ func (t *lookup) Count() int {
t.lock.RLock() t.lock.RLock()
defer t.lock.RUnlock() defer t.lock.RUnlock()
return len(t.locals) + len(t.remotes) return len(t.txs)
}
// LocalCount returns the current number of local transactions in the lookup.
func (t *lookup) LocalCount() int {
t.lock.RLock()
defer t.lock.RUnlock()
return len(t.locals)
}
// RemoteCount returns the current number of remote transactions in the lookup.
func (t *lookup) RemoteCount() int {
t.lock.RLock()
defer t.lock.RUnlock()
return len(t.remotes)
} }
// Slots returns the current number of slots used in the lookup. // Slots returns the current number of slots used in the lookup.
@ -1894,18 +1689,14 @@ func (t *lookup) Slots() int {
} }
// Add adds a transaction to the lookup. // Add adds a transaction to the lookup.
func (t *lookup) Add(tx *types.Transaction, local bool) { func (t *lookup) Add(tx *types.Transaction) {
t.lock.Lock() t.lock.Lock()
defer t.lock.Unlock() defer t.lock.Unlock()
t.slots += numSlots(tx) t.slots += numSlots(tx)
slotsGauge.Update(int64(t.slots)) slotsGauge.Update(int64(t.slots))
if local { t.txs[tx.Hash()] = tx
t.locals[tx.Hash()] = tx
} else {
t.remotes[tx.Hash()] = tx
}
} }
// Remove removes a transaction from the lookup. // Remove removes a transaction from the lookup.
@ -1913,10 +1704,7 @@ func (t *lookup) Remove(hash common.Hash) {
t.lock.Lock() t.lock.Lock()
defer t.lock.Unlock() defer t.lock.Unlock()
tx, ok := t.locals[hash] tx, ok := t.txs[hash]
if !ok {
tx, ok = t.remotes[hash]
}
if !ok { if !ok {
log.Error("No transaction found to be deleted", "hash", hash) log.Error("No transaction found to be deleted", "hash", hash)
return return
@ -1924,36 +1712,18 @@ func (t *lookup) Remove(hash common.Hash) {
t.slots -= numSlots(tx) t.slots -= numSlots(tx)
slotsGauge.Update(int64(t.slots)) slotsGauge.Update(int64(t.slots))
delete(t.locals, hash) delete(t.txs, hash)
delete(t.remotes, hash)
} }
// RemoteToLocals migrates the transactions belongs to the given locals to locals // TxsBelowTip finds all remote transactions below the given tip threshold.
// set. The assumption is held the locals set is thread-safe to be used. func (t *lookup) TxsBelowTip(threshold *big.Int) types.Transactions {
func (t *lookup) RemoteToLocals(locals *accountSet) int {
t.lock.Lock()
defer t.lock.Unlock()
var migrated int
for hash, tx := range t.remotes {
if locals.containsTx(tx) {
t.locals[hash] = tx
delete(t.remotes, hash)
migrated += 1
}
}
return migrated
}
// RemotesBelowTip finds all remote transactions below the given tip threshold.
func (t *lookup) RemotesBelowTip(threshold *big.Int) types.Transactions {
found := make(types.Transactions, 0, 128) found := make(types.Transactions, 0, 128)
t.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool { t.Range(func(hash common.Hash, tx *types.Transaction) bool {
if tx.GasTipCapIntCmp(threshold) < 0 { if tx.GasTipCapIntCmp(threshold) < 0 {
found = append(found, tx) found = append(found, tx)
} }
return true return true
}, false, true) // Only iterate remotes })
return found return found
} }
@ -1982,24 +1752,13 @@ func (pool *LegacyPool) Clear() {
// The transaction addition may attempt to reserve the sender addr which // The transaction addition may attempt to reserve the sender addr which
// can't happen until Clear releases the reservation lock. Clear cannot // can't happen until Clear releases the reservation lock. Clear cannot
// acquire the subpool lock until the transaction addition is completed. // acquire the subpool lock until the transaction addition is completed.
for _, tx := range pool.all.remotes { for _, tx := range pool.all.txs {
senderAddr, _ := types.Sender(pool.signer, tx) senderAddr, _ := types.Sender(pool.signer, tx)
pool.reserve(senderAddr, false) pool.reserve(senderAddr, false)
} }
for localSender := range pool.locals.accounts {
pool.reserve(localSender, false)
}
pool.all = newLookup() pool.all = newLookup()
pool.priced = newPricedList(pool.all) pool.priced = newPricedList(pool.all)
pool.pending = make(map[common.Address]*list) pool.pending = make(map[common.Address]*list)
pool.queue = make(map[common.Address]*list) pool.queue = make(map[common.Address]*list)
pool.pendingNonces = newNoncer(pool.currentState) pool.pendingNonces = newNoncer(pool.currentState)
if !pool.config.NoLocals && pool.config.Journal != "" {
pool.journal = newTxJournal(pool.config.Journal)
if err := pool.journal.rotate(pool.local()); err != nil {
log.Warn("Failed to rotate transaction journal", "err", err)
}
}
} }

View file

@ -13,6 +13,7 @@
// //
// You should have received a copy of the GNU Lesser General Public License // You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package legacypool package legacypool
import ( import (

View file

@ -23,7 +23,6 @@ import (
"fmt" "fmt"
"math/big" "math/big"
"math/rand" "math/rand"
"os"
"sync" "sync"
"sync/atomic" "sync/atomic"
"testing" "testing"
@ -183,7 +182,7 @@ func validatePoolInternals(pool *LegacyPool) error {
return fmt.Errorf("total transaction count %d != %d pending + %d queued", total, pending, queued) return fmt.Errorf("total transaction count %d != %d pending + %d queued", total, pending, queued)
} }
pool.priced.Reheap() pool.priced.Reheap()
priced, remote := pool.priced.urgent.Len()+pool.priced.floating.Len(), pool.all.RemoteCount() priced, remote := pool.priced.urgent.Len()+pool.priced.floating.Len(), pool.all.Count()
if priced != remote { if priced != remote {
return fmt.Errorf("total priced transaction count %d != %d", priced, remote) return fmt.Errorf("total priced transaction count %d != %d", priced, remote)
} }
@ -252,7 +251,7 @@ func (c *testChain) State() (*state.StateDB, error) {
if *c.trigger { if *c.trigger {
c.statedb, _ = state.New(types.EmptyRootHash, state.NewDatabaseForTesting()) c.statedb, _ = state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
// simulate that the new head block included tx0 and tx1 // simulate that the new head block included tx0 and tx1
c.statedb.SetNonce(c.address, 2) c.statedb.SetNonce(c.address, 2, tracing.NonceChangeUnspecified)
c.statedb.SetBalance(c.address, new(uint256.Int).SetUint64(params.Ether), tracing.BalanceChangeUnspecified) c.statedb.SetBalance(c.address, new(uint256.Int).SetUint64(params.Ether), tracing.BalanceChangeUnspecified)
*c.trigger = false *c.trigger = false
} }
@ -313,7 +312,7 @@ func testAddBalance(pool *LegacyPool, addr common.Address, amount *big.Int) {
func testSetNonce(pool *LegacyPool, addr common.Address, nonce uint64) { func testSetNonce(pool *LegacyPool, addr common.Address, nonce uint64) {
pool.mu.Lock() pool.mu.Lock()
pool.currentState.SetNonce(addr, nonce) pool.currentState.SetNonce(addr, nonce, tracing.NonceChangeUnspecified)
pool.mu.Unlock() pool.mu.Unlock()
} }
@ -350,9 +349,6 @@ func TestInvalidTransactions(t *testing.T) {
if err, want := pool.addRemote(tx), txpool.ErrUnderpriced; !errors.Is(err, want) { if err, want := pool.addRemote(tx), txpool.ErrUnderpriced; !errors.Is(err, want) {
t.Errorf("want %v have %v", want, err) t.Errorf("want %v have %v", want, err)
} }
if err := pool.addLocal(tx); err != nil {
t.Error("expected", nil, "got", err)
}
} }
func TestQueue(t *testing.T) { func TestQueue(t *testing.T) {
@ -366,7 +362,7 @@ func TestQueue(t *testing.T) {
testAddBalance(pool, from, big.NewInt(1000)) testAddBalance(pool, from, big.NewInt(1000))
<-pool.requestReset(nil, nil) <-pool.requestReset(nil, nil)
pool.enqueueTx(tx.Hash(), tx, false, true) pool.enqueueTx(tx.Hash(), tx, true)
<-pool.requestPromoteExecutables(newAccountSet(pool.signer, from)) <-pool.requestPromoteExecutables(newAccountSet(pool.signer, from))
if len(pool.pending) != 1 { if len(pool.pending) != 1 {
t.Error("expected valid txs to be 1 is", len(pool.pending)) t.Error("expected valid txs to be 1 is", len(pool.pending))
@ -375,7 +371,7 @@ func TestQueue(t *testing.T) {
tx = transaction(1, 100, key) tx = transaction(1, 100, key)
from, _ = deriveSender(tx) from, _ = deriveSender(tx)
testSetNonce(pool, from, 2) testSetNonce(pool, from, 2)
pool.enqueueTx(tx.Hash(), tx, false, true) pool.enqueueTx(tx.Hash(), tx, true)
<-pool.requestPromoteExecutables(newAccountSet(pool.signer, from)) <-pool.requestPromoteExecutables(newAccountSet(pool.signer, from))
if _, ok := pool.pending[from].txs.items[tx.Nonce()]; ok { if _, ok := pool.pending[from].txs.items[tx.Nonce()]; ok {
@ -399,9 +395,9 @@ func TestQueue2(t *testing.T) {
testAddBalance(pool, from, big.NewInt(1000)) testAddBalance(pool, from, big.NewInt(1000))
pool.reset(nil, nil) pool.reset(nil, nil)
pool.enqueueTx(tx1.Hash(), tx1, false, true) pool.enqueueTx(tx1.Hash(), tx1, true)
pool.enqueueTx(tx2.Hash(), tx2, false, true) pool.enqueueTx(tx2.Hash(), tx2, true)
pool.enqueueTx(tx3.Hash(), tx3, false, true) pool.enqueueTx(tx3.Hash(), tx3, true)
pool.promoteExecutables([]common.Address{from}) pool.promoteExecutables([]common.Address{from})
if len(pool.pending) != 1 { if len(pool.pending) != 1 {
@ -476,14 +472,14 @@ func TestChainFork(t *testing.T) {
resetState() resetState()
tx := transaction(0, 100000, key) tx := transaction(0, 100000, key)
if _, err := pool.add(tx, false); err != nil { if _, err := pool.add(tx); err != nil {
t.Error("didn't expect error", err) t.Error("didn't expect error", err)
} }
pool.removeTx(tx.Hash(), true, true) pool.removeTx(tx.Hash(), true, true)
// reset the pool's internal state // reset the pool's internal state
resetState() resetState()
if _, err := pool.add(tx, false); err != nil { if _, err := pool.add(tx); err != nil {
t.Error("didn't expect error", err) t.Error("didn't expect error", err)
} }
} }
@ -510,10 +506,10 @@ func TestDoubleNonce(t *testing.T) {
tx3, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(100), 1000000, big.NewInt(1), nil), signer, key) tx3, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(100), 1000000, big.NewInt(1), nil), signer, key)
// Add the first two transaction, ensure higher priced stays only // Add the first two transaction, ensure higher priced stays only
if replace, err := pool.add(tx1, false); err != nil || replace { if replace, err := pool.add(tx1); err != nil || replace {
t.Errorf("first transaction insert failed (%v) or reported replacement (%v)", err, replace) t.Errorf("first transaction insert failed (%v) or reported replacement (%v)", err, replace)
} }
if replace, err := pool.add(tx2, false); err != nil || !replace { if replace, err := pool.add(tx2); err != nil || !replace {
t.Errorf("second transaction insert failed (%v) or not reported replacement (%v)", err, replace) t.Errorf("second transaction insert failed (%v) or not reported replacement (%v)", err, replace)
} }
<-pool.requestPromoteExecutables(newAccountSet(signer, addr)) <-pool.requestPromoteExecutables(newAccountSet(signer, addr))
@ -525,7 +521,7 @@ func TestDoubleNonce(t *testing.T) {
} }
// Add the third transaction and ensure it's not saved (smaller price) // Add the third transaction and ensure it's not saved (smaller price)
pool.add(tx3, false) pool.add(tx3)
<-pool.requestPromoteExecutables(newAccountSet(signer, addr)) <-pool.requestPromoteExecutables(newAccountSet(signer, addr))
if pool.pending[addr].Len() != 1 { if pool.pending[addr].Len() != 1 {
t.Error("expected 1 pending transactions, got", pool.pending[addr].Len()) t.Error("expected 1 pending transactions, got", pool.pending[addr].Len())
@ -548,7 +544,7 @@ func TestMissingNonce(t *testing.T) {
addr := crypto.PubkeyToAddress(key.PublicKey) addr := crypto.PubkeyToAddress(key.PublicKey)
testAddBalance(pool, addr, big.NewInt(100000000000000)) testAddBalance(pool, addr, big.NewInt(100000000000000))
tx := transaction(1, 100000, key) tx := transaction(1, 100000, key)
if _, err := pool.add(tx, false); err != nil { if _, err := pool.add(tx); err != nil {
t.Error("didn't expect error", err) t.Error("didn't expect error", err)
} }
if len(pool.pending) != 0 { if len(pool.pending) != 0 {
@ -607,21 +603,21 @@ func TestDropping(t *testing.T) {
tx11 = transaction(11, 200, key) tx11 = transaction(11, 200, key)
tx12 = transaction(12, 300, key) tx12 = transaction(12, 300, key)
) )
pool.all.Add(tx0, false) pool.all.Add(tx0)
pool.priced.Put(tx0, false) pool.priced.Put(tx0)
pool.promoteTx(account, tx0.Hash(), tx0) pool.promoteTx(account, tx0.Hash(), tx0)
pool.all.Add(tx1, false) pool.all.Add(tx1)
pool.priced.Put(tx1, false) pool.priced.Put(tx1)
pool.promoteTx(account, tx1.Hash(), tx1) pool.promoteTx(account, tx1.Hash(), tx1)
pool.all.Add(tx2, false) pool.all.Add(tx2)
pool.priced.Put(tx2, false) pool.priced.Put(tx2)
pool.promoteTx(account, tx2.Hash(), tx2) pool.promoteTx(account, tx2.Hash(), tx2)
pool.enqueueTx(tx10.Hash(), tx10, false, true) pool.enqueueTx(tx10.Hash(), tx10, true)
pool.enqueueTx(tx11.Hash(), tx11, false, true) pool.enqueueTx(tx11.Hash(), tx11, true)
pool.enqueueTx(tx12.Hash(), tx12, false, true) pool.enqueueTx(tx12.Hash(), tx12, true)
// Check that pre and post validations leave the pool as is // Check that pre and post validations leave the pool as is
if pool.pending[account].Len() != 3 { if pool.pending[account].Len() != 3 {
@ -899,13 +895,6 @@ func TestQueueAccountLimiting(t *testing.T) {
// This logic should not hold for local transactions, unless the local tracking // This logic should not hold for local transactions, unless the local tracking
// mechanism is disabled. // mechanism is disabled.
func TestQueueGlobalLimiting(t *testing.T) { func TestQueueGlobalLimiting(t *testing.T) {
testQueueGlobalLimiting(t, false)
}
func TestQueueGlobalLimitingNoLocals(t *testing.T) {
testQueueGlobalLimiting(t, true)
}
func testQueueGlobalLimiting(t *testing.T, nolocals bool) {
t.Parallel() t.Parallel()
// Create the pool to test the limit enforcement with // Create the pool to test the limit enforcement with
@ -913,7 +902,7 @@ func testQueueGlobalLimiting(t *testing.T, nolocals bool) {
blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig config := testTxPoolConfig
config.NoLocals = nolocals config.NoLocals = true
config.GlobalQueue = config.AccountQueue*3 - 1 // reduce the queue limits to shorten test time (-1 to make it non divisible) config.GlobalQueue = config.AccountQueue*3 - 1 // reduce the queue limits to shorten test time (-1 to make it non divisible)
pool := New(config, blockchain) pool := New(config, blockchain)
@ -926,7 +915,6 @@ func testQueueGlobalLimiting(t *testing.T, nolocals bool) {
keys[i], _ = crypto.GenerateKey() keys[i], _ = crypto.GenerateKey()
testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000)) testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000))
} }
local := keys[len(keys)-1]
// Generate and queue a batch of transactions // Generate and queue a batch of transactions
nonces := make(map[common.Address]uint64) nonces := make(map[common.Address]uint64)
@ -952,51 +940,12 @@ func testQueueGlobalLimiting(t *testing.T, nolocals bool) {
if queued > int(config.GlobalQueue) { if queued > int(config.GlobalQueue) {
t.Fatalf("total transactions overflow allowance: %d > %d", queued, config.GlobalQueue) t.Fatalf("total transactions overflow allowance: %d > %d", queued, config.GlobalQueue)
} }
// Generate a batch of transactions from the local account and import them
txs = txs[:0]
for i := uint64(0); i < 3*config.GlobalQueue; i++ {
txs = append(txs, transaction(i+1, 100000, local))
}
pool.addLocals(txs)
// If locals are disabled, the previous eviction algorithm should apply here too
if nolocals {
queued := 0
for addr, list := range pool.queue {
if list.Len() > int(config.AccountQueue) {
t.Errorf("addr %x: queued accounts overflown allowance: %d > %d", addr, list.Len(), config.AccountQueue)
}
queued += list.Len()
}
if queued > int(config.GlobalQueue) {
t.Fatalf("total transactions overflow allowance: %d > %d", queued, config.GlobalQueue)
}
} else {
// Local exemptions are enabled, make sure the local account owned the queue
if len(pool.queue) != 1 {
t.Errorf("multiple accounts in queue: have %v, want %v", len(pool.queue), 1)
}
// Also ensure no local transactions are ever dropped, even if above global limits
if queued := pool.queue[crypto.PubkeyToAddress(local.PublicKey)].Len(); uint64(queued) != 3*config.GlobalQueue {
t.Fatalf("local account queued transaction count mismatch: have %v, want %v", queued, 3*config.GlobalQueue)
}
}
} }
// Tests that if an account remains idle for a prolonged amount of time, any // Tests that if an account remains idle for a prolonged amount of time, any
// non-executable transactions queued up are dropped to prevent wasting resources // non-executable transactions queued up are dropped to prevent wasting resources
// on shuffling them around. // on shuffling them around.
//
// This logic should not hold for local transactions, unless the local tracking
// mechanism is disabled.
func TestQueueTimeLimiting(t *testing.T) { func TestQueueTimeLimiting(t *testing.T) {
testQueueTimeLimiting(t, false)
}
func TestQueueTimeLimitingNoLocals(t *testing.T) {
testQueueTimeLimiting(t, true)
}
func testQueueTimeLimiting(t *testing.T, nolocals bool) {
// Reduce the eviction interval to a testable amount // Reduce the eviction interval to a testable amount
defer func(old time.Duration) { evictionInterval = old }(evictionInterval) defer func(old time.Duration) { evictionInterval = old }(evictionInterval)
evictionInterval = time.Millisecond * 100 evictionInterval = time.Millisecond * 100
@ -1007,23 +956,17 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) {
config := testTxPoolConfig config := testTxPoolConfig
config.Lifetime = time.Second config.Lifetime = time.Second
config.NoLocals = nolocals
pool := New(config, blockchain) pool := New(config, blockchain)
pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close() defer pool.Close()
// Create two test accounts to ensure remotes expire but locals do not // Create a test account to ensure remotes expire
local, _ := crypto.GenerateKey()
remote, _ := crypto.GenerateKey() remote, _ := crypto.GenerateKey()
testAddBalance(pool, crypto.PubkeyToAddress(local.PublicKey), big.NewInt(1000000000))
testAddBalance(pool, crypto.PubkeyToAddress(remote.PublicKey), big.NewInt(1000000000)) testAddBalance(pool, crypto.PubkeyToAddress(remote.PublicKey), big.NewInt(1000000000))
// Add the two transactions and ensure they both are queued up // Add the transaction and ensure it is queued up
if err := pool.addLocal(pricedTransaction(1, 100000, big.NewInt(1), local)); err != nil {
t.Fatalf("failed to add local transaction: %v", err)
}
if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(1), remote)); err != nil { if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(1), remote)); err != nil {
t.Fatalf("failed to add remote transaction: %v", err) t.Fatalf("failed to add remote transaction: %v", err)
} }
@ -1031,7 +974,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) {
if pending != 0 { if pending != 0 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0) t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0)
} }
if queued != 2 { if queued != 1 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2)
} }
if err := validatePoolInternals(pool); err != nil { if err := validatePoolInternals(pool); err != nil {
@ -1046,7 +989,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) {
if pending != 0 { if pending != 0 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0) t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0)
} }
if queued != 2 { if queued != 1 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2)
} }
if err := validatePoolInternals(pool); err != nil { if err := validatePoolInternals(pool); err != nil {
@ -1060,22 +1003,15 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) {
if pending != 0 { if pending != 0 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0) t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0)
} }
if nolocals { if queued != 0 {
if queued != 0 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
}
} else {
if queued != 1 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1)
}
} }
if err := validatePoolInternals(pool); err != nil { if err := validatePoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err) t.Fatalf("pool internal state corrupted: %v", err)
} }
// remove current transactions and increase nonce to prepare for a reset and cleanup // remove current transactions and increase nonce to prepare for a reset and cleanup
statedb.SetNonce(crypto.PubkeyToAddress(remote.PublicKey), 2) statedb.SetNonce(crypto.PubkeyToAddress(remote.PublicKey), 2, tracing.NonceChangeUnspecified)
statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 2)
<-pool.requestReset(nil, nil) <-pool.requestReset(nil, nil)
// make sure queue, pending are cleared // make sure queue, pending are cleared
@ -1091,18 +1027,12 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) {
} }
// Queue gapped transactions // Queue gapped transactions
if err := pool.addLocal(pricedTransaction(4, 100000, big.NewInt(1), local)); err != nil {
t.Fatalf("failed to add remote transaction: %v", err)
}
if err := pool.addRemoteSync(pricedTransaction(4, 100000, big.NewInt(1), remote)); err != nil { if err := pool.addRemoteSync(pricedTransaction(4, 100000, big.NewInt(1), remote)); err != nil {
t.Fatalf("failed to add remote transaction: %v", err) t.Fatalf("failed to add remote transaction: %v", err)
} }
time.Sleep(5 * evictionInterval) // A half lifetime pass time.Sleep(5 * evictionInterval) // A half lifetime pass
// Queue executable transactions, the life cycle should be restarted. // Queue executable transactions, the life cycle should be restarted.
if err := pool.addLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil {
t.Fatalf("failed to add remote transaction: %v", err)
}
if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(1), remote)); err != nil { if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(1), remote)); err != nil {
t.Fatalf("failed to add remote transaction: %v", err) t.Fatalf("failed to add remote transaction: %v", err)
} }
@ -1110,11 +1040,11 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) {
// All gapped transactions shouldn't be kicked out // All gapped transactions shouldn't be kicked out
pending, queued = pool.Stats() pending, queued = pool.Stats()
if pending != 2 { if pending != 1 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 1)
} }
if queued != 2 { if queued != 1 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1)
} }
if err := validatePoolInternals(pool); err != nil { if err := validatePoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err) t.Fatalf("pool internal state corrupted: %v", err)
@ -1123,17 +1053,11 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) {
// The whole life time pass after last promotion, kick out stale transactions // The whole life time pass after last promotion, kick out stale transactions
time.Sleep(2 * config.Lifetime) time.Sleep(2 * config.Lifetime)
pending, queued = pool.Stats() pending, queued = pool.Stats()
if pending != 2 { if pending != 1 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 1)
} }
if nolocals { if queued != 0 {
if queued != 0 { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
}
} else {
if queued != 1 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1)
}
} }
if err := validatePoolInternals(pool); err != nil { if err := validatePoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err) t.Fatalf("pool internal state corrupted: %v", err)
@ -1363,8 +1287,6 @@ func TestPendingMinimumAllowance(t *testing.T) {
// Tests that setting the transaction pool gas price to a higher value correctly // Tests that setting the transaction pool gas price to a higher value correctly
// discards everything cheaper than that and moves any gapped transactions back // discards everything cheaper than that and moves any gapped transactions back
// from the pending pool to the queue. // from the pending pool to the queue.
//
// Note, local transactions are never allowed to be dropped.
func TestRepricing(t *testing.T) { func TestRepricing(t *testing.T) {
t.Parallel() t.Parallel()
@ -1382,7 +1304,7 @@ func TestRepricing(t *testing.T) {
defer sub.Unsubscribe() defer sub.Unsubscribe()
// Create a number of test accounts and fund them // Create a number of test accounts and fund them
keys := make([]*ecdsa.PrivateKey, 4) keys := make([]*ecdsa.PrivateKey, 3)
for i := 0; i < len(keys); i++ { for i := 0; i < len(keys); i++ {
keys[i], _ = crypto.GenerateKey() keys[i], _ = crypto.GenerateKey()
testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000)) testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000))
@ -1402,20 +1324,17 @@ func TestRepricing(t *testing.T) {
txs = append(txs, pricedTransaction(2, 100000, big.NewInt(1), keys[2])) txs = append(txs, pricedTransaction(2, 100000, big.NewInt(1), keys[2]))
txs = append(txs, pricedTransaction(3, 100000, big.NewInt(2), keys[2])) txs = append(txs, pricedTransaction(3, 100000, big.NewInt(2), keys[2]))
ltx := pricedTransaction(0, 100000, big.NewInt(1), keys[3])
// Import the batch and that both pending and queued transactions match up // Import the batch and that both pending and queued transactions match up
pool.addRemotesSync(txs) pool.addRemotesSync(txs)
pool.addLocal(ltx)
pending, queued := pool.Stats() pending, queued := pool.Stats()
if pending != 7 { if pending != 6 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 7) t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 6)
} }
if queued != 3 { if queued != 3 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3) t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3)
} }
if err := validateEvents(events, 7); err != nil { if err := validateEvents(events, 6); err != nil {
t.Fatalf("original event firing failed: %v", err) t.Fatalf("original event firing failed: %v", err)
} }
if err := validatePoolInternals(pool); err != nil { if err := validatePoolInternals(pool); err != nil {
@ -1425,8 +1344,8 @@ func TestRepricing(t *testing.T) {
pool.SetGasTip(big.NewInt(2)) pool.SetGasTip(big.NewInt(2))
pending, queued = pool.Stats() pending, queued = pool.Stats()
if pending != 2 { if pending != 1 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 1)
} }
if queued != 5 { if queued != 5 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 5) t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 5)
@ -1453,21 +1372,7 @@ func TestRepricing(t *testing.T) {
if err := validatePoolInternals(pool); err != nil { if err := validatePoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err) t.Fatalf("pool internal state corrupted: %v", err)
} }
// However we can add local underpriced transactions // we can fill gaps with properly priced transactions
tx := pricedTransaction(1, 100000, big.NewInt(1), keys[3])
if err := pool.addLocal(tx); err != nil {
t.Fatalf("failed to add underpriced local transaction: %v", err)
}
if pending, _ = pool.Stats(); pending != 3 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3)
}
if err := validateEvents(events, 1); err != nil {
t.Fatalf("post-reprice local event firing failed: %v", err)
}
if err := validatePoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
}
// And we can fill gaps with properly priced transactions
if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(2), keys[0])); err != nil { if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(2), keys[0])); err != nil {
t.Fatalf("failed to add pending transaction: %v", err) t.Fatalf("failed to add pending transaction: %v", err)
} }
@ -1504,29 +1409,16 @@ func TestMinGasPriceEnforced(t *testing.T) {
tx := pricedTransaction(0, 100000, big.NewInt(2), key) tx := pricedTransaction(0, 100000, big.NewInt(2), key)
pool.SetGasTip(big.NewInt(tx.GasPrice().Int64() + 1)) pool.SetGasTip(big.NewInt(tx.GasPrice().Int64() + 1))
if err := pool.addLocal(tx); !errors.Is(err, txpool.ErrUnderpriced) { if err := pool.Add([]*types.Transaction{tx}, true)[0]; !errors.Is(err, txpool.ErrUnderpriced) {
t.Fatalf("Min tip not enforced")
}
if err := pool.Add([]*types.Transaction{tx}, true, false)[0]; !errors.Is(err, txpool.ErrUnderpriced) {
t.Fatalf("Min tip not enforced") t.Fatalf("Min tip not enforced")
} }
tx = dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), key) tx = dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), key)
pool.SetGasTip(big.NewInt(tx.GasTipCap().Int64() + 1)) pool.SetGasTip(big.NewInt(tx.GasTipCap().Int64() + 1))
if err := pool.addLocal(tx); !errors.Is(err, txpool.ErrUnderpriced) { if err := pool.Add([]*types.Transaction{tx}, true)[0]; !errors.Is(err, txpool.ErrUnderpriced) {
t.Fatalf("Min tip not enforced") t.Fatalf("Min tip not enforced")
} }
if err := pool.Add([]*types.Transaction{tx}, true, false)[0]; !errors.Is(err, txpool.ErrUnderpriced) {
t.Fatalf("Min tip not enforced")
}
// Make sure the tx is accepted if locals are enabled
pool.config.NoLocals = false
if err := pool.Add([]*types.Transaction{tx}, true, false)[0]; err != nil {
t.Fatalf("Min tip enforced with locals enabled, error: %v", err)
}
} }
// Tests that setting the transaction pool gas price to a higher value correctly // Tests that setting the transaction pool gas price to a higher value correctly
@ -1567,20 +1459,17 @@ func TestRepricingDynamicFee(t *testing.T) {
txs = append(txs, dynamicFeeTx(2, 100000, big.NewInt(1), big.NewInt(1), keys[2])) txs = append(txs, dynamicFeeTx(2, 100000, big.NewInt(1), big.NewInt(1), keys[2]))
txs = append(txs, dynamicFeeTx(3, 100000, big.NewInt(2), big.NewInt(2), keys[2])) txs = append(txs, dynamicFeeTx(3, 100000, big.NewInt(2), big.NewInt(2), keys[2]))
ltx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[3])
// Import the batch and that both pending and queued transactions match up // Import the batch and that both pending and queued transactions match up
pool.addRemotesSync(txs) pool.addRemotesSync(txs)
pool.addLocal(ltx)
pending, queued := pool.Stats() pending, queued := pool.Stats()
if pending != 7 { if pending != 6 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 7) t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 6)
} }
if queued != 3 { if queued != 3 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3) t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3)
} }
if err := validateEvents(events, 7); err != nil { if err := validateEvents(events, 6); err != nil {
t.Fatalf("original event firing failed: %v", err) t.Fatalf("original event firing failed: %v", err)
} }
if err := validatePoolInternals(pool); err != nil { if err := validatePoolInternals(pool); err != nil {
@ -1590,8 +1479,8 @@ func TestRepricingDynamicFee(t *testing.T) {
pool.SetGasTip(big.NewInt(2)) pool.SetGasTip(big.NewInt(2))
pending, queued = pool.Stats() pending, queued = pool.Stats()
if pending != 2 { if pending != 1 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 1)
} }
if queued != 5 { if queued != 5 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 5) t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 5)
@ -1621,20 +1510,7 @@ func TestRepricingDynamicFee(t *testing.T) {
if err := validatePoolInternals(pool); err != nil { if err := validatePoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err) t.Fatalf("pool internal state corrupted: %v", err)
} }
// However we can add local underpriced transactions
tx = dynamicFeeTx(1, 100000, big.NewInt(1), big.NewInt(1), keys[3])
if err := pool.addLocal(tx); err != nil {
t.Fatalf("failed to add underpriced local transaction: %v", err)
}
if pending, _ = pool.Stats(); pending != 3 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3)
}
if err := validateEvents(events, 1); err != nil {
t.Fatalf("post-reprice local event firing failed: %v", err)
}
if err := validatePoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
}
// And we can fill gaps with properly priced transactions // And we can fill gaps with properly priced transactions
tx = pricedTransaction(1, 100000, big.NewInt(2), keys[0]) tx = pricedTransaction(1, 100000, big.NewInt(2), keys[0])
if err := pool.addRemote(tx); err != nil { if err := pool.addRemote(tx); err != nil {
@ -1656,77 +1532,6 @@ func TestRepricingDynamicFee(t *testing.T) {
} }
} }
// Tests that setting the transaction pool gas price to a higher value does not
// remove local transactions (legacy & dynamic fee).
func TestRepricingKeepsLocals(t *testing.T) {
t.Parallel()
// Create the pool to test the pricing enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed))
pool := New(testTxPoolConfig, blockchain)
pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
// Create a number of test accounts and fund them
keys := make([]*ecdsa.PrivateKey, 3)
for i := 0; i < len(keys); i++ {
keys[i], _ = crypto.GenerateKey()
testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(100000*1000000))
}
// Create transaction (both pending and queued) with a linearly growing gasprice
for i := uint64(0); i < 500; i++ {
// Add pending transaction.
pendingTx := pricedTransaction(i, 100000, big.NewInt(int64(i)), keys[2])
if err := pool.addLocal(pendingTx); err != nil {
t.Fatal(err)
}
// Add queued transaction.
queuedTx := pricedTransaction(i+501, 100000, big.NewInt(int64(i)), keys[2])
if err := pool.addLocal(queuedTx); err != nil {
t.Fatal(err)
}
// Add pending dynamic fee transaction.
pendingTx = dynamicFeeTx(i, 100000, big.NewInt(int64(i)+1), big.NewInt(int64(i)), keys[1])
if err := pool.addLocal(pendingTx); err != nil {
t.Fatal(err)
}
// Add queued dynamic fee transaction.
queuedTx = dynamicFeeTx(i+501, 100000, big.NewInt(int64(i)+1), big.NewInt(int64(i)), keys[1])
if err := pool.addLocal(queuedTx); err != nil {
t.Fatal(err)
}
}
pending, queued := pool.Stats()
expPending, expQueued := 1000, 1000
validate := func() {
pending, queued = pool.Stats()
if pending != expPending {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, expPending)
}
if queued != expQueued {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, expQueued)
}
if err := validatePoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
}
}
validate()
// Reprice the pool and check that nothing is dropped
pool.SetGasTip(big.NewInt(2))
validate()
pool.SetGasTip(big.NewInt(2))
pool.SetGasTip(big.NewInt(4))
pool.SetGasTip(big.NewInt(8))
pool.SetGasTip(big.NewInt(100))
validate()
}
// Tests that when the pool reaches its global transaction limit, underpriced // Tests that when the pool reaches its global transaction limit, underpriced
// transactions are gradually shifted out for more expensive ones and any gapped // transactions are gradually shifted out for more expensive ones and any gapped
// pending transactions are moved into the queue. // pending transactions are moved into the queue.
@ -1756,21 +1561,18 @@ func TestUnderpricing(t *testing.T) {
keys := make([]*ecdsa.PrivateKey, 5) keys := make([]*ecdsa.PrivateKey, 5)
for i := 0; i < len(keys); i++ { for i := 0; i < len(keys); i++ {
keys[i], _ = crypto.GenerateKey() keys[i], _ = crypto.GenerateKey()
testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000)) testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(10000000))
} }
// Generate and queue a batch of transactions, both pending and queued // Generate and queue a batch of transactions, both pending and queued
txs := types.Transactions{} txs := types.Transactions{}
txs = append(txs, pricedTransaction(0, 100000, big.NewInt(1), keys[0])) txs = append(txs, pricedTransaction(0, 100000, big.NewInt(1), keys[0])) // pending
txs = append(txs, pricedTransaction(1, 100000, big.NewInt(2), keys[0])) txs = append(txs, pricedTransaction(1, 100000, big.NewInt(2), keys[0])) // pending
txs = append(txs, pricedTransaction(0, 100000, big.NewInt(1), keys[2])) // pending
txs = append(txs, pricedTransaction(1, 100000, big.NewInt(1), keys[1]))
ltx := pricedTransaction(0, 100000, big.NewInt(1), keys[2])
txs = append(txs, pricedTransaction(1, 100000, big.NewInt(1), keys[1])) // queued
// Import the batch and that both pending and queued transactions match up // Import the batch and that both pending and queued transactions match up
pool.addRemotes(txs) pool.addRemotesSync(txs)
pool.addLocal(ltx)
pending, queued := pool.Stats() pending, queued := pool.Stats()
if pending != 3 { if pending != 3 {
@ -1790,7 +1592,7 @@ func TestUnderpricing(t *testing.T) {
t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
} }
// Replace a future transaction with a future transaction // Replace a future transaction with a future transaction
if err := pool.addRemoteSync(pricedTransaction(1, 100000, big.NewInt(2), keys[1])); err != nil { // +K1:1 => -K1:1 => Pend K0:0, K0:1, K2:0; Que K1:1 if err := pool.addRemoteSync(pricedTransaction(1, 100000, big.NewInt(5), keys[1])); err != nil { // +K1:1 => -K1:1 => Pend K0:0, K0:1, K2:0; Que K1:1
t.Fatalf("failed to add well priced transaction: %v", err) t.Fatalf("failed to add well priced transaction: %v", err)
} }
// Ensure that adding high priced transactions drops cheap ones, but not own // Ensure that adding high priced transactions drops cheap ones, but not own
@ -1800,48 +1602,26 @@ func TestUnderpricing(t *testing.T) {
if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(4), keys[1])); err != nil { // +K1:2 => -K0:0 => Pend K1:0, K2:0; Que K0:1 K1:2 if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(4), keys[1])); err != nil { // +K1:2 => -K0:0 => Pend K1:0, K2:0; Que K0:1 K1:2
t.Fatalf("failed to add well priced transaction: %v", err) t.Fatalf("failed to add well priced transaction: %v", err)
} }
if err := pool.addRemote(pricedTransaction(3, 100000, big.NewInt(5), keys[1])); err != nil { // +K1:3 => -K0:1 => Pend K1:0, K2:0; Que K1:2 K1:3 if err := pool.addRemoteSync(pricedTransaction(3, 100000, big.NewInt(5), keys[1])); err != nil { // +K1:3 => -K0:1 => Pend K1:0, K2:0; Que K1:2 K1:3
t.Fatalf("failed to add well priced transaction: %v", err) t.Fatalf("failed to add well priced transaction: %v", err)
} }
// Ensure that replacing a pending transaction with a future transaction fails // Ensure that replacing a pending transaction with a future transaction fails
if err := pool.addRemote(pricedTransaction(5, 100000, big.NewInt(6), keys[1])); err != txpool.ErrFutureReplacePending { if err := pool.addRemoteSync(pricedTransaction(5, 100000, big.NewInt(6), keys[1])); err != txpool.ErrFutureReplacePending {
t.Fatalf("adding future replace transaction error mismatch: have %v, want %v", err, txpool.ErrFutureReplacePending) t.Fatalf("adding future replace transaction error mismatch: have %v, want %v", err, txpool.ErrFutureReplacePending)
} }
pending, queued = pool.Stats() pending, queued = pool.Stats()
if pending != 2 { if pending != 4 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 4)
} }
if queued != 2 { if queued != 0 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
} }
if err := validateEvents(events, 2); err != nil { if err := validateEvents(events, 4); err != nil {
t.Fatalf("additional event firing failed: %v", err) t.Fatalf("additional event firing failed: %v", err)
} }
if err := validatePoolInternals(pool); err != nil { if err := validatePoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err) t.Fatalf("pool internal state corrupted: %v", err)
} }
// Ensure that adding local transactions can push out even higher priced ones
ltx = pricedTransaction(1, 100000, big.NewInt(0), keys[2])
if err := pool.addLocal(ltx); err != nil {
t.Fatalf("failed to append underpriced local transaction: %v", err)
}
ltx = pricedTransaction(0, 100000, big.NewInt(0), keys[3])
if err := pool.addLocal(ltx); err != nil {
t.Fatalf("failed to add new underpriced local transaction: %v", err)
}
pending, queued = pool.Stats()
if pending != 3 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3)
}
if queued != 1 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1)
}
if err := validateEvents(events, 2); err != nil {
t.Fatalf("local event firing failed: %v", err)
}
if err := validatePoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
}
} }
// Tests that more expensive transactions push out cheap ones from the pool, but // Tests that more expensive transactions push out cheap ones from the pool, but
@ -1915,8 +1695,6 @@ func TestStableUnderpricing(t *testing.T) {
// Tests that when the pool reaches its global transaction limit, underpriced // Tests that when the pool reaches its global transaction limit, underpriced
// transactions (legacy & dynamic fee) are gradually shifted out for more // transactions (legacy & dynamic fee) are gradually shifted out for more
// expensive ones and any gapped pending transactions are moved into the queue. // expensive ones and any gapped pending transactions are moved into the queue.
//
// Note, local transactions are never allowed to be dropped.
func TestUnderpricingDynamicFee(t *testing.T) { func TestUnderpricingDynamicFee(t *testing.T) {
t.Parallel() t.Parallel()
@ -1941,15 +1719,13 @@ func TestUnderpricingDynamicFee(t *testing.T) {
// Generate and queue a batch of transactions, both pending and queued // Generate and queue a batch of transactions, both pending and queued
txs := types.Transactions{} txs := types.Transactions{}
txs = append(txs, dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), keys[0])) txs = append(txs, dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), keys[0])) // pending
txs = append(txs, pricedTransaction(1, 100000, big.NewInt(2), keys[0])) txs = append(txs, pricedTransaction(1, 100000, big.NewInt(2), keys[0])) // pending
txs = append(txs, dynamicFeeTx(1, 100000, big.NewInt(2), big.NewInt(1), keys[1])) txs = append(txs, dynamicFeeTx(1, 100000, big.NewInt(2), big.NewInt(1), keys[1])) // queued
txs = append(txs, dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[2])) // pending
ltx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[2]) // Import the batch and check that both pending and queued transactions match up
pool.addRemotesSync(txs) // Pend K0:0, K0:1; Que K1:1
// Import the batch and that both pending and queued transactions match up
pool.addRemotes(txs) // Pend K0:0, K0:1; Que K1:1
pool.addLocal(ltx) // +K2:0 => Pend K0:0, K0:1, K2:0; Que K1:1
pending, queued := pool.Stats() pending, queued := pool.Stats()
if pending != 3 { if pending != 3 {
@ -1967,13 +1743,13 @@ func TestUnderpricingDynamicFee(t *testing.T) {
// Ensure that adding an underpriced transaction fails // Ensure that adding an underpriced transaction fails
tx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[1]) tx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[1])
if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) { // Pend K0:0, K0:1, K2:0; Que K1:1 if err := pool.addRemoteSync(tx); !errors.Is(err, txpool.ErrUnderpriced) { // Pend K0:0, K0:1, K2:0; Que K1:1
t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
} }
// Ensure that adding high priced transactions drops cheap ones, but not own // Ensure that adding high priced transactions drops cheap ones, but not own
tx = pricedTransaction(0, 100000, big.NewInt(2), keys[1]) tx = pricedTransaction(0, 100000, big.NewInt(2), keys[1])
if err := pool.addRemote(tx); err != nil { // +K1:0, -K1:1 => Pend K0:0, K0:1, K1:0, K2:0; Que - if err := pool.addRemoteSync(tx); err != nil { // +K1:0, -K1:1 => Pend K0:0, K0:1, K1:0, K2:0; Que -
t.Fatalf("failed to add well priced transaction: %v", err) t.Fatalf("failed to add well priced transaction: %v", err)
} }
@ -1986,40 +1762,18 @@ func TestUnderpricingDynamicFee(t *testing.T) {
t.Fatalf("failed to add well priced transaction: %v", err) t.Fatalf("failed to add well priced transaction: %v", err)
} }
pending, queued = pool.Stats() pending, queued = pool.Stats()
if pending != 2 { if pending != 4 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 4)
} }
if queued != 2 { if queued != 0 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
} }
if err := validateEvents(events, 2); err != nil { if err := validateEvents(events, 3); err != nil {
t.Fatalf("additional event firing failed: %v", err) t.Fatalf("additional event firing failed: %v", err)
} }
if err := validatePoolInternals(pool); err != nil { if err := validatePoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err) t.Fatalf("pool internal state corrupted: %v", err)
} }
// Ensure that adding local transactions can push out even higher priced ones
ltx = dynamicFeeTx(1, 100000, big.NewInt(0), big.NewInt(0), keys[2])
if err := pool.addLocal(ltx); err != nil {
t.Fatalf("failed to append underpriced local transaction: %v", err)
}
ltx = dynamicFeeTx(0, 100000, big.NewInt(0), big.NewInt(0), keys[3])
if err := pool.addLocal(ltx); err != nil {
t.Fatalf("failed to add new underpriced local transaction: %v", err)
}
pending, queued = pool.Stats()
if pending != 3 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3)
}
if queued != 1 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1)
}
if err := validateEvents(events, 2); err != nil {
t.Fatalf("local event firing failed: %v", err)
}
if err := validatePoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
}
} }
// Tests whether highest fee cap transaction is retained after a batch of high effective // Tests whether highest fee cap transaction is retained after a batch of high effective
@ -2039,7 +1793,7 @@ func TestDualHeapEviction(t *testing.T) {
) )
check := func(tx *types.Transaction, name string) { check := func(tx *types.Transaction, name string) {
if pool.all.GetRemote(tx.Hash()) == nil { if pool.all.Get(tx.Hash()) == nil {
t.Fatalf("highest %s transaction evicted from the pool", name) t.Fatalf("highest %s transaction evicted from the pool", name)
} }
} }
@ -2336,122 +2090,6 @@ func TestReplacementDynamicFee(t *testing.T) {
} }
} }
// Tests that local transactions are journaled to disk, but remote transactions
// get discarded between restarts.
func TestJournaling(t *testing.T) { testJournaling(t, false) }
func TestJournalingNoLocals(t *testing.T) { testJournaling(t, true) }
func testJournaling(t *testing.T, nolocals bool) {
t.Parallel()
// Create a temporary file for the journal
file, err := os.CreateTemp("", "")
if err != nil {
t.Fatalf("failed to create temporary journal: %v", err)
}
journal := file.Name()
defer os.Remove(journal)
// Clean up the temporary file, we only need the path for now
file.Close()
os.Remove(journal)
// Create the original pool to inject transaction into the journal
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
config := testTxPoolConfig
config.NoLocals = nolocals
config.Journal = journal
config.Rejournal = time.Second
pool := New(config, blockchain)
pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
// Create two test accounts to ensure remotes expire but locals do not
local, _ := crypto.GenerateKey()
remote, _ := crypto.GenerateKey()
testAddBalance(pool, crypto.PubkeyToAddress(local.PublicKey), big.NewInt(1000000000))
testAddBalance(pool, crypto.PubkeyToAddress(remote.PublicKey), big.NewInt(1000000000))
// Add three local and a remote transactions and ensure they are queued up
if err := pool.addLocal(pricedTransaction(0, 100000, big.NewInt(1), local)); err != nil {
t.Fatalf("failed to add local transaction: %v", err)
}
if err := pool.addLocal(pricedTransaction(1, 100000, big.NewInt(1), local)); err != nil {
t.Fatalf("failed to add local transaction: %v", err)
}
if err := pool.addLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil {
t.Fatalf("failed to add local transaction: %v", err)
}
if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), remote)); err != nil {
t.Fatalf("failed to add remote transaction: %v", err)
}
pending, queued := pool.Stats()
if pending != 4 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 4)
}
if queued != 0 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
}
if err := validatePoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
}
// Terminate the old pool, bump the local nonce, create a new pool and ensure relevant transaction survive
pool.Close()
statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1)
blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
pool = New(config, blockchain)
pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
pending, queued = pool.Stats()
if queued != 0 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
}
if nolocals {
if pending != 0 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0)
}
} else {
if pending != 2 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
}
}
if err := validatePoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
}
// Bump the nonce temporarily and ensure the newly invalidated transaction is removed
statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 2)
<-pool.requestReset(nil, nil)
time.Sleep(2 * config.Rejournal)
pool.Close()
statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1)
blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
pool = New(config, blockchain)
pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
pending, queued = pool.Stats()
if pending != 0 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0)
}
if nolocals {
if queued != 0 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
}
} else {
if queued != 1 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1)
}
}
if err := validatePoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
}
pool.Close()
}
// TestStatusCheck tests that the pool can correctly retrieve the // TestStatusCheck tests that the pool can correctly retrieve the
// pending status of individual transactions. // pending status of individual transactions.
func TestStatusCheck(t *testing.T) { func TestStatusCheck(t *testing.T) {
@ -2566,7 +2204,7 @@ func benchmarkFuturePromotion(b *testing.B, size int) {
for i := 0; i < size; i++ { for i := 0; i < size; i++ {
tx := transaction(uint64(1+i), 100000, key) tx := transaction(uint64(1+i), 100000, key)
pool.enqueueTx(tx.Hash(), tx, false, true) pool.enqueueTx(tx.Hash(), tx, true)
} }
// Benchmark the speed of pool validation // Benchmark the speed of pool validation
b.ResetTimer() b.ResetTimer()
@ -2576,15 +2214,11 @@ func benchmarkFuturePromotion(b *testing.B, size int) {
} }
// Benchmarks the speed of batched transaction insertion. // Benchmarks the speed of batched transaction insertion.
func BenchmarkBatchInsert100(b *testing.B) { benchmarkBatchInsert(b, 100, false) } func BenchmarkBatchInsert100(b *testing.B) { benchmarkBatchInsert(b, 100) }
func BenchmarkBatchInsert1000(b *testing.B) { benchmarkBatchInsert(b, 1000, false) } func BenchmarkBatchInsert1000(b *testing.B) { benchmarkBatchInsert(b, 1000) }
func BenchmarkBatchInsert10000(b *testing.B) { benchmarkBatchInsert(b, 10000, false) } func BenchmarkBatchInsert10000(b *testing.B) { benchmarkBatchInsert(b, 10000) }
func BenchmarkBatchLocalInsert100(b *testing.B) { benchmarkBatchInsert(b, 100, true) } func benchmarkBatchInsert(b *testing.B, size int) {
func BenchmarkBatchLocalInsert1000(b *testing.B) { benchmarkBatchInsert(b, 1000, true) }
func BenchmarkBatchLocalInsert10000(b *testing.B) { benchmarkBatchInsert(b, 10000, true) }
func benchmarkBatchInsert(b *testing.B, size int, local bool) {
// Generate a batch of transactions to enqueue into the pool // Generate a batch of transactions to enqueue into the pool
pool, key := setupPool() pool, key := setupPool()
defer pool.Close() defer pool.Close()
@ -2602,46 +2236,7 @@ func benchmarkBatchInsert(b *testing.B, size int, local bool) {
// Benchmark importing the transactions into the queue // Benchmark importing the transactions into the queue
b.ResetTimer() b.ResetTimer()
for _, batch := range batches { for _, batch := range batches {
if local { pool.addRemotes(batch)
pool.addLocals(batch)
} else {
pool.addRemotes(batch)
}
}
}
func BenchmarkInsertRemoteWithAllLocals(b *testing.B) {
// Allocate keys for testing
key, _ := crypto.GenerateKey()
account := crypto.PubkeyToAddress(key.PublicKey)
remoteKey, _ := crypto.GenerateKey()
remoteAddr := crypto.PubkeyToAddress(remoteKey.PublicKey)
locals := make([]*types.Transaction, 4096+1024) // Occupy all slots
for i := 0; i < len(locals); i++ {
locals[i] = transaction(uint64(i), 100000, key)
}
remotes := make([]*types.Transaction, 1000)
for i := 0; i < len(remotes); i++ {
remotes[i] = pricedTransaction(uint64(i), 100000, big.NewInt(2), remoteKey) // Higher gasprice
}
// Benchmark importing the transactions into the queue
b.ResetTimer()
for i := 0; i < b.N; i++ {
b.StopTimer()
pool, _ := setupPool()
testAddBalance(pool, account, big.NewInt(100000000))
for _, local := range locals {
pool.addLocal(local)
}
b.StartTimer()
// Assign a high enough balance for testing
testAddBalance(pool, remoteAddr, big.NewInt(100000000))
for i := 0; i < len(remotes); i++ {
pool.addRemotes([]*types.Transaction{remotes[i]})
}
pool.Close()
} }
} }

View file

@ -52,31 +52,31 @@ func (h *nonceHeap) Pop() interface{} {
return x return x
} }
// sortedMap is a nonce->transaction hash map with a heap based index to allow // SortedMap is a nonce->transaction hash map with a heap based index to allow
// iterating over the contents in a nonce-incrementing way. // iterating over the contents in a nonce-incrementing way.
type sortedMap struct { type SortedMap struct {
items map[uint64]*types.Transaction // Hash map storing the transaction data items map[uint64]*types.Transaction // Hash map storing the transaction data
index *nonceHeap // Heap of nonces of all the stored transactions (non-strict mode) index *nonceHeap // Heap of nonces of all the stored transactions (non-strict mode)
cache types.Transactions // Cache of the transactions already sorted cache types.Transactions // Cache of the transactions already sorted
cacheMu sync.Mutex // Mutex covering the cache cacheMu sync.Mutex // Mutex covering the cache
} }
// newSortedMap creates a new nonce-sorted transaction map. // NewSortedMap creates a new nonce-sorted transaction map.
func newSortedMap() *sortedMap { func NewSortedMap() *SortedMap {
return &sortedMap{ return &SortedMap{
items: make(map[uint64]*types.Transaction), items: make(map[uint64]*types.Transaction),
index: new(nonceHeap), index: new(nonceHeap),
} }
} }
// Get retrieves the current transactions associated with the given nonce. // Get retrieves the current transactions associated with the given nonce.
func (m *sortedMap) Get(nonce uint64) *types.Transaction { func (m *SortedMap) Get(nonce uint64) *types.Transaction {
return m.items[nonce] return m.items[nonce]
} }
// Put inserts a new transaction into the map, also updating the map's nonce // Put inserts a new transaction into the map, also updating the map's nonce
// index. If a transaction already exists with the same nonce, it's overwritten. // index. If a transaction already exists with the same nonce, it's overwritten.
func (m *sortedMap) Put(tx *types.Transaction) { func (m *SortedMap) Put(tx *types.Transaction) {
nonce := tx.Nonce() nonce := tx.Nonce()
if m.items[nonce] == nil { if m.items[nonce] == nil {
heap.Push(m.index, nonce) heap.Push(m.index, nonce)
@ -89,7 +89,7 @@ func (m *sortedMap) Put(tx *types.Transaction) {
// Forward removes all transactions from the map with a nonce lower than the // Forward removes all transactions from the map with a nonce lower than the
// provided threshold. Every removed transaction is returned for any post-removal // provided threshold. Every removed transaction is returned for any post-removal
// maintenance. // maintenance.
func (m *sortedMap) Forward(threshold uint64) types.Transactions { func (m *SortedMap) Forward(threshold uint64) types.Transactions {
var removed types.Transactions var removed types.Transactions
// Pop off heap items until the threshold is reached // Pop off heap items until the threshold is reached
@ -112,7 +112,7 @@ func (m *sortedMap) Forward(threshold uint64) types.Transactions {
// Filter, as opposed to 'filter', re-initialises the heap after the operation is done. // Filter, as opposed to 'filter', re-initialises the heap after the operation is done.
// If you want to do several consecutive filterings, it's therefore better to first // If you want to do several consecutive filterings, it's therefore better to first
// do a .filter(func1) followed by .Filter(func2) or reheap() // do a .filter(func1) followed by .Filter(func2) or reheap()
func (m *sortedMap) Filter(filter func(*types.Transaction) bool) types.Transactions { func (m *SortedMap) Filter(filter func(*types.Transaction) bool) types.Transactions {
removed := m.filter(filter) removed := m.filter(filter)
// If transactions were removed, the heap and cache are ruined // If transactions were removed, the heap and cache are ruined
if len(removed) > 0 { if len(removed) > 0 {
@ -121,7 +121,7 @@ func (m *sortedMap) Filter(filter func(*types.Transaction) bool) types.Transacti
return removed return removed
} }
func (m *sortedMap) reheap() { func (m *SortedMap) reheap() {
*m.index = make([]uint64, 0, len(m.items)) *m.index = make([]uint64, 0, len(m.items))
for nonce := range m.items { for nonce := range m.items {
*m.index = append(*m.index, nonce) *m.index = append(*m.index, nonce)
@ -134,7 +134,7 @@ func (m *sortedMap) reheap() {
// filter is identical to Filter, but **does not** regenerate the heap. This method // filter is identical to Filter, but **does not** regenerate the heap. This method
// should only be used if followed immediately by a call to Filter or reheap() // should only be used if followed immediately by a call to Filter or reheap()
func (m *sortedMap) filter(filter func(*types.Transaction) bool) types.Transactions { func (m *SortedMap) filter(filter func(*types.Transaction) bool) types.Transactions {
var removed types.Transactions var removed types.Transactions
// Collect all the transactions to filter out // Collect all the transactions to filter out
@ -154,7 +154,7 @@ func (m *sortedMap) filter(filter func(*types.Transaction) bool) types.Transacti
// Cap places a hard limit on the number of items, returning all transactions // Cap places a hard limit on the number of items, returning all transactions
// exceeding that limit. // exceeding that limit.
func (m *sortedMap) Cap(threshold int) types.Transactions { func (m *SortedMap) Cap(threshold int) types.Transactions {
// Short circuit if the number of items is under the limit // Short circuit if the number of items is under the limit
if len(m.items) <= threshold { if len(m.items) <= threshold {
return nil return nil
@ -181,7 +181,7 @@ func (m *sortedMap) Cap(threshold int) types.Transactions {
// Remove deletes a transaction from the maintained map, returning whether the // Remove deletes a transaction from the maintained map, returning whether the
// transaction was found. // transaction was found.
func (m *sortedMap) Remove(nonce uint64) bool { func (m *SortedMap) Remove(nonce uint64) bool {
// Short circuit if no transaction is present // Short circuit if no transaction is present
_, ok := m.items[nonce] _, ok := m.items[nonce]
if !ok { if !ok {
@ -209,7 +209,7 @@ func (m *sortedMap) Remove(nonce uint64) bool {
// Note, all transactions with nonces lower than start will also be returned to // Note, all transactions with nonces lower than start will also be returned to
// prevent getting into an invalid state. This is not something that should ever // prevent getting into an invalid state. This is not something that should ever
// happen but better to be self correcting than failing! // happen but better to be self correcting than failing!
func (m *sortedMap) Ready(start uint64) types.Transactions { func (m *SortedMap) Ready(start uint64) types.Transactions {
// Short circuit if no transactions are available // Short circuit if no transactions are available
if m.index.Len() == 0 || (*m.index)[0] > start { if m.index.Len() == 0 || (*m.index)[0] > start {
return nil return nil
@ -229,11 +229,11 @@ func (m *sortedMap) Ready(start uint64) types.Transactions {
} }
// Len returns the length of the transaction map. // Len returns the length of the transaction map.
func (m *sortedMap) Len() int { func (m *SortedMap) Len() int {
return len(m.items) return len(m.items)
} }
func (m *sortedMap) flatten() types.Transactions { func (m *SortedMap) flatten() types.Transactions {
m.cacheMu.Lock() m.cacheMu.Lock()
defer m.cacheMu.Unlock() defer m.cacheMu.Unlock()
// If the sorting was not cached yet, create and cache it // If the sorting was not cached yet, create and cache it
@ -250,7 +250,7 @@ func (m *sortedMap) flatten() types.Transactions {
// Flatten creates a nonce-sorted slice of transactions based on the loosely // Flatten creates a nonce-sorted slice of transactions based on the loosely
// sorted internal representation. The result of the sorting is cached in case // sorted internal representation. The result of the sorting is cached in case
// it's requested again before any modifications are made to the contents. // it's requested again before any modifications are made to the contents.
func (m *sortedMap) Flatten() types.Transactions { func (m *SortedMap) Flatten() types.Transactions {
cache := m.flatten() cache := m.flatten()
// Copy the cache to prevent accidental modification // Copy the cache to prevent accidental modification
txs := make(types.Transactions, len(cache)) txs := make(types.Transactions, len(cache))
@ -260,7 +260,7 @@ func (m *sortedMap) Flatten() types.Transactions {
// LastElement returns the last element of a flattened list, thus, the // LastElement returns the last element of a flattened list, thus, the
// transaction with the highest nonce // transaction with the highest nonce
func (m *sortedMap) LastElement() *types.Transaction { func (m *SortedMap) LastElement() *types.Transaction {
cache := m.flatten() cache := m.flatten()
return cache[len(cache)-1] return cache[len(cache)-1]
} }
@ -271,7 +271,7 @@ func (m *sortedMap) LastElement() *types.Transaction {
// executable/future queue, with minor behavioral changes. // executable/future queue, with minor behavioral changes.
type list struct { type list struct {
strict bool // Whether nonces are strictly continuous or not strict bool // Whether nonces are strictly continuous or not
txs *sortedMap // Heap indexed sorted hash map of the transactions txs *SortedMap // Heap indexed sorted hash map of the transactions
costcap *uint256.Int // Price of the highest costing transaction (reset only if exceeds balance) costcap *uint256.Int // Price of the highest costing transaction (reset only if exceeds balance)
gascap uint64 // Gas limit of the highest spending transaction (reset only if exceeds block limit) gascap uint64 // Gas limit of the highest spending transaction (reset only if exceeds block limit)
@ -283,7 +283,7 @@ type list struct {
func newList(strict bool) *list { func newList(strict bool) *list {
return &list{ return &list{
strict: strict, strict: strict,
txs: newSortedMap(), txs: NewSortedMap(),
costcap: new(uint256.Int), costcap: new(uint256.Int),
totalcost: new(uint256.Int), totalcost: new(uint256.Int),
} }
@ -556,10 +556,7 @@ func newPricedList(all *lookup) *pricedList {
} }
// Put inserts a new transaction into the heap. // Put inserts a new transaction into the heap.
func (l *pricedList) Put(tx *types.Transaction, local bool) { func (l *pricedList) Put(tx *types.Transaction) {
if local {
return
}
// Insert every new transaction to the urgent heap first; Discard will balance the heaps // Insert every new transaction to the urgent heap first; Discard will balance the heaps
heap.Push(&l.urgent, tx) heap.Push(&l.urgent, tx)
} }
@ -593,7 +590,7 @@ func (l *pricedList) underpricedFor(h *priceHeap, tx *types.Transaction) bool {
// Discard stale price points if found at the heap start // Discard stale price points if found at the heap start
for len(h.list) > 0 { for len(h.list) > 0 {
head := h.list[0] head := h.list[0]
if l.all.GetRemote(head.Hash()) == nil { // Removed or migrated if l.all.Get(head.Hash()) == nil { // Removed or migrated
l.stales.Add(-1) l.stales.Add(-1)
heap.Pop(h) heap.Pop(h)
continue continue
@ -612,15 +609,13 @@ func (l *pricedList) underpricedFor(h *priceHeap, tx *types.Transaction) bool {
// Discard finds a number of most underpriced transactions, removes them from the // Discard finds a number of most underpriced transactions, removes them from the
// priced list and returns them for further removal from the entire pool. // priced list and returns them for further removal from the entire pool.
// If noPending is set to true, we will only consider the floating list // If noPending is set to true, we will only consider the floating list
// func (l *pricedList) Discard(slots int) (types.Transactions, bool) {
// Note local transaction won't be considered for eviction.
func (l *pricedList) Discard(slots int, force bool) (types.Transactions, bool) {
drop := make(types.Transactions, 0, slots) // Remote underpriced transactions to drop drop := make(types.Transactions, 0, slots) // Remote underpriced transactions to drop
for slots > 0 { for slots > 0 {
if len(l.urgent.list)*floatingRatio > len(l.floating.list)*urgentRatio { if len(l.urgent.list)*floatingRatio > len(l.floating.list)*urgentRatio {
// Discard stale transactions if found during cleanup // Discard stale transactions if found during cleanup
tx := heap.Pop(&l.urgent).(*types.Transaction) tx := heap.Pop(&l.urgent).(*types.Transaction)
if l.all.GetRemote(tx.Hash()) == nil { // Removed or migrated if l.all.Get(tx.Hash()) == nil { // Removed or migrated
l.stales.Add(-1) l.stales.Add(-1)
continue continue
} }
@ -633,7 +628,7 @@ func (l *pricedList) Discard(slots int, force bool) (types.Transactions, bool) {
} }
// Discard stale transactions if found during cleanup // Discard stale transactions if found during cleanup
tx := heap.Pop(&l.floating).(*types.Transaction) tx := heap.Pop(&l.floating).(*types.Transaction)
if l.all.GetRemote(tx.Hash()) == nil { // Removed or migrated if l.all.Get(tx.Hash()) == nil { // Removed or migrated
l.stales.Add(-1) l.stales.Add(-1)
continue continue
} }
@ -643,7 +638,7 @@ func (l *pricedList) Discard(slots int, force bool) (types.Transactions, bool) {
} }
} }
// If we still can't make enough room for the new transaction // If we still can't make enough room for the new transaction
if slots > 0 && !force { if slots > 0 {
for _, tx := range drop { for _, tx := range drop {
heap.Push(&l.urgent, tx) heap.Push(&l.urgent, tx)
} }
@ -658,11 +653,11 @@ func (l *pricedList) Reheap() {
defer l.reheapMu.Unlock() defer l.reheapMu.Unlock()
start := time.Now() start := time.Now()
l.stales.Store(0) l.stales.Store(0)
l.urgent.list = make([]*types.Transaction, 0, l.all.RemoteCount()) l.urgent.list = make([]*types.Transaction, 0, l.all.Count())
l.all.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool { l.all.Range(func(hash common.Hash, tx *types.Transaction) bool {
l.urgent.list = append(l.urgent.list, tx) l.urgent.list = append(l.urgent.list, tx)
return true return true
}, false, true) // Only iterate remotes })
heap.Init(&l.urgent) heap.Init(&l.urgent)
// balance out the two heaps by moving the worse half of transactions into the // balance out the two heaps by moving the worse half of transactions into the

View file

@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License // You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package legacypool package locals
import ( import (
"errors" "errors"

View file

@ -0,0 +1,212 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package locals implements tracking for "local" transactions
package locals
import (
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/txpool/legacypool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/params"
"golang.org/x/exp/slices"
)
var (
recheckInterval = time.Minute
localGauge = metrics.GetOrRegisterGauge("txpool/local", nil)
)
// TxTracker is a struct used to track priority transactions; it will check from
// time to time if the main pool has forgotten about any of the transaction
// it is tracking, and if so, submit it again.
// This is used to track 'locals'.
// This struct does not care about transaction validity, price-bumps or account limits,
// but optimistically accepts transactions.
type TxTracker struct {
all map[common.Hash]*types.Transaction // All tracked transactions
byAddr map[common.Address]*legacypool.SortedMap // Transactions by address
journal *journal // Journal of local transaction to back up to disk
rejournal time.Duration // How often to rotate journal
pool *txpool.TxPool // The tx pool to interact with
signer types.Signer
shutdownCh chan struct{}
mu sync.Mutex
wg sync.WaitGroup
}
// New creates a new TxTracker
func New(journalPath string, journalTime time.Duration, chainConfig *params.ChainConfig, next *txpool.TxPool) *TxTracker {
pool := &TxTracker{
all: make(map[common.Hash]*types.Transaction),
byAddr: make(map[common.Address]*legacypool.SortedMap),
signer: types.LatestSigner(chainConfig),
shutdownCh: make(chan struct{}),
pool: next,
}
if journalPath != "" {
pool.journal = newTxJournal(journalPath)
pool.rejournal = journalTime
}
return pool
}
// Track adds a transaction to the tracked set.
// Note: blob-type transactions are ignored.
func (tracker *TxTracker) Track(tx *types.Transaction) {
tracker.TrackAll([]*types.Transaction{tx})
}
// TrackAll adds a list of transactions to the tracked set.
// Note: blob-type transactions are ignored.
func (tracker *TxTracker) TrackAll(txs []*types.Transaction) {
tracker.mu.Lock()
defer tracker.mu.Unlock()
for _, tx := range txs {
if tx.Type() == types.BlobTxType {
continue
}
// If we're already tracking it, it's a no-op
if _, ok := tracker.all[tx.Hash()]; ok {
continue
}
addr, err := types.Sender(tracker.signer, tx)
if err != nil { // Ignore this tx
continue
}
tracker.all[tx.Hash()] = tx
if tracker.byAddr[addr] == nil {
tracker.byAddr[addr] = legacypool.NewSortedMap()
}
tracker.byAddr[addr].Put(tx)
if tracker.journal != nil {
_ = tracker.journal.insert(tx)
}
}
localGauge.Update(int64(len(tracker.all)))
}
// recheck checks and returns any transactions that needs to be resubmitted.
func (tracker *TxTracker) recheck(journalCheck bool) (resubmits []*types.Transaction, rejournal map[common.Address]types.Transactions) {
tracker.mu.Lock()
defer tracker.mu.Unlock()
var (
numStales = 0
numOk = 0
)
for sender, txs := range tracker.byAddr {
// Wipe the stales
stales := txs.Forward(tracker.pool.Nonce(sender))
for _, tx := range stales {
delete(tracker.all, tx.Hash())
}
numStales += len(stales)
// Check the non-stale
for _, tx := range txs.Flatten() {
if tracker.pool.Has(tx.Hash()) {
numOk++
continue
}
resubmits = append(resubmits, tx)
}
}
if journalCheck { // rejournal
rejournal = make(map[common.Address]types.Transactions)
for _, tx := range tracker.all {
addr, _ := types.Sender(tracker.signer, tx)
rejournal[addr] = append(rejournal[addr], tx)
}
// Sort them
for _, list := range rejournal {
// cmp(a, b) should return a negative number when a < b,
slices.SortFunc(list, func(a, b *types.Transaction) int {
return int(a.Nonce() - b.Nonce())
})
}
}
localGauge.Update(int64(len(tracker.all)))
log.Debug("Tx tracker status", "need-resubmit", len(resubmits), "stale", numStales, "ok", numOk)
return resubmits, rejournal
}
// Start implements node.Lifecycle interface
// Start is called after all services have been constructed and the networking
// layer was also initialized to spawn any goroutines required by the service.
func (tracker *TxTracker) Start() error {
tracker.wg.Add(1)
go tracker.loop()
return nil
}
// Stop implements node.Lifecycle interface
// Stop terminates all goroutines belonging to the service, blocking until they
// are all terminated.
func (tracker *TxTracker) Stop() error {
close(tracker.shutdownCh)
tracker.wg.Wait()
return nil
}
func (tracker *TxTracker) loop() {
defer tracker.wg.Done()
if tracker.journal != nil {
tracker.journal.load(func(transactions []*types.Transaction) []error {
tracker.TrackAll(transactions)
return nil
})
defer tracker.journal.close()
}
var (
lastJournal = time.Now()
timer = time.NewTimer(10 * time.Second) // Do initial check after 10 seconds, do rechecks more seldom.
)
for {
select {
case <-tracker.shutdownCh:
return
case <-timer.C:
checkJournal := tracker.journal != nil && time.Since(lastJournal) > tracker.rejournal
resubmits, rejournal := tracker.recheck(checkJournal)
if len(resubmits) > 0 {
tracker.pool.Add(resubmits, false)
}
if checkJournal {
// Lock to prevent journal.rotate <-> journal.insert (via TrackAll) conflicts
tracker.mu.Lock()
lastJournal = time.Now()
if err := tracker.journal.rotate(rejournal); err != nil {
log.Warn("Transaction journal rotation failed", "err", err)
}
tracker.mu.Unlock()
}
timer.Reset(recheckInterval)
}
}
}

View file

@ -132,7 +132,7 @@ type SubPool interface {
// Add enqueues a batch of transactions into the pool if they are valid. Due // Add enqueues a batch of transactions into the pool if they are valid. Due
// to the large transaction churn, add may postpone fully integrating the tx // to the large transaction churn, add may postpone fully integrating the tx
// to a later point to batch multiple ones together. // to a later point to batch multiple ones together.
Add(txs []*types.Transaction, local bool, sync bool) []error Add(txs []*types.Transaction, sync bool) []error
// Pending retrieves all currently processable transactions, grouped by origin // Pending retrieves all currently processable transactions, grouped by origin
// account and sorted by nonce. // account and sorted by nonce.
@ -162,9 +162,6 @@ type SubPool interface {
// pending as well as queued transactions of this address, grouped by nonce. // pending as well as queued transactions of this address, grouped by nonce.
ContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) ContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction)
// Locals retrieves the accounts currently considered local by the pool.
Locals() []common.Address
// Status returns the known status (unknown/pending/queued) of a transaction // Status returns the known status (unknown/pending/queued) of a transaction
// identified by their hashes. // identified by their hashes.
Status(hash common.Hash) TxStatus Status(hash common.Hash) TxStatus

View file

@ -1,4 +1,4 @@
// Copyright 2023 The go-ethereum Authors // Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library. // This file is part of the go-ethereum library.
// //
// The go-ethereum library is free software: you can redistribute it and/or modify // The go-ethereum library is free software: you can redistribute it and/or modify
@ -328,7 +328,7 @@ func (p *TxPool) GetBlobs(vhashes []common.Hash) ([]*kzg4844.Blob, []*kzg4844.Pr
// Add enqueues a batch of transactions into the pool if they are valid. Due // Add enqueues a batch of transactions into the pool if they are valid. Due
// to the large transaction churn, add may postpone fully integrating the tx // to the large transaction churn, add may postpone fully integrating the tx
// to a later point to batch multiple ones together. // to a later point to batch multiple ones together.
func (p *TxPool) Add(txs []*types.Transaction, local bool, sync bool) []error { func (p *TxPool) Add(txs []*types.Transaction, sync bool) []error {
// Split the input transactions between the subpools. It shouldn't really // Split the input transactions between the subpools. It shouldn't really
// happen that we receive merged batches, but better graceful than strange // happen that we receive merged batches, but better graceful than strange
// errors. // errors.
@ -355,7 +355,7 @@ func (p *TxPool) Add(txs []*types.Transaction, local bool, sync bool) []error {
// back the errors into the original sort order. // back the errors into the original sort order.
errsets := make([][]error, len(p.subpools)) errsets := make([][]error, len(p.subpools))
for i := 0; i < len(p.subpools); i++ { for i := 0; i < len(p.subpools); i++ {
errsets[i] = p.subpools[i].Add(txsets[i], local, sync) errsets[i] = p.subpools[i].Add(txsets[i], sync)
} }
errs := make([]error, len(txs)) errs := make([]error, len(txs))
for i, split := range splits { for i, split := range splits {
@ -456,23 +456,6 @@ func (p *TxPool) ContentFrom(addr common.Address) ([]*types.Transaction, []*type
return []*types.Transaction{}, []*types.Transaction{} return []*types.Transaction{}, []*types.Transaction{}
} }
// Locals retrieves the accounts currently considered local by the pool.
func (p *TxPool) Locals() []common.Address {
// Retrieve the locals from each subpool and deduplicate them
locals := make(map[common.Address]struct{})
for _, subpool := range p.subpools {
for _, local := range subpool.Locals() {
locals[local] = struct{}{}
}
}
// Flatten and return the deduplicated local set
flat := make([]common.Address, 0, len(locals))
for local := range locals {
flat = append(flat, local)
}
return flat
}
// Status returns the known status (unknown/pending/queued) of a transaction // Status returns the known status (unknown/pending/queued) of a transaction
// identified by its hash. // identified by its hash.
func (p *TxPool) Status(hash common.Hash) TxStatus { func (p *TxPool) Status(hash common.Hash) TxStatus {

View file

@ -23,6 +23,7 @@ import (
"math/big" "math/big"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
@ -144,8 +145,9 @@ func ValidateTransaction(tx *types.Transaction, head *types.Header, signer types
if len(hashes) == 0 { if len(hashes) == 0 {
return errors.New("blobless blob transaction") return errors.New("blobless blob transaction")
} }
if len(hashes) > params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob { maxBlobs := eip4844.MaxBlobsPerBlock(opts.Config, head.Time)
return fmt.Errorf("too many blobs in transaction: have %d, permitted %d", len(hashes), params.MaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob) if len(hashes) > maxBlobs {
return fmt.Errorf("too many blobs in transaction: have %d, permitted %d", len(hashes), maxBlobs)
} }
// Ensure commitments, proofs and hashes are valid // Ensure commitments, proofs and hashes are valid
if err := validateBlobSidecar(hashes, sidecar); err != nil { if err := validateBlobSidecar(hashes, sidecar); err != nil {

View file

@ -1,3 +1,19 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package types package types
import ( import (

View file

@ -29,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
@ -58,6 +59,9 @@ var (
VerkleTime: u64(0), VerkleTime: u64(0),
TerminalTotalDifficulty: common.Big0, TerminalTotalDifficulty: common.Big0,
EnableVerkleAtGenesis: true, EnableVerkleAtGenesis: true,
BlobScheduleConfig: &params.BlobScheduleConfig{
Verkle: params.DefaultPragueBlobConfig,
},
// TODO uncomment when proof generation is merged // TODO uncomment when proof generation is merged
// ProofInBlocks: true, // ProofInBlocks: true,
} }
@ -79,6 +83,9 @@ var (
VerkleTime: u64(0), VerkleTime: u64(0),
TerminalTotalDifficulty: common.Big0, TerminalTotalDifficulty: common.Big0,
EnableVerkleAtGenesis: true, EnableVerkleAtGenesis: true,
BlobScheduleConfig: &params.BlobScheduleConfig{
Verkle: params.DefaultPragueBlobConfig,
},
} }
) )
@ -220,17 +227,17 @@ func TestProcessParentBlockHash(t *testing.T) {
// block 2 parent hash is 0x0200.... // block 2 parent hash is 0x0200....
// etc // etc
checkBlockHashes := func(statedb *state.StateDB, isVerkle bool) { checkBlockHashes := func(statedb *state.StateDB, isVerkle bool) {
statedb.SetNonce(params.HistoryStorageAddress, 1) statedb.SetNonce(params.HistoryStorageAddress, 1, tracing.NonceChangeUnspecified)
statedb.SetCode(params.HistoryStorageAddress, params.HistoryStorageCode) statedb.SetCode(params.HistoryStorageAddress, params.HistoryStorageCode)
// Process n blocks, from 1 .. num // Process n blocks, from 1 .. num
var num = 2 var num = 2
for i := 1; i <= num; i++ { for i := 1; i <= num; i++ {
header := &types.Header{ParentHash: common.Hash{byte(i)}, Number: big.NewInt(int64(i)), Difficulty: new(big.Int)} header := &types.Header{ParentHash: common.Hash{byte(i)}, Number: big.NewInt(int64(i)), Difficulty: new(big.Int)}
vmContext := NewEVMBlockContext(header, nil, new(common.Address))
chainConfig := params.MergedTestChainConfig chainConfig := params.MergedTestChainConfig
if isVerkle { if isVerkle {
chainConfig = testVerkleChainConfig chainConfig = testVerkleChainConfig
} }
vmContext := NewEVMBlockContext(header, nil, new(common.Address))
evm := vm.NewEVM(vmContext, statedb, chainConfig, vm.Config{}) evm := vm.NewEVM(vmContext, statedb, chainConfig, vm.Config{})
ProcessParentBlockHash(header.ParentHash, evm) ProcessParentBlockHash(header.ParentHash, evm)
} }

View file

@ -1,18 +1,18 @@
// Copyright 2024 The go-ethereum Authors // Copyright 2024 The go-ethereum Authors
// This file is part of go-ethereum. // This file is part of the go-ethereum library.
// //
// go-ethereum is free software: you can redistribute it and/or modify // The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by // it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or // the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version. // (at your option) any later version.
// //
// go-ethereum is distributed in the hope that it will be useful, // The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of // but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details. // GNU Lesser General Public License for more details.
// //
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU Lesser General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package vm package vm

View file

@ -1,4 +1,4 @@
// Copyright 2022 The go-ethereum Authors // Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library. // This file is part of the go-ethereum library.
// //
// The go-ethereum library is free software: you can redistribute it and/or modify // The go-ethereum library is free software: you can redistribute it and/or modify

View file

@ -474,7 +474,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
if nonce+1 < nonce { if nonce+1 < nonce {
return nil, common.Address{}, gas, ErrNonceUintOverflow return nil, common.Address{}, gas, ErrNonceUintOverflow
} }
evm.StateDB.SetNonce(caller.Address(), nonce+1) evm.StateDB.SetNonce(caller.Address(), nonce+1, tracing.NonceChangeContractCreator)
// Charge the contract creation init gas in verkle mode // Charge the contract creation init gas in verkle mode
if evm.chainRules.IsEIP4762 { if evm.chainRules.IsEIP4762 {
@ -522,7 +522,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
evm.StateDB.CreateContract(address) evm.StateDB.CreateContract(address)
if evm.chainRules.IsEIP158 { if evm.chainRules.IsEIP158 {
evm.StateDB.SetNonce(address, 1) evm.StateDB.SetNonce(address, 1, tracing.NonceChangeNewContract)
} }
// Charge the contract creation init gas in verkle mode // Charge the contract creation init gas in verkle mode
if evm.chainRules.IsEIP4762 { if evm.chainRules.IsEIP4762 {

View file

@ -477,6 +477,9 @@ func opBlockhash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) (
if witness := interpreter.evm.StateDB.Witness(); witness != nil { if witness := interpreter.evm.StateDB.Witness(); witness != nil {
witness.AddBlockHash(num64) witness.AddBlockHash(num64)
} }
if tracer := interpreter.evm.Config.Tracer; tracer != nil && tracer.OnBlockHashRead != nil {
tracer.OnBlockHashRead(num64, res)
}
num.SetBytes(res[:]) num.SetBytes(res[:])
} else { } else {
num.Clear() num.Clear()

View file

@ -39,7 +39,7 @@ type StateDB interface {
GetBalance(common.Address) *uint256.Int GetBalance(common.Address) *uint256.Int
GetNonce(common.Address) uint64 GetNonce(common.Address) uint64
SetNonce(common.Address, uint64) SetNonce(common.Address, uint64, tracing.NonceChangeReason)
GetCodeHash(common.Address) common.Hash GetCodeHash(common.Address) common.Hash
GetCode(common.Address) []byte GetCode(common.Address) []byte

View file

@ -28,8 +28,10 @@ func LookupInstructionSet(rules params.Rules) (JumpTable, error) {
switch { switch {
case rules.IsVerkle: case rules.IsVerkle:
return newCancunInstructionSet(), errors.New("verkle-fork not defined yet") return newCancunInstructionSet(), errors.New("verkle-fork not defined yet")
case rules.IsOsaka:
return newPragueInstructionSet(), errors.New("osaka-fork not defined yet")
case rules.IsPrague: case rules.IsPrague:
return newCancunInstructionSet(), errors.New("prague-fork not defined yet") return newPragueInstructionSet(), nil
case rules.IsCancun: case rules.IsCancun:
return newCancunInstructionSet(), nil return newCancunInstructionSet(), nil
case rules.IsShanghai: case rules.IsShanghai:

View file

@ -1,4 +1,4 @@
// Copyright 2022 The go-ethereum Authors // Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library. // This file is part of the go-ethereum library.
// //
// The go-ethereum library is free software: you can redistribute it and/or modify // The go-ethereum library is free software: you can redistribute it and/or modify

View file

@ -1,3 +1,19 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package vm package vm
import ( import (

View file

@ -1,18 +1,18 @@
// Copyright 2024 The go-ethereum Authors // Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library. // This file is part of the go-ethereum library.
// //
// The library is free software: you can redistribute it and/or modify // The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by // it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or // the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version. // (at your option) any later version.
// //
// This library is distributed in the hope that it will be useful, // The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of // but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details. // GNU Lesser General Public License for more details.
// //
// You should have received a copy of the GNU Lesser General Public License // You should have received a copy of the GNU Lesser General Public License
// along with the goevmlab library. If not, see <http://www.gnu.org/licenses/>. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// package program is a utility to create EVM bytecode for testing, but _not_ for production. As such: // package program is a utility to create EVM bytecode for testing, but _not_ for production. As such:
// //

View file

@ -1,18 +1,18 @@
// Copyright 2024 The go-ethereum Authors // Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library. // This file is part of the go-ethereum library.
// //
// The library is free software: you can redistribute it and/or modify // The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by // it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or // the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version. // (at your option) any later version.
// //
// This library is distributed in the hope that it will be useful, // The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of // but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details. // GNU Lesser General Public License for more details.
// //
// You should have received a copy of the GNU Lesser General Public License // You should have received a copy of the GNU Lesser General Public License
// along with the goevmlab library. If not, see <http://www.gnu.org/licenses/>. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package program package program

View file

@ -314,6 +314,10 @@ func (d *dummyChain) GetHeader(h common.Hash, n uint64) *types.Header {
return fakeHeader(n, parentHash) return fakeHeader(n, parentHash)
} }
func (d *dummyChain) Config() *params.ChainConfig {
return nil
}
// TestBlockhash tests the blockhash operation. It's a bit special, since it internally // TestBlockhash tests the blockhash operation. It's a bit special, since it internally
// requires access to a chain reader. // requires access to a chain reader.
func TestBlockhash(t *testing.T) { func TestBlockhash(t *testing.T) {
@ -410,7 +414,7 @@ func benchmarkNonModifyingCode(gas uint64, code []byte, name string, tracerCode
eoa := common.HexToAddress("E0") eoa := common.HexToAddress("E0")
{ {
cfg.State.CreateAccount(eoa) cfg.State.CreateAccount(eoa)
cfg.State.SetNonce(eoa, 100) cfg.State.SetNonce(eoa, 100, tracing.NonceChangeUnspecified)
} }
reverting := common.HexToAddress("EE") reverting := common.HexToAddress("EE")
{ {

View file

@ -272,7 +272,10 @@ func (b *EthAPIBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscri
} }
func (b *EthAPIBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error { func (b *EthAPIBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error {
return b.eth.txPool.Add([]*types.Transaction{signedTx}, true, false)[0] if locals := b.eth.localTxTracker; locals != nil {
locals.Track(signedTx)
}
return b.eth.txPool.Add([]*types.Transaction{signedTx}, false)[0]
} }
func (b *EthAPIBackend) GetPoolTransactions() (types.Transactions, error) { func (b *EthAPIBackend) GetPoolTransactions() (types.Transactions, error) {
@ -356,7 +359,7 @@ func (b *EthAPIBackend) FeeHistory(ctx context.Context, blockCount uint64, lastB
func (b *EthAPIBackend) BlobBaseFee(ctx context.Context) *big.Int { func (b *EthAPIBackend) BlobBaseFee(ctx context.Context) *big.Int {
if excess := b.CurrentHeader().ExcessBlobGas; excess != nil { if excess := b.CurrentHeader().ExcessBlobGas; excess != nil {
return eip4844.CalcBlobFee(*excess) return eip4844.CalcBlobFee(b.ChainConfig(), b.CurrentHeader())
} }
return nil return nil
} }

View file

@ -23,6 +23,7 @@ import (
"math/big" "math/big"
"runtime" "runtime"
"sync" "sync"
"time"
"github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -35,6 +36,7 @@ import (
"github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/txpool/blobpool" "github.com/ethereum/go-ethereum/core/txpool/blobpool"
"github.com/ethereum/go-ethereum/core/txpool/legacypool" "github.com/ethereum/go-ethereum/core/txpool/legacypool"
"github.com/ethereum/go-ethereum/core/txpool/locals"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/downloader"
@ -67,9 +69,10 @@ type Config = ethconfig.Config
// Ethereum implements the Ethereum full node service. // Ethereum implements the Ethereum full node service.
type Ethereum struct { type Ethereum struct {
// core protocol objects // core protocol objects
config *ethconfig.Config config *ethconfig.Config
txPool *txpool.TxPool txPool *txpool.TxPool
blockchain *core.BlockChain localTxTracker *locals.TxTracker
blockchain *core.BlockChain
handler *handler handler *handler
discmix *enode.FairMix discmix *enode.FairMix
@ -237,6 +240,16 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
if !config.TxPool.NoLocals {
rejournal := config.TxPool.Rejournal
if rejournal < time.Second {
log.Warn("Sanitizing invalid txpool journal time", "provided", rejournal, "updated", time.Second)
rejournal = time.Second
}
eth.localTxTracker = locals.New(config.TxPool.Journal, rejournal, eth.blockchain.Config(), eth.txPool)
stack.RegisterLifecycle(eth.localTxTracker)
}
// Permit the downloader to use the trie cache allowance during fast sync // Permit the downloader to use the trie cache allowance during fast sync
cacheLimit := cacheConfig.TrieCleanLimit + cacheConfig.TrieDirtyLimit + cacheConfig.SnapshotLimit cacheLimit := cacheConfig.TrieCleanLimit + cacheConfig.TrieDirtyLimit + cacheConfig.SnapshotLimit
if eth.handler, err = newHandler(&handlerConfig{ if eth.handler, err = newHandler(&handlerConfig{
@ -255,6 +268,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
eth.miner = miner.New(eth, config.Miner, eth.engine) eth.miner = miner.New(eth, config.Miner, eth.engine)
eth.miner.SetExtra(makeExtraData(config.Miner.ExtraData)) eth.miner.SetExtra(makeExtraData(config.Miner.ExtraData))
eth.miner.SetPrioAddresses(config.TxPool.Locals)
eth.APIBackend = &EthAPIBackend{stack.Config().ExtRPCEnabled(), stack.Config().AllowUnprotectedTxs, eth, nil} eth.APIBackend = &EthAPIBackend{stack.Config().ExtRPCEnabled(), stack.Config().AllowUnprotectedTxs, eth, nil}
if eth.APIBackend.allowUnprotectedTxs { if eth.APIBackend.allowUnprotectedTxs {

View file

@ -115,7 +115,7 @@ func TestEth2AssembleBlock(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("error signing transaction, err=%v", err) t.Fatalf("error signing transaction, err=%v", err)
} }
ethservice.TxPool().Add([]*types.Transaction{tx}, true, true) ethservice.TxPool().Add([]*types.Transaction{tx}, true)
blockParams := engine.PayloadAttributes{ blockParams := engine.PayloadAttributes{
Timestamp: blocks[9].Time() + 5, Timestamp: blocks[9].Time() + 5,
} }
@ -152,7 +152,7 @@ func TestEth2AssembleBlockWithAnotherBlocksTxs(t *testing.T) {
// Put the 10th block's tx in the pool and produce a new block // Put the 10th block's tx in the pool and produce a new block
txs := blocks[9].Transactions() txs := blocks[9].Transactions()
api.eth.TxPool().Add(txs, false, true) api.eth.TxPool().Add(txs, true)
blockParams := engine.PayloadAttributes{ blockParams := engine.PayloadAttributes{
Timestamp: blocks[8].Time() + 5, Timestamp: blocks[8].Time() + 5,
} }
@ -174,7 +174,7 @@ func TestEth2PrepareAndGetPayload(t *testing.T) {
// Put the 10th block's tx in the pool and produce a new block // Put the 10th block's tx in the pool and produce a new block
txs := blocks[9].Transactions() txs := blocks[9].Transactions()
ethservice.TxPool().Add(txs, true, true) ethservice.TxPool().Add(txs, true)
blockParams := engine.PayloadAttributes{ blockParams := engine.PayloadAttributes{
Timestamp: blocks[8].Time() + 5, Timestamp: blocks[8].Time() + 5,
} }
@ -294,7 +294,7 @@ func TestEth2NewBlock(t *testing.T) {
statedb, _ := ethservice.BlockChain().StateAt(parent.Root()) statedb, _ := ethservice.BlockChain().StateAt(parent.Root())
nonce := statedb.GetNonce(testAddr) nonce := statedb.GetNonce(testAddr)
tx, _ := types.SignTx(types.NewContractCreation(nonce, new(big.Int), 1000000, big.NewInt(2*params.InitialBaseFee), logCode), types.LatestSigner(ethservice.BlockChain().Config()), testKey) tx, _ := types.SignTx(types.NewContractCreation(nonce, new(big.Int), 1000000, big.NewInt(2*params.InitialBaseFee), logCode), types.LatestSigner(ethservice.BlockChain().Config()), testKey)
ethservice.TxPool().Add([]*types.Transaction{tx}, true, true) ethservice.TxPool().Add([]*types.Transaction{tx}, true)
execData, err := assembleWithTransactions(api, parent.Hash(), &engine.PayloadAttributes{ execData, err := assembleWithTransactions(api, parent.Hash(), &engine.PayloadAttributes{
Timestamp: parent.Time() + 5, Timestamp: parent.Time() + 5,
@ -463,7 +463,7 @@ func TestFullAPI(t *testing.T) {
statedb, _ := ethservice.BlockChain().StateAt(parent.Root) statedb, _ := ethservice.BlockChain().StateAt(parent.Root)
nonce := statedb.GetNonce(testAddr) nonce := statedb.GetNonce(testAddr)
tx, _ := types.SignTx(types.NewContractCreation(nonce, new(big.Int), 1000000, big.NewInt(2*params.InitialBaseFee), logCode), types.LatestSigner(ethservice.BlockChain().Config()), testKey) tx, _ := types.SignTx(types.NewContractCreation(nonce, new(big.Int), 1000000, big.NewInt(2*params.InitialBaseFee), logCode), types.LatestSigner(ethservice.BlockChain().Config()), testKey)
ethservice.TxPool().Add([]*types.Transaction{tx}, true, false) ethservice.TxPool().Add([]*types.Transaction{tx}, false)
} }
setupBlocks(t, ethservice, 10, parent, callback, nil, nil) setupBlocks(t, ethservice, 10, parent, callback, nil, nil)
@ -594,7 +594,7 @@ func TestNewPayloadOnInvalidChain(t *testing.T) {
GasPrice: big.NewInt(2 * params.InitialBaseFee), GasPrice: big.NewInt(2 * params.InitialBaseFee),
Data: logCode, Data: logCode,
}) })
ethservice.TxPool().Add([]*types.Transaction{tx}, false, true) ethservice.TxPool().Add([]*types.Transaction{tx}, true)
var ( var (
params = engine.PayloadAttributes{ params = engine.PayloadAttributes{
Timestamp: parent.Time + 1, Timestamp: parent.Time + 1,
@ -1227,6 +1227,7 @@ func setupBodies(t *testing.T) (*node.Node, *eth.Ethereum, []*types.Block) {
genesis.Config.ShanghaiTime = &time genesis.Config.ShanghaiTime = &time
genesis.Config.CancunTime = &time genesis.Config.CancunTime = &time
genesis.Config.PragueTime = &time genesis.Config.PragueTime = &time
genesis.Config.BlobScheduleConfig = params.DefaultBlobSchedule
n, ethservice := startEthService(t, genesis, blocks) n, ethservice := startEthService(t, genesis, blocks)
@ -1245,8 +1246,8 @@ func setupBodies(t *testing.T) (*node.Node, *eth.Ethereum, []*types.Block) {
// Create tx to trigger deposit generator. // Create tx to trigger deposit generator.
tx2, _ = types.SignTx(types.NewTransaction(statedb.GetNonce(testAddr)+1, ethservice.APIBackend.ChainConfig().DepositContractAddress, new(big.Int), 500000, big.NewInt(2*params.InitialBaseFee), nil), types.LatestSigner(ethservice.BlockChain().Config()), testKey) tx2, _ = types.SignTx(types.NewTransaction(statedb.GetNonce(testAddr)+1, ethservice.APIBackend.ChainConfig().DepositContractAddress, new(big.Int), 500000, big.NewInt(2*params.InitialBaseFee), nil), types.LatestSigner(ethservice.BlockChain().Config()), testKey)
) )
ethservice.TxPool().Add([]*types.Transaction{tx1}, false, false) ethservice.TxPool().Add([]*types.Transaction{tx1}, false)
ethservice.TxPool().Add([]*types.Transaction{tx2}, false, false) ethservice.TxPool().Add([]*types.Transaction{tx2}, false)
} }
// Make some withdrawals to include. // Make some withdrawals to include.
@ -1543,6 +1544,7 @@ func TestParentBeaconBlockRoot(t *testing.T) {
time := blocks[len(blocks)-1].Time() + 5 time := blocks[len(blocks)-1].Time() + 5
genesis.Config.ShanghaiTime = &time genesis.Config.ShanghaiTime = &time
genesis.Config.CancunTime = &time genesis.Config.CancunTime = &time
genesis.Config.BlobScheduleConfig = params.DefaultBlobSchedule
n, ethservice := startEthService(t, genesis, blocks) n, ethservice := startEthService(t, genesis, blocks)
defer n.Close() defer n.Close()
@ -1625,6 +1627,7 @@ func TestWitnessCreationAndConsumption(t *testing.T) {
timestamp := blocks[len(blocks)-2].Time() + 5 timestamp := blocks[len(blocks)-2].Time() + 5
genesis.Config.ShanghaiTime = &timestamp genesis.Config.ShanghaiTime = &timestamp
genesis.Config.CancunTime = &timestamp genesis.Config.CancunTime = &timestamp
genesis.Config.BlobScheduleConfig = params.DefaultBlobSchedule
n, ethservice := startEthService(t, genesis, blocks[:9]) n, ethservice := startEthService(t, genesis, blocks[:9])
defer n.Close() defer n.Close()
@ -1634,7 +1637,7 @@ func TestWitnessCreationAndConsumption(t *testing.T) {
// Put the 10th block's tx in the pool and produce a new block // Put the 10th block's tx in the pool and produce a new block
txs := blocks[9].Transactions() txs := blocks[9].Transactions()
ethservice.TxPool().Add(txs, true, true) ethservice.TxPool().Add(txs, true)
blockParams := engine.PayloadAttributes{ blockParams := engine.PayloadAttributes{
Timestamp: blocks[8].Time() + 5, Timestamp: blocks[8].Time() + 5,
Withdrawals: make([]*types.Withdrawal, 0), Withdrawals: make([]*types.Withdrawal, 0),

View file

@ -18,6 +18,7 @@ package catalyst
import ( import (
"context" "context"
"fmt"
"math/big" "math/big"
"testing" "testing"
"time" "time"
@ -143,9 +144,14 @@ func TestSimulatedBeaconSendWithdrawals(t *testing.T) {
// Tests that zero-period dev mode can handle a lot of simultaneous // Tests that zero-period dev mode can handle a lot of simultaneous
// transactions/withdrawals // transactions/withdrawals
func TestOnDemandSpam(t *testing.T) { func TestOnDemandSpam(t *testing.T) {
// This test is flaky, due to various causes, and the root cause is synchronicity.
// We have optimistic timeouts here and there in the simulated becaon and the worker.
// This test typically fails on 32-bit windows appveyor.
t.Skip("flaky test")
var ( var (
withdrawals []types.Withdrawal withdrawals []types.Withdrawal
txs = make(map[common.Hash]*types.Transaction) txCount = 20000
wxCount = 20
testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
testAddr = crypto.PubkeyToAddress(testKey.PublicKey) testAddr = crypto.PubkeyToAddress(testKey.PublicKey)
gasLimit uint64 = 10_000_000 gasLimit uint64 = 10_000_000
@ -160,7 +166,7 @@ func TestOnDemandSpam(t *testing.T) {
defer sub.Unsubscribe() defer sub.Unsubscribe()
// generate some withdrawals // generate some withdrawals
for i := 0; i < 20; i++ { for i := 0; i < wxCount; i++ {
withdrawals = append(withdrawals, types.Withdrawal{Index: uint64(i)}) withdrawals = append(withdrawals, types.Withdrawal{Index: uint64(i)})
if err := mock.withdrawals.add(&withdrawals[i]); err != nil { if err := mock.withdrawals.add(&withdrawals[i]); err != nil {
t.Fatal("addWithdrawal failed", err) t.Fatal("addWithdrawal failed", err)
@ -168,37 +174,37 @@ func TestOnDemandSpam(t *testing.T) {
} }
// generate a bunch of transactions // generate a bunch of transactions
for i := 0; i < 20000; i++ { go func() {
tx, err := types.SignTx(types.NewTransaction(uint64(i), common.Address{byte(i), byte(1)}, big.NewInt(1000), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil), signer, testKey) for i := 0; i < txCount; i++ {
if err != nil { tx, err := types.SignTx(types.NewTransaction(uint64(i), common.Address{byte(i), byte(1)}, big.NewInt(1000), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil), signer, testKey)
t.Fatal("error signing transaction", err) if err != nil {
panic(fmt.Sprintf("error signing transaction: %v", err))
}
if err := eth.TxPool().Add([]*types.Transaction{tx}, false)[0]; err != nil {
panic(fmt.Sprintf("error adding txs to pool: %v", err))
}
} }
txs[tx.Hash()] = tx }()
if err := eth.APIBackend.SendTx(context.Background(), tx); err != nil {
t.Fatal("error adding txs to pool", err)
}
}
var ( var (
includedTxs = make(map[common.Hash]struct{}) includedTxs int
includedWxs []uint64 includedWxs int
abort = time.NewTimer(10 * time.Second)
) )
defer abort.Stop()
for { for {
select { select {
case ev := <-chainHeadCh: case ev := <-chainHeadCh:
block := eth.BlockChain().GetBlock(ev.Header.Hash(), ev.Header.Number.Uint64()) block := eth.BlockChain().GetBlock(ev.Header.Hash(), ev.Header.Number.Uint64())
for _, itx := range block.Transactions() { includedTxs += len(block.Transactions())
includedTxs[itx.Hash()] = struct{}{} includedWxs += len(block.Withdrawals())
}
for _, iwx := range block.Withdrawals() {
includedWxs = append(includedWxs, iwx.Index)
}
// ensure all withdrawals/txs included. this will take two blocks b/c number of withdrawals > 10 // ensure all withdrawals/txs included. this will take two blocks b/c number of withdrawals > 10
if len(includedTxs) == len(txs) && len(includedWxs) == len(withdrawals) { if includedTxs == txCount && includedWxs == wxCount {
return return
} }
case <-time.After(10 * time.Second): abort.Reset(10 * time.Second)
t.Fatalf("timed out without including all withdrawals/txs: have txs %d, want %d, have wxs %d, want %d", len(includedTxs), len(txs), len(includedWxs), len(withdrawals)) case <-abort.C:
t.Fatalf("timed out without including all withdrawals/txs: have txs %d, want %d, have wxs %d, want %d",
includedTxs, txCount, includedWxs, wxCount)
} }
} }
} }

View file

@ -1108,7 +1108,7 @@ func TestTransactionFetcherBandwidthLimiting(t *testing.T) {
doTxNotify{peer: "C", doTxNotify{peer: "C",
hashes: []common.Hash{{0x07}, {0x08}}, hashes: []common.Hash{{0x07}, {0x08}},
types: []byte{types.BlobTxType, types.BlobTxType}, types: []byte{types.BlobTxType, types.BlobTxType},
sizes: []uint32{params.MaxBlobGasPerBlock, params.MaxBlobGasPerBlock}, sizes: []uint32{params.BlobTxBlobGasPerBlob * 10, params.BlobTxBlobGasPerBlob * 10},
}, },
doWait{time: txArriveTimeout, step: true}, doWait{time: txArriveTimeout, step: true},
isWaiting(nil), isWaiting(nil),
@ -1125,8 +1125,8 @@ func TestTransactionFetcherBandwidthLimiting(t *testing.T) {
{common.Hash{0x06}, types.LegacyTxType, maxTxRetrievalSize}, {common.Hash{0x06}, types.LegacyTxType, maxTxRetrievalSize},
}, },
"C": { "C": {
{common.Hash{0x07}, types.BlobTxType, params.MaxBlobGasPerBlock}, {common.Hash{0x07}, types.BlobTxType, params.BlobTxBlobGasPerBlob * 10},
{common.Hash{0x08}, types.BlobTxType, params.MaxBlobGasPerBlock}, {common.Hash{0x08}, types.BlobTxType, params.BlobTxBlobGasPerBlob * 10},
}, },
}, },
fetching: map[string][]common.Hash{ fetching: map[string][]common.Hash{

View file

@ -31,7 +31,6 @@ import (
"github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/rpc"
) )
@ -97,8 +96,10 @@ func (oracle *Oracle) processBlock(bf *blockFees, percentiles []float64) {
} }
// Fill in blob base fee and next blob base fee. // Fill in blob base fee and next blob base fee.
if excessBlobGas := bf.header.ExcessBlobGas; excessBlobGas != nil { if excessBlobGas := bf.header.ExcessBlobGas; excessBlobGas != nil {
bf.results.blobBaseFee = eip4844.CalcBlobFee(*excessBlobGas) bf.results.blobBaseFee = eip4844.CalcBlobFee(config, bf.header)
bf.results.nextBlobBaseFee = eip4844.CalcBlobFee(eip4844.CalcExcessBlobGas(*excessBlobGas, *bf.header.BlobGasUsed)) excess := eip4844.CalcExcessBlobGas(config, bf.header, bf.header.Time)
next := &types.Header{Number: bf.header.Number, Time: bf.header.Time, ExcessBlobGas: &excess}
bf.results.nextBlobBaseFee = eip4844.CalcBlobFee(config, next)
} else { } else {
bf.results.blobBaseFee = new(big.Int) bf.results.blobBaseFee = new(big.Int)
bf.results.nextBlobBaseFee = new(big.Int) bf.results.nextBlobBaseFee = new(big.Int)
@ -106,7 +107,8 @@ func (oracle *Oracle) processBlock(bf *blockFees, percentiles []float64) {
// Compute gas used ratio for normal and blob gas. // Compute gas used ratio for normal and blob gas.
bf.results.gasUsedRatio = float64(bf.header.GasUsed) / float64(bf.header.GasLimit) bf.results.gasUsedRatio = float64(bf.header.GasUsed) / float64(bf.header.GasLimit)
if blobGasUsed := bf.header.BlobGasUsed; blobGasUsed != nil { if blobGasUsed := bf.header.BlobGasUsed; blobGasUsed != nil {
bf.results.blobGasUsedRatio = float64(*blobGasUsed) / params.MaxBlobGasPerBlock maxBlobs := eip4844.MaxBlobsPerBlock(config, bf.header.Time)
bf.results.blobGasUsedRatio = float64(*blobGasUsed) / float64(maxBlobs)
} }
if len(percentiles) == 0 { if len(percentiles) == 0 {

View file

@ -157,6 +157,7 @@ func newTestBackend(t *testing.T, londonBlock *big.Int, cancunBlock *big.Int, pe
ts := gspec.Timestamp + cancunBlock.Uint64()*10 // fixed 10 sec block time in blockgen ts := gspec.Timestamp + cancunBlock.Uint64()*10 // fixed 10 sec block time in blockgen
config.ShanghaiTime = &ts config.ShanghaiTime = &ts
config.CancunTime = &ts config.CancunTime = &ts
config.BlobScheduleConfig = params.DefaultBlobSchedule
signer = types.LatestSigner(gspec.Config) signer = types.LatestSigner(gspec.Config)
} }

View file

@ -68,7 +68,7 @@ type txPool interface {
Get(hash common.Hash) *types.Transaction Get(hash common.Hash) *types.Transaction
// Add should add the given transactions to the pool. // Add should add the given transactions to the pool.
Add(txs []*types.Transaction, local bool, sync bool) []error Add(txs []*types.Transaction, sync bool) []error
// Pending should return pending transactions. // Pending should return pending transactions.
// The slice should be modifiable by the caller. // The slice should be modifiable by the caller.
@ -189,7 +189,7 @@ func newHandler(config *handlerConfig) (*handler, error) {
return p.RequestTxs(hashes) return p.RequestTxs(hashes)
} }
addTxs := func(txs []*types.Transaction) []error { addTxs := func(txs []*types.Transaction) []error {
return h.txpool.Add(txs, false, false) return h.txpool.Add(txs, false)
} }
h.txFetcher = fetcher.NewTxFetcher(h.txpool.Has, addTxs, fetchTx, h.removePeer) h.txFetcher = fetcher.NewTxFetcher(h.txpool.Has, addTxs, fetchTx, h.removePeer)
return h, nil return h, nil

View file

@ -299,8 +299,8 @@ func testSendTransactions(t *testing.T, protocol uint) {
tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey) tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey)
insert[nonce] = tx insert[nonce] = tx
} }
go handler.txpool.Add(insert, false, false) // Need goroutine to not block on feed go handler.txpool.Add(insert, false) // Need goroutine to not block on feed
time.Sleep(250 * time.Millisecond) // Wait until tx events get out of the system (can't use events, tx broadcaster races with peer join) time.Sleep(250 * time.Millisecond) // Wait until tx events get out of the system (can't use events, tx broadcaster races with peer join)
// Create a source handler to send messages through and a sink peer to receive them // Create a source handler to send messages through and a sink peer to receive them
p2pSrc, p2pSink := p2p.MsgPipe() p2pSrc, p2pSink := p2p.MsgPipe()
@ -419,7 +419,7 @@ func testTransactionPropagation(t *testing.T, protocol uint) {
tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey) tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey)
txs[nonce] = tx txs[nonce] = tx
} }
source.txpool.Add(txs, false, false) source.txpool.Add(txs, false)
// Iterate through all the sinks and ensure they all got the transactions // Iterate through all the sinks and ensure they all got the transactions
for i := range sinks { for i := range sinks {

View file

@ -80,7 +80,7 @@ func (p *testTxPool) Get(hash common.Hash) *types.Transaction {
// Add appends a batch of transactions to the pool, and notifies any // Add appends a batch of transactions to the pool, and notifies any
// listeners if the addition channel is non nil // listeners if the addition channel is non nil
func (p *testTxPool) Add(txs []*types.Transaction, local bool, sync bool) []error { func (p *testTxPool) Add(txs []*types.Transaction, sync bool) []error {
p.lock.Lock() p.lock.Lock()
defer p.lock.Unlock() defer p.lock.Unlock()

View file

@ -1088,6 +1088,10 @@ func overrideConfig(original *params.ChainConfig, override *params.ChainConfig)
copy.PragueTime = timestamp copy.PragueTime = timestamp
canon = false canon = false
} }
if timestamp := override.OsakaTime; timestamp != nil {
copy.OsakaTime = timestamp
canon = false
}
if timestamp := override.VerkleTime; timestamp != nil { if timestamp := override.VerkleTime; timestamp != nil {
copy.VerkleTime = timestamp copy.VerkleTime = timestamp
canon = false canon = false

View file

@ -1,4 +1,4 @@
// Copyright 2017 The go-ethereum Authors // Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library. // This file is part of the go-ethereum library.
// //
// The go-ethereum library is free software: you can redistribute it and/or modify // The go-ethereum library is free software: you can redistribute it and/or modify

View file

@ -1,3 +1,19 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package tracetest package tracetest
import ( import (

View file

@ -1,3 +1,19 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// makeTest generates a test for the configured tracer by running // makeTest generates a test for the configured tracer by running
// a prestate reassembled and a call trace run, assembling all the // a prestate reassembled and a call trace run, assembling all the
// gathered information into a test case. // gathered information into a test case.

View file

@ -1,4 +1,4 @@
// Copyright 2021 The go-ethereum Authors // Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library. // This file is part of the go-ethereum library.
// //
// The go-ethereum library is free software: you can redistribute it and/or modify // The go-ethereum library is free software: you can redistribute it and/or modify

View file

@ -1,4 +1,4 @@
// Copyright 2021 The go-ethereum Authors // Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library. // This file is part of the go-ethereum library.
// //
// The go-ethereum library is free software: you can redistribute it and/or modify // The go-ethereum library is free software: you can redistribute it and/or modify
@ -360,6 +360,7 @@ func TestSupplySelfdestruct(t *testing.T) {
cancunTime := uint64(0) cancunTime := uint64(0)
gspec.Config.ShanghaiTime = &cancunTime gspec.Config.ShanghaiTime = &cancunTime
gspec.Config.CancunTime = &cancunTime gspec.Config.CancunTime = &cancunTime
gspec.Config.BlobScheduleConfig = params.DefaultBlobSchedule
postCancunOutput, postCancunChain, err := testSupplyTracer(t, gspec, testBlockGenerationFunc) postCancunOutput, postCancunChain, err := testSupplyTracer(t, gspec, testBlockGenerationFunc)
if err != nil { if err != nil {

View file

@ -41,7 +41,14 @@
"grayGlacierBlock": 0, "grayGlacierBlock": 0,
"shanghaiTime": 0, "shanghaiTime": 0,
"cancunTime": 0, "cancunTime": 0,
"terminalTotalDifficulty": 0 "terminalTotalDifficulty": 0,
"blobSchedule": {
"cancun": {
"target": 3,
"max": 6,
"baseFeeUpdateFraction": 3338477
}
}
} }
}, },
"context": { "context": {

View file

@ -41,7 +41,14 @@
"grayGlacierBlock": 0, "grayGlacierBlock": 0,
"shanghaiTime": 0, "shanghaiTime": 0,
"cancunTime": 0, "cancunTime": 0,
"terminalTotalDifficulty": 0 "terminalTotalDifficulty": 0,
"blobSchedule": {
"cancun": {
"target": 3,
"max": 6,
"baseFeeUpdateFraction": 3338477
}
}
} }
}, },
"context": { "context": {
@ -54,7 +61,9 @@
}, },
"input": "0x03f8b1820539806485174876e800825208940c2c51a0990aee1d73c1228de1586883415575088080c083020000f842a00100c9fbdf97f747e85847b4f3fff408f89c26842f77c882858bf2c89923849aa00138e3896f3c27f2389147507f8bcec52028b0efca6ee842ed83c9158873943880a0dbac3f97a532c9b00e6239b29036245a5bfbb96940b9d848634661abee98b945a03eec8525f261c2e79798f7b45a5d6ccaefa24576d53ba5023e919b86841c0675", "input": "0x03f8b1820539806485174876e800825208940c2c51a0990aee1d73c1228de1586883415575088080c083020000f842a00100c9fbdf97f747e85847b4f3fff408f89c26842f77c882858bf2c89923849aa00138e3896f3c27f2389147507f8bcec52028b0efca6ee842ed83c9158873943880a0dbac3f97a532c9b00e6239b29036245a5bfbb96940b9d848634661abee98b945a03eec8525f261c2e79798f7b45a5d6ccaefa24576d53ba5023e919b86841c0675",
"result": { "result": {
"0x0000000000000000000000000000000000000000": { "balance": "0x272e0528" }, "0x0000000000000000000000000000000000000000": {
"balance": "0x272e0528"
},
"0x0c2c51a0990aee1d73c1228de158688341557508": { "0x0c2c51a0990aee1d73c1228de158688341557508": {
"balance": "0xde0b6b3a7640000" "balance": "0xde0b6b3a7640000"
} }

View file

@ -53,7 +53,19 @@
"shanghaiTime": 0, "shanghaiTime": 0,
"cancunTime": 0, "cancunTime": 0,
"pragueTime": 0, "pragueTime": 0,
"terminalTotalDifficulty": 0 "terminalTotalDifficulty": 0,
"blobSchedule": {
"cancun": {
"target": 3,
"max": 6,
"baseFeeUpdateFraction": 3338477
},
"prague": {
"target": 3,
"max": 6,
"baseFeeUpdateFraction": 5007716
}
}
} }
}, },
"context": { "context": {

Some files were not shown because too many files have changed in this diff Show more