Documentation
Constants
const (
	VerifyingKeyFileName      = "verifying_key.bin"
	CircuitFileName           = "circuit.bin"
	VerifierContractFileName  = "Verifier.sol"
	ManifestFileName          = "manifest.json"
	DefaultDictionaryFileName = "compressor_dict.bin"

	RequestsFromSubDir = "requests"
	RequestsToSubDir   = "responses"
	RequestsDoneSubDir = "requests-done"

	InProgressSufix = "inprogress"
	FailSuffix      = "code"
	SuccessSuffix   = "success"

	// LargeSuffix is the extension to add in order to defer the job to the
	// large prover.
	LargeSuffix = "large"
)
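A minimal sketch of how these suffixes can compose into job file names. The dot separator and the request file name below are illustrative assumptions, not the controller's documented naming scheme.

package main

import "fmt"

// Local mirrors of InProgressSufix and LargeSuffix so the sketch is
// self-contained.
const (
	inProgressSufix = "inprogress"
	largeSuffix     = "large"
)

func main() {
	job := "request.json" // hypothetical request file name

	// A job being processed is presumably marked with the in-progress suffix
	// so that other workers skip it.
	fmt.Println(job + "." + inProgressSufix) // request.json.inprogress

	// Renaming with the large suffix defers the job to the large prover.
	fmt.Println(job + "." + largeSuffix) // request.json.large
}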
Variables
var (
	// DefaultDeferToOtherLargeCodes lists the exit codes for which the job is
	// requeued to be re-executed in large mode.
	DefaultDeferToOtherLargeCodes = []int{137}

	// DefaultRetryLocallyWithLargeCodes lists the exit codes for which the
	// job is retried locally in large mode.
	DefaultRetryLocallyWithLargeCodes = []int{77, 333}
)
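A minimal sketch of how these exit-code lists might be consulted after a worker exits; the dispatch below is illustrative, and the controller's actual handling lives elsewhere.

package main

import (
	"fmt"
	"slices"
)

func main() {
	deferCodes := []int{137}     // DefaultDeferToOtherLargeCodes
	retryCodes := []int{77, 333} // DefaultRetryLocallyWithLargeCodes

	for _, code := range []int{0, 77, 137, 1} {
		switch {
		case slices.Contains(deferCodes, code):
			fmt.Println(code, "=> requeue the job for re-execution in large mode")
		case slices.Contains(retryCodes, code):
			fmt.Println(code, "=> retry locally in large mode")
		default:
			fmt.Println(code, "=> no large-mode handling")
		}
	}
}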
Functions
func SetupLogger
func SetupLogger(level logLevel) error
SetupLogger initializes the logger with the given log level and log file.
Types
type Aggregation
type Aggregation struct {
	WithRequestDir `mapstructure:",squash"`

	// ProverMode stores the kind of prover to use.
	ProverMode ProverMode `mapstructure:"prover_mode" validate:"required,oneof=dev full"`

	// NumProofs is the number of proofs that are supported by the aggregation
	// circuit.
	NumProofs []int `mapstructure:"num_proofs" validate:"required,dive,gt=0,number"`

	// AllowedInputs determines the "inner" plonk circuits the "outer"
	// aggregation circuit can aggregate. Order matters.
	AllowedInputs []string `` /* 238-byte string literal not displayed */

	// note @gbotrel keeping that around in case we need to support two
	// emulation contracts during a migration.

	// VerifierID is the verifier ID to assign to the proof once generated. It
	// will be used by the L1 contracts to determine which solidity Plonk
	// verifier contract should be used to verify the proof.
	VerifierID int `mapstructure:"verifier_id" validate:"gte=0,number"`
}
type BlobDecompression
type BlobDecompression struct {
	WithRequestDir `mapstructure:",squash"`

	// ProverMode stores the kind of prover to use.
	ProverMode ProverMode `mapstructure:"prover_mode" validate:"required,oneof=dev full"`

	// DictPaths is an optional parameter allowing the user to specify
	// explicitly where to look for the compression dictionaries. If it is not
	// provided, the dictionary is fetched from
	// <assets_dir>/<version>/<circuitID>/compression_dict.bin.
	//
	// We stress that the feature should not be used in production and should
	// only be used in an E2E testing context.
	// TODO @Tabaie @alexandre.belling revise this warning, it seems to no longer apply
	DictPaths []string `mapstructure:"dict_paths"`
}
type Config
type Config struct {
	// Environment stores the environment in which the application is running.
	// It enables us to have a clear domain separation for generated assets.
	Environment string `validate:"required,oneof=mainnet sepolia devnet integration-development integration-full integration-benchmark"`

	// TODO @gbotrel define explicitly where we use that and why; if we supply
	// it as-is to the coordinator in responses, the coordinator should parse
	// semver.
	Version string `validate:"required,semver"`

	// LogLevel sets the log level for the logger.
	LogLevel logLevel `mapstructure:"log_level" validate:"required,gte=0,lte=6"`

	// AssetsDir stores the root of the directory where the assets are stored
	// (setup) or accessed (prover). The file structure is described in
	// TODO @gbotrel.
	AssetsDir string `mapstructure:"assets_dir" validate:"required,dir"`

	Controller                 Controller
	Execution                  Execution
	BlobDecompression          BlobDecompression `mapstructure:"blob_decompression"`
	Aggregation                Aggregation
	PublicInputInterconnection PublicInput `mapstructure:"public_input_interconnection"` // TODO add wizard compilation params

	Debug struct {
		// Profiling indicates whether we want to generate profiles using the
		// [runtime/pprof] pkg. Profiles can later be read using the
		// `go tool pprof` command.
		Profiling bool `mapstructure:"profiling"`

		// Tracing indicates whether we want to generate traces using the
		// [runtime/trace] pkg. Traces can later be read using the
		// `go tool trace` command.
		Tracing bool `mapstructure:"tracing"`
	}

	Layer2 struct {
		// ChainID stores the ID of the Linea L2 network to consider.
		ChainID uint `mapstructure:"chain_id" validate:"required"`

		// MsgSvcContractStr stores the unique ID of the Service Contract (SC),
		// that is, its address, as a string. The Service Contract is a smart
		// contract that the L2 network uses to send messages (i.e.,
		// transactions) to the L1 (mainnet). Use this field when you need the
		// ETH address as a string.
		MsgSvcContractStr string `mapstructure:"message_service_contract" validate:"required,eth_addr"`

		// MsgSvcContract stores the unique ID of the Service Contract (SC),
		// as a common.Address.
		MsgSvcContract common.Address `mapstructure:"-"`
	}

	TracesLimits      TracesLimits `mapstructure:"traces_limits" validate:"required"`
	TracesLimitsLarge TracesLimits `mapstructure:"traces_limits_large" validate:"required"`
}
func NewConfigFromFile
NewConfigFromFile reads the configuration from the given file path and returns a new Config. It also sets default values and validates the configuration.
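A minimal usage sketch, assuming the monorepo import path github.com/consensys/linea-monorepo/prover/config and a NewConfigFromFile(path string) (*Config, error) signature (the signature is not shown above); the config path is hypothetical.

package main

import (
	"log"

	"github.com/consensys/linea-monorepo/prover/config"
)

func main() {
	// Load, default-fill and validate the configuration.
	cfg, err := config.NewConfigFromFile("/etc/prover/config.toml")
	if err != nil {
		log.Fatalf("loading config: %v", err)
	}

	// Initialize the package logger from the configured level (see
	// SetupLogger above).
	if err := config.SetupLogger(cfg.LogLevel); err != nil {
		log.Fatalf("setting up logger: %v", err)
	}

	log.Printf("environment=%s version=%s", cfg.Environment, cfg.Version)
}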
func (*Config) BlobDecompressionDictStore
func (cfg *Config) BlobDecompressionDictStore(circuitID string) dictionary.Store
BlobDecompressionDictStore returns a decompression dictionary store loaded from paths specified in [BlobDecompression.DictPaths]. If no such path is provided, it loads one from the prover assets path depending on the provided circuitID.
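A sketch of obtaining a dictionary store, under the same import-path and signature assumptions as the previous example; the circuit ID is a placeholder.

package main

import (
	"log"

	"github.com/consensys/linea-monorepo/prover/config"
)

func main() {
	cfg, err := config.NewConfigFromFile("/etc/prover/config.toml") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}

	// "blob-decompression-v1" is a placeholder circuit ID. With DictPaths
	// unset, the store is loaded from the prover assets directory for this ID.
	store := cfg.BlobDecompressionDictStore("blob-decompression-v1")
	_ = store // hand the dictionary.Store to the decompression prover
}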
func (*Config) PathForSRS
PathForSRS returns the path to the SRS directory.
func (*Config) PathForSetup
PathForSetup returns the path to the setup directory for the given circuitID. e.g. .../prover-assets/0.1.0/mainnet/execution
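A self-contained reconstruction of the layout suggested by the example above, i.e. <assets_dir>/<version>/<environment>/<circuitID>; the real method may differ in details.

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Components mirroring Config.AssetsDir, Config.Version and
	// Config.Environment; the layout is inferred from the documented example.
	assetsDir, version, environment := "prover-assets", "0.1.0", "mainnet"
	circuitID := "execution"

	fmt.Println(filepath.Join(assetsDir, version, environment, circuitID))
	// Output: prover-assets/0.1.0/mainnet/execution
}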
type Controller
type Controller struct {
	// LocalID is the unique ID of this process. It must be unique among all
	// workers. This field is not to be populated by the TOML configuration
	// file; it is to be set through an environment variable.
	LocalID string

	// Prometheus stores the configuration for the Prometheus metrics server.
	Prometheus Prometheus

	// RetryDelays lists the delays at which we retry when we find no files in
	// the queue. If this is set to [0, 1, 2, 3, 4, 5], it will retry after
	// 0 sec the first time it cannot find a file in the queue, 1 sec the
	// second time, and so on. Once it reaches the final value, it keeps it as
	// the final retry delay.
	RetryDelays []int `mapstructure:"retry_delays"`

	// DeferToOtherLargeCodes lists the exit codes for which the job is
	// requeued to be re-executed in large mode.
	DeferToOtherLargeCodes []int `mapstructure:"defer_to_other_large_codes"`

	// RetryLocallyWithLargeCodes lists the exit codes for which the job is
	// retried locally in large mode.
	RetryLocallyWithLargeCodes []int `mapstructure:"retry_locally_with_large_codes"`

	// These default to true; the controller will not pick up the associated
	// jobs if false.
	EnableExecution         bool `mapstructure:"enable_execution"`
	EnableBlobDecompression bool `mapstructure:"enable_blob_decompression"`
	EnableAggregation       bool `mapstructure:"enable_aggregation"`

	// TODO @gbotrel the only reason we keep these is for test purposes; the
	// default value is fine, we should remove them from here for readability.
	WorkerCmd          string             `mapstructure:"worker_cmd_tmpl"`
	WorkerCmdLarge     string             `mapstructure:"worker_cmd_large_tmpl"`
	WorkerCmdTmpl      *template.Template `mapstructure:"-"`
	WorkerCmdLargeTmpl *template.Template `mapstructure:"-"`
}
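The RetryDelays semantics can be made concrete with a small sketch; retryDelay below is a hypothetical helper, not part of this package.

package main

import "fmt"

// retryDelay mirrors the documented behavior of Controller.RetryDelays: the
// i-th consecutive empty poll waits delays[i] seconds, and the last value
// repeats once the list is exhausted.
func retryDelay(delays []int, attempt int) int {
	if attempt >= len(delays) {
		return delays[len(delays)-1]
	}
	return delays[attempt]
}

func main() {
	delays := []int{0, 1, 2, 3, 4, 5}
	for attempt := 0; attempt < 8; attempt++ {
		fmt.Printf("empty poll %d -> wait %ds\n", attempt, retryDelay(delays, attempt))
	}
}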
type Execution
type Execution struct {
	WithRequestDir `mapstructure:",squash"`

	// ProverMode stores the kind of prover to use.
	ProverMode ProverMode `mapstructure:"prover_mode" validate:"required,oneof=dev partial full proofless bench check-only"`

	// CanRunFullLarge indicates whether the prover is running on a large
	// machine (and can run full large traces).
	CanRunFullLarge bool `mapstructure:"can_run_full_large"`

	// ConflatedTracesDir stores the directory where the conflation traces are
	// stored.
	ConflatedTracesDir string `mapstructure:"conflated_traces_dir" validate:"required"`
}
type Prometheus
type ProverMode
type ProverMode string
const (
	ProverModeDev       ProverMode = "dev"
	ProverModePartial   ProverMode = "partial"
	ProverModeFull      ProverMode = "full"
	ProverModeProofless ProverMode = "proofless"

	// ProverModeBench is used to only run the inner-proof. This is convenient
	// in a context where it is simpler to not have to deal with the setup.
	ProverModeBench ProverMode = "bench"

	// ProverModeCheckOnly is used to test the constraints of the whole system.
	ProverModeCheckOnly ProverMode = "check-only"
)
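A small sketch of branching on the mode, e.g. to skip loading the setup in bench mode; the branching is illustrative, not the prover's actual control flow, and assumes the monorepo import path used in the earlier examples.

package main

import (
	"fmt"

	"github.com/consensys/linea-monorepo/prover/config"
)

func main() {
	mode := config.ProverModeBench

	switch mode {
	case config.ProverModeBench:
		// Bench only runs the inner proof, so no setup needs to be loaded.
		fmt.Println("skipping setup loading")
	case config.ProverModeCheckOnly:
		// Check-only tests the constraints of the whole system.
		fmt.Println("checking constraints, producing no proof")
	default:
		fmt.Println("loading setup for mode:", mode)
	}
}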
type PublicInput
type PublicInput struct {
	MaxNbDecompression int `mapstructure:"max_nb_decompression" validate:"gte=0"`
	MaxNbExecution     int `mapstructure:"max_nb_execution" validate:"gte=0"`

	// MaxNbCircuits, if not set, will be set to
	// MaxNbDecompression + MaxNbExecution.
	MaxNbCircuits int `mapstructure:"max_nb_circuits" validate:"gte=0"`

	ExecutionMaxNbMsg int `mapstructure:"execution_max_nb_msg" validate:"gte=0"`
	L2MsgMerkleDepth  int `mapstructure:"l2_msg_merkle_depth" validate:"gte=0"`

	// L2MsgMaxNbMerkle, if not explicitly provided (i.e. non-positive), will
	// be set to the maximum.
	L2MsgMaxNbMerkle int `mapstructure:"l2_msg_max_nb_merkle" validate:"gte=0"`

	MockKeccakWizard bool // for testing purposes only

	ChainID          uint           // duplicate from Config
	L2MsgServiceAddr common.Address // duplicate from Config
}
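The defaulting rule for MaxNbCircuits can be stated as a one-line function; applyDefault below is a sketch only, the real defaulting happens inside the package.

package main

import "fmt"

// applyDefault mirrors the documented rule: when MaxNbCircuits is not set
// (non-positive), it becomes MaxNbDecompression + MaxNbExecution.
func applyDefault(maxNbCircuits, maxNbDecompression, maxNbExecution int) int {
	if maxNbCircuits <= 0 {
		return maxNbDecompression + maxNbExecution
	}
	return maxNbCircuits
}

func main() {
	fmt.Println(applyDefault(0, 400, 200))  // 600: defaulted
	fmt.Println(applyDefault(64, 400, 200)) // 64: explicit value kept
}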
type TracesLimits
type TracesLimits struct {
	Add               int `mapstructure:"ADD" validate:"power_of_2" corset:"add"`
	Bin               int `mapstructure:"BIN" validate:"power_of_2" corset:"bin"`
	Blake2Fmodexpdata int `mapstructure:"BLAKE_MODEXP_DATA" validate:"power_of_2" corset:"blake2fmodexpdata"`
	Blockdata         int `mapstructure:"BLOCK_DATA" corset:"blockdata"`
	Blockhash         int `mapstructure:"BLOCK_HASH" validate:"power_of_2" corset:"blockhash"`
	Ecdata            int `mapstructure:"EC_DATA" validate:"power_of_2" corset:"ecdata"`
	Euc               int `mapstructure:"EUC" validate:"power_of_2" corset:"euc"`
	Exp               int `mapstructure:"EXP" validate:"power_of_2" corset:"exp"`
	Ext               int `mapstructure:"EXT" validate:"power_of_2" corset:"ext"`
	Gas               int `mapstructure:"GAS" validate:"power_of_2" corset:"gas"`
	Hub               int `mapstructure:"HUB" validate:"power_of_2" corset:"hub"`
	Logdata           int `mapstructure:"LOG_DATA" validate:"power_of_2" corset:"logdata"`
	Loginfo           int `mapstructure:"LOG_INFO" validate:"power_of_2" corset:"loginfo"`
	Mmio              int `mapstructure:"MMIO" validate:"power_of_2" corset:"mmio"`
	Mmu               int `mapstructure:"MMU" validate:"power_of_2" corset:"mmu"`
	Mod               int `mapstructure:"MOD" validate:"power_of_2" corset:"mod"`
	Mul               int `mapstructure:"MUL" validate:"power_of_2" corset:"mul"`
	Mxp               int `mapstructure:"MXP" validate:"power_of_2" corset:"mxp"`
	Oob               int `mapstructure:"OOB" validate:"power_of_2" corset:"oob"`
	Rlpaddr           int `mapstructure:"RLP_ADDR" validate:"power_of_2" corset:"rlpaddr"`
	Rlptxn            int `mapstructure:"RLP_TXN" validate:"power_of_2" corset:"rlptxn"`
	Rlptxrcpt         int `mapstructure:"RLP_TXN_RCPT" validate:"power_of_2" corset:"rlptxrcpt"`
	Rom               int `mapstructure:"ROM" validate:"power_of_2" corset:"rom"`
	Romlex            int `mapstructure:"ROM_LEX" validate:"power_of_2" corset:"romlex"`
	Shakiradata       int `mapstructure:"SHAKIRA_DATA" validate:"power_of_2" corset:"shakiradata"`
	Shf               int `mapstructure:"SHF" validate:"power_of_2" corset:"shf"`
	Stp               int `mapstructure:"STP" validate:"power_of_2" corset:"stp"`
	Trm               int `mapstructure:"TRM" validate:"power_of_2" corset:"trm"`
	Txndata           int `mapstructure:"TXN_DATA" validate:"power_of_2" corset:"txndata"`
	Wcp               int `mapstructure:"WCP" validate:"power_of_2" corset:"wcp"`

	Binreftable int `mapstructure:"BIN_REFERENCE_TABLE" validate:"power_of_2" corset:"binreftable"`
	Shfreftable int `mapstructure:"SHF_REFERENCE_TABLE" validate:"power_of_2" corset:"shfreftable"`
	Instdecoder int `mapstructure:"INSTRUCTION_DECODER" validate:"power_of_2" corset:"instdecoder"`

	PrecompileEcrecoverEffectiveCalls    int `mapstructure:"PRECOMPILE_ECRECOVER_EFFECTIVE_CALLS"`
	PrecompileSha2Blocks                 int `mapstructure:"PRECOMPILE_SHA2_BLOCKS"`
	PrecompileRipemdBlocks               int `mapstructure:"PRECOMPILE_RIPEMD_BLOCKS"`
	PrecompileModexpEffectiveCalls       int `mapstructure:"PRECOMPILE_MODEXP_EFFECTIVE_CALLS"`
	PrecompileEcaddEffectiveCalls        int `mapstructure:"PRECOMPILE_ECADD_EFFECTIVE_CALLS"`
	PrecompileEcmulEffectiveCalls        int `mapstructure:"PRECOMPILE_ECMUL_EFFECTIVE_CALLS"`
	PrecompileEcpairingEffectiveCalls    int `mapstructure:"PRECOMPILE_ECPAIRING_FINAL_EXPONENTIATIONS"`
	PrecompileEcpairingMillerLoops       int `mapstructure:"PRECOMPILE_ECPAIRING_MILLER_LOOPS"`
	PrecompileEcpairingG2MembershipCalls int `mapstructure:"PRECOMPILE_ECPAIRING_G2_MEMBERSHIP_CALLS"`
	PrecompileBlakeEffectiveCalls        int `mapstructure:"PRECOMPILE_BLAKE_EFFECTIVE_CALLS"`
	PrecompileBlakeRounds                int `mapstructure:"PRECOMPILE_BLAKE_ROUNDS"`

	BlockKeccak       int `mapstructure:"BLOCK_KECCAK"`
	BlockL1Size       int `mapstructure:"BLOCK_L1_SIZE"`
	BlockL2L1Logs     int `mapstructure:"BLOCK_L2_L1_LOGS"`
	BlockTransactions int `mapstructure:"BLOCK_TRANSACTIONS"`

	ShomeiMerkleProofs int `mapstructure:"SHOMEI_MERKLE_PROOFS"`
}
The trace limits define the maximum trace size, per module, that the prover can handle. Raising these limits increases the prover's memory requirements while decreasing the number of transactions it can prove in a single run. The limits are also vital inputs to the setup generator, so any change to them requires running a new setup and updating the verifier contracts before deploying.
func (*TracesLimits) Checksum
func (tl *TracesLimits) Checksum() string
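A hedged sketch of why the checksum is useful: detecting that the configured limits no longer match those the setup was generated with. Reading the recorded checksum from the setup's manifest is an assumption suggested by ManifestFileName above, not a documented API; readChecksumFromManifest is a hypothetical helper.

package main

import (
	"fmt"

	"github.com/consensys/linea-monorepo/prover/config"
)

func main() {
	cfg, err := config.NewConfigFromFile("/etc/prover/config.toml") // hypothetical path
	if err != nil {
		panic(err)
	}

	// Assumption: the checksum of the limits used at setup time was recorded
	// somewhere, e.g. in the setup's manifest.json.
	setupChecksum := readChecksumFromManifest()

	if cfg.TracesLimits.Checksum() != setupChecksum {
		fmt.Println("trace limits changed since setup generation:",
			"run a new setup and update the verifier contracts before deploying")
	}
}

// readChecksumFromManifest is a hypothetical stand-in for however the
// deployment retrieves the checksum recorded at setup time.
func readChecksumFromManifest() string { return "" }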
type WithRequestDir
type WithRequestDir struct {
RequestsRootDir string `mapstructure:"requests_root_dir" validate:"required"`
}
func (*WithRequestDir) DirDone
func (cfg *WithRequestDir) DirDone() string
func (*WithRequestDir) DirFrom
func (cfg *WithRequestDir) DirFrom() string
func (*WithRequestDir) DirTo
func (cfg *WithRequestDir) DirTo() string
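A sketch of the directory layout these accessors presumably derive from RequestsRootDir, using the RequestsFromSubDir, RequestsToSubDir and RequestsDoneSubDir constants above; the exact join is an assumption based on the constant names.

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	root := "/data/prover/execution" // hypothetical RequestsRootDir

	fmt.Println(filepath.Join(root, "requests"))      // DirFrom: where jobs are picked up
	fmt.Println(filepath.Join(root, "responses"))     // DirTo: where responses are written
	fmt.Println(filepath.Join(root, "requests-done")) // DirDone: where finished requests move
}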