diff --git a/pkg/settings/cresettings/defaults.json b/pkg/settings/cresettings/defaults.json index 2b2a7c7ef..3355738b6 100644 --- a/pkg/settings/cresettings/defaults.json +++ b/pkg/settings/cresettings/defaults.json @@ -31,6 +31,10 @@ "WASMExecutionTimeout": "1m0s", "WASMMemoryLimit": "100mb", "WASMBinarySizeLimit": "30mb", + "WASMCompressedBinarySizeLimit": "20mb", + "WASMConfigSizeLimit": "30mb", + "WASMSecretsSizeLimit": "30mb", + "WASMResponseSizeLimit": "5mb", "ConsensusObservationSizeLimit": "10kb", "ConsensusCallsLimit": "2", "LogLineLimit": "1kb", @@ -65,7 +69,7 @@ "CallLimit": "2" }, "HTTPAction": { - "CallLimit": "3", + "CallLimit": "5", "ResponseSizeLimit": "10kb", "ConnectionTimeout": "10s", "RequestSizeLimit": "100kb", diff --git a/pkg/settings/cresettings/defaults.toml b/pkg/settings/cresettings/defaults.toml index 7376c826a..d09f6bad4 100644 --- a/pkg/settings/cresettings/defaults.toml +++ b/pkg/settings/cresettings/defaults.toml @@ -31,6 +31,10 @@ ExecutionResponseLimit = '100kb' WASMExecutionTimeout = '1m0s' WASMMemoryLimit = '100mb' WASMBinarySizeLimit = '30mb' +WASMCompressedBinarySizeLimit = '20mb' +WASMConfigSizeLimit = '30mb' +WASMSecretsSizeLimit = '30mb' +WASMResponseSizeLimit = '5mb' ConsensusObservationSizeLimit = '10kb' ConsensusCallsLimit = '2' LogLineLimit = '1kb' @@ -66,7 +70,7 @@ ObservationSizeLimit = '10kb' CallLimit = '2' [PerWorkflow.HTTPAction] -CallLimit = '3' +CallLimit = '5' ResponseSizeLimit = '10kb' ConnectionTimeout = '10s' RequestSizeLimit = '100kb' diff --git a/pkg/settings/cresettings/settings.go b/pkg/settings/cresettings/settings.go index 75a36c0e0..67830feba 100644 --- a/pkg/settings/cresettings/settings.go +++ b/pkg/settings/cresettings/settings.go @@ -77,6 +77,10 @@ var Default = Schema{ WASMExecutionTimeout: Duration(60 * time.Second), WASMMemoryLimit: Size(100 * config.MByte), WASMBinarySizeLimit: Size(30 * config.MByte), + WASMCompressedBinarySizeLimit: Size(20 * config.MByte), + WASMConfigSizeLimit: Size(30 * 
config.MByte), + WASMSecretsSizeLimit: Size(30 * config.MByte), + WASMResponseSizeLimit: Size(5 * config.MByte), ConsensusObservationSizeLimit: Size(10 * config.KByte), ConsensusCallsLimit: Int(2), LogLineLimit: Size(config.KByte), @@ -113,7 +117,7 @@ var Default = Schema{ CallLimit: Int(2), }, HTTPAction: httpAction{ - CallLimit: Int(3), + CallLimit: Int(5), ResponseSizeLimit: Size(10 * config.KByte), ConnectionTimeout: Duration(10 * time.Second), RequestSizeLimit: Size(100 * config.KByte), @@ -163,9 +167,13 @@ type Workflows struct { ExecutionTimeout Setting[time.Duration] ExecutionResponseLimit Setting[config.Size] - WASMExecutionTimeout Setting[time.Duration] - WASMMemoryLimit Setting[config.Size] - WASMBinarySizeLimit Setting[config.Size] + WASMExecutionTimeout Setting[time.Duration] + WASMMemoryLimit Setting[config.Size] + WASMBinarySizeLimit Setting[config.Size] + WASMCompressedBinarySizeLimit Setting[config.Size] + WASMConfigSizeLimit Setting[config.Size] + WASMSecretsSizeLimit Setting[config.Size] + WASMResponseSizeLimit Setting[config.Size] // Deprecated: use Consensus.ObservationSizeLimit ConsensusObservationSizeLimit Setting[config.Size] diff --git a/pkg/workflows/wasm/host/module.go b/pkg/workflows/wasm/host/module.go index 7bf980bef..0b01d01ce 100644 --- a/pkg/workflows/wasm/host/module.go +++ b/pkg/workflows/wasm/host/module.go @@ -21,8 +21,11 @@ import ( "github.com/bytecodealliance/wasmtime-go/v28" "google.golang.org/protobuf/proto" + "github.com/smartcontractkit/chainlink-common/pkg/config" "github.com/smartcontractkit/chainlink-common/pkg/custmsg" "github.com/smartcontractkit/chainlink-common/pkg/logger" + "github.com/smartcontractkit/chainlink-common/pkg/settings" + "github.com/smartcontractkit/chainlink-common/pkg/settings/limits" dagsdk "github.com/smartcontractkit/chainlink-common/pkg/workflows/sdk" "github.com/smartcontractkit/chainlink-common/pkg/workflows/wasm" wasmdagpb "github.com/smartcontractkit/chainlink-common/pkg/workflows/wasm/pb" 
@@ -52,18 +55,22 @@ type DeterminismConfig struct { Seed int64 } type ModuleConfig struct { - TickInterval time.Duration - Timeout *time.Duration - MaxMemoryMBs uint64 - MinMemoryMBs uint64 - InitialFuel uint64 - Logger logger.Logger - IsUncompressed bool - Fetch func(ctx context.Context, req *FetchRequest) (*FetchResponse, error) - MaxFetchRequests int - MaxCompressedBinarySize uint64 - MaxDecompressedBinarySize uint64 - MaxResponseSizeBytes uint64 + TickInterval time.Duration + Timeout *time.Duration + MaxMemoryMBs uint64 + MinMemoryMBs uint64 + MemoryLimiter limits.BoundLimiter[config.Size] // supersedes Max/MinMemoryMBs if set + InitialFuel uint64 + Logger logger.Logger + IsUncompressed bool + Fetch func(ctx context.Context, req *FetchRequest) (*FetchResponse, error) + MaxFetchRequests int + MaxCompressedBinarySize uint64 + MaxCompressedBinaryLimiter limits.BoundLimiter[config.Size] // supersedes MaxCompressedBinarySize if set + MaxDecompressedBinarySize uint64 + MaxDecompressedBinaryLimiter limits.BoundLimiter[config.Size] // supersedes MaxDecompressedBinarySize if set + MaxResponseSizeBytes uint64 + MaxResponseSizeLimiter limits.BoundLimiter[config.Size] // supersedes MaxResponseSizeBytes if set MaxLogLenBytes uint32 MaxLogCountDONMode uint32 @@ -143,7 +150,7 @@ func WithDeterminism() func(*ModuleConfig) { } } -func NewModule(modCfg *ModuleConfig, binary []byte, opts ...func(*ModuleConfig)) (*module, error) { +func NewModule(ctx context.Context, modCfg *ModuleConfig, binary []byte, opts ...func(*ModuleConfig)) (*module, error) { // Apply options to the module config. for _, opt := range opts { opt(modCfg) @@ -200,33 +207,59 @@ func NewModule(modCfg *ModuleConfig, binary []byte, opts ...func(*ModuleConfig)) modCfg.MaxLogCountNodeMode = uint32(defaultMaxLogCountNodeMode) } - // Take the max of the min and the configured max memory mbs. 
- // We do this because Go requires a minimum of 16 megabytes to run, - // and local testing has shown that with less than the min, some - // binaries may error sporadically. - modCfg.MaxMemoryMBs = uint64(math.Max(float64(modCfg.MinMemoryMBs), float64(modCfg.MaxMemoryMBs))) - - cfg := wasmtime.NewConfig() - cfg.SetEpochInterruption(true) - if modCfg.InitialFuel > 0 { - cfg.SetConsumeFuel(true) + lf := limits.Factory{Logger: modCfg.Logger} + if modCfg.MemoryLimiter == nil { + // Take the max of the min and the configured max memory mbs. + // We do this because Go requires a minimum of 16 megabytes to run, + // and local testing has shown that with less than the min, some + // binaries may error sporadically. + modCfg.MaxMemoryMBs = uint64(math.Max(float64(modCfg.MinMemoryMBs), float64(modCfg.MaxMemoryMBs))) + limit := settings.Size(config.Size(modCfg.MaxMemoryMBs) * config.MByte) + var err error + modCfg.MemoryLimiter, err = limits.MakeBoundLimiter(lf, limit) + if err != nil { + return nil, fmt.Errorf("failed to make memory limiter: %w", err) + } + } + if modCfg.MaxCompressedBinaryLimiter == nil { + limit := settings.Size(config.Size(modCfg.MaxCompressedBinarySize)) + var err error + modCfg.MaxCompressedBinaryLimiter, err = limits.MakeBoundLimiter(lf, limit) + if err != nil { + return nil, fmt.Errorf("failed to make compressed binary size limiter: %w", err) + } + } + if modCfg.MaxDecompressedBinaryLimiter == nil { + limit := settings.Size(config.Size(modCfg.MaxDecompressedBinarySize)) + var err error + modCfg.MaxDecompressedBinaryLimiter, err = limits.MakeBoundLimiter(lf, limit) + if err != nil { + return nil, fmt.Errorf("failed to make decompressed binary size limiter: %w", err) + } + } + if modCfg.MaxResponseSizeLimiter == nil { + limit := settings.Size(config.Size(modCfg.MaxResponseSizeBytes)) + var err error + modCfg.MaxResponseSizeLimiter, err = limits.MakeBoundLimiter(lf, limit) + if err != nil { + return nil, fmt.Errorf("failed to make response size limiter: 
%w", err) + } } - cfg.CacheConfigLoadDefault() - cfg.SetCraneliftOptLevel(wasmtime.OptLevelSpeedAndSize) - - // Handled differenty based on host OS. - SetUnwinding(cfg) - - engine := wasmtime.NewEngineWithConfig(cfg) if !modCfg.IsUncompressed { // validate the binary size before decompressing // this is to prevent decompression bombs - if uint64(len(binary)) > modCfg.MaxCompressedBinarySize { - return nil, fmt.Errorf("compressed binary size exceeds the maximum allowed size of %d bytes", modCfg.MaxCompressedBinarySize) + if err := modCfg.MaxCompressedBinaryLimiter.Check(ctx, config.SizeOf(binary)); err != nil { + if errors.Is(err, limits.ErrorBoundLimited[config.Size]{}) { + return nil, fmt.Errorf("compressed binary size exceeds the maximum allowed size of %d bytes: %w", modCfg.MaxCompressedBinarySize, err) + } + return nil, fmt.Errorf("failed to check compressed binary size limit: %w", err) } - - rdr := io.LimitReader(brotli.NewReader(bytes.NewBuffer(binary)), int64(modCfg.MaxDecompressedBinarySize+1)) + maxDecompressedBinarySize, err := modCfg.MaxDecompressedBinaryLimiter.Limit(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get decompressed binary size limit: %w", err) + } + rdr := io.LimitReader(brotli.NewReader(bytes.NewBuffer(binary)), int64(maxDecompressedBinarySize+1)) decompedBinary, err := io.ReadAll(rdr) if err != nil { return nil, fmt.Errorf("failed to decompress binary: %w", err) @@ -238,9 +271,27 @@ func NewModule(modCfg *ModuleConfig, binary []byte, opts ...func(*ModuleConfig)) // Validate the decompressed binary size. // io.LimitReader prevents decompression bombs by reading up to a set limit, but it will not return an error if the limit is reached. // The Read() method will return io.EOF, and ReadAll will gracefully handle it and return nil. 
- if uint64(len(binary)) > modCfg.MaxDecompressedBinarySize { - return nil, fmt.Errorf("decompressed binary size reached the maximum allowed size of %d bytes", modCfg.MaxDecompressedBinarySize) + if err := modCfg.MaxDecompressedBinaryLimiter.Check(ctx, config.SizeOf(binary)); err != nil { + if errors.Is(err, limits.ErrorBoundLimited[config.Size]{}) { + return nil, fmt.Errorf("decompressed binary size reached the maximum allowed size of %d bytes: %w", modCfg.MaxDecompressedBinarySize, err) + } + return nil, fmt.Errorf("failed to check decompressed binary size limit: %w", err) + } + + return newModule(modCfg, binary) +} + +func newModule(modCfg *ModuleConfig, binary []byte) (*module, error) { + cfg := wasmtime.NewConfig() + cfg.SetEpochInterruption(true) + if modCfg.InitialFuel > 0 { + cfg.SetConsumeFuel(true) } + cfg.CacheConfigLoadDefault() + cfg.SetCraneliftOptLevel(wasmtime.OptLevelSpeedAndSize) + SetUnwinding(cfg) // Handled differently based on host OS. + + engine := wasmtime.NewEngineWithConfig(cfg) mod, err := wasmtime.NewModule(engine, binary) if err != nil { @@ -256,16 +307,14 @@ func NewModule(modCfg *ModuleConfig, binary []byte, opts ...func(*ModuleConfig)) } } - m := &module{ + return &module{ engine: engine, module: mod, wconfig: cfg, cfg: modCfg, stopCh: make(chan struct{}), v2ImportName: v2ImportName, - } - - return m, nil + }, nil } func linkNoDAG(m *module, store *wasmtime.Store, exec *execution[*sdkpb.ExecutionResult]) (*wasmtime.Instance, error) { @@ -468,7 +517,7 @@ func (m *module) Run(ctx context.Context, request *wasmdagpb.Request) (*wasmdagp computeRequest := r.GetComputeRequest() if computeRequest != nil { computeRequest.RuntimeConfig = &wasmdagpb.RuntimeConfig{ - MaxResponseSizeBytes: int64(m.cfg.MaxResponseSizeBytes), + MaxResponseSizeBytes: int64(maxSize), } } } @@ -493,7 +542,11 @@ func runWasm[I, O proto.Message]( defer store.Close() - setMaxResponseSize(request, m.cfg.MaxResponseSizeBytes) + maxResponseSizeBytes, err := 
m.cfg.MaxResponseSizeLimiter.Limit(ctx) + if err != nil { + return o, fmt.Errorf("failed to get response size limit: %w", err) + } + setMaxResponseSize(request, uint64(maxResponseSizeBytes)) reqpb, err := proto.Marshal(request) if err != nil { return o, err @@ -517,8 +570,12 @@ func runWasm[I, O proto.Message]( } // Limit memory to max memory megabytes per instance. + maxMemoryBytes, err := m.cfg.MemoryLimiter.Limit(ctx) + if err != nil { + return o, fmt.Errorf("failed to get memory limit: %w", err) + } store.Limiter( - int64(m.cfg.MaxMemoryMBs)*int64(math.Pow(10, 6)), + int64(maxMemoryBytes/config.MByte)*int64(math.Pow(10, 6)), -1, // tableElements, -1 == default 1, // instances 1, // tables diff --git a/pkg/workflows/wasm/host/standard_test.go b/pkg/workflows/wasm/host/standard_test.go index 1ae7ac135..536465397 100644 --- a/pkg/workflows/wasm/host/standard_test.go +++ b/pkg/workflows/wasm/host/standard_test.go @@ -519,7 +519,7 @@ func makeTestModuleByName(t *testing.T, testName string, cfg *ModuleConfig) *mod if cfg == nil { cfg = defaultNoDAGModCfg(t) } - mod, err := NewModule(cfg, binary) + mod, err := NewModule(t.Context(), cfg, binary) require.NoError(t, err) return mod } diff --git a/pkg/workflows/wasm/host/wasm.go b/pkg/workflows/wasm/host/wasm.go index 58a798b60..d8c4bae7f 100644 --- a/pkg/workflows/wasm/host/wasm.go +++ b/pkg/workflows/wasm/host/wasm.go @@ -13,7 +13,7 @@ import ( ) func GetWorkflowSpec(ctx context.Context, modCfg *ModuleConfig, binary []byte, config []byte) (*legacySdk.WorkflowSpec, error) { - m, err := NewModule(modCfg, binary, WithDeterminism()) + m, err := NewModule(ctx, modCfg, binary, WithDeterminism()) if err != nil { return nil, fmt.Errorf("could not instantiate module: %w", err) } diff --git a/pkg/workflows/wasm/host/wasm_nodag_test.go b/pkg/workflows/wasm/host/wasm_nodag_test.go index b1350ce71..8a1156419 100644 --- a/pkg/workflows/wasm/host/wasm_nodag_test.go +++ b/pkg/workflows/wasm/host/wasm_nodag_test.go @@ -13,7 +13,7 @@ 
import ( "github.com/smartcontractkit/chainlink-common/pkg/logger" "github.com/smartcontractkit/chainlink-protos/cre/go/sdk" - mock "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) @@ -32,7 +32,7 @@ func Test_Sleep_Timeout(t *testing.T) { mc := defaultNoDAGModCfg(t) timeout := 1 * time.Second mc.Timeout = &timeout - m, err := NewModule(mc, binary) + m, err := NewModule(t.Context(), mc, binary) require.NoError(t, err) m.v2ImportName = "test" @@ -63,7 +63,7 @@ func Test_NoDag_Run(t *testing.T) { t.Run("NOK fails with unset ExecutionHelper for trigger", func(t *testing.T) { mc := defaultNoDAGModCfg(t) - m, err := NewModule(mc, binary) + m, err := NewModule(t.Context(), mc, binary) require.NoError(t, err) m.Start() @@ -81,7 +81,7 @@ func Test_NoDag_Run(t *testing.T) { t.Run("OK can subscribe without setting ExecutionHelper", func(t *testing.T) { mc := defaultNoDAGModCfg(t) - m, err := NewModule(mc, binary) + m, err := NewModule(t.Context(), mc, binary) require.NoError(t, err) m.Start() @@ -122,7 +122,7 @@ func Test_NoDAG_LoggingWithLimits(t *testing.T) { binary := createTestBinary(loggingLimitsBinaryCmd, loggingLimitsBinaryLocation, true, t) - m, err := NewModule(cfg, binary) + m, err := NewModule(t.Context(), cfg, binary) require.NoError(t, err) _, err = m.Execute(t.Context(), executeRequest, mockExecutionHelper) diff --git a/pkg/workflows/wasm/host/wasm_test.go b/pkg/workflows/wasm/host/wasm_test.go index a03398ad6..4255b2c32 100644 --- a/pkg/workflows/wasm/host/wasm_test.go +++ b/pkg/workflows/wasm/host/wasm_test.go @@ -206,7 +206,7 @@ func Test_Compute_Emit(t *testing.T) { ctx := t.Context() ctxValue := "test-value" ctx = context.WithValue(ctx, ctxKey, ctxValue) - m, err := NewModule(&ModuleConfig{ + m, err := NewModule(t.Context(), &ModuleConfig{ Logger: lggr, Fetch: fetchFunc, IsUncompressed: true, @@ -236,7 +236,7 @@ func Test_Compute_Emit(t *testing.T) { t.Run("failure on emit writes to error chain 
and logs", func(t *testing.T) { lggr, logs := logger.TestObserved(t, zapcore.InfoLevel) - m, err := NewModule(&ModuleConfig{ + m, err := NewModule(t.Context(), &ModuleConfig{ Logger: lggr, Fetch: fetchFunc, IsUncompressed: true, @@ -282,7 +282,7 @@ func Test_Compute_Emit(t *testing.T) { t.Run("failure on emit due to missing workflow identifying metadata", func(t *testing.T) { lggr, logs := logger.TestObserved(t, zapcore.InfoLevel) - m, err := NewModule(&ModuleConfig{ + m, err := NewModule(t.Context(), &ModuleConfig{ Logger: lggr, Fetch: fetchFunc, IsUncompressed: true, @@ -336,7 +336,7 @@ func Test_Compute_PanicIsRecovered(t *testing.T) { binary := createTestBinary(computePanicBinaryCmd, computePanicBinaryLocation, true, t) ctx := t.Context() - m, err := NewModule(&ModuleConfig{ + m, err := NewModule(t.Context(), &ModuleConfig{ Logger: logger.Test(t), IsUncompressed: true, }, binary) @@ -377,7 +377,7 @@ func Test_Compute_Fetch(t *testing.T) { Headers: map[string]string{}, } - m, err := NewModule(&ModuleConfig{ + m, err := NewModule(ctx, &ModuleConfig{ Logger: logger.Test(t), IsUncompressed: true, Fetch: func(ctx context.Context, req *FetchRequest) (*FetchResponse, error) { @@ -429,7 +429,7 @@ func Test_Compute_Fetch(t *testing.T) { Headers: map[string]string{}, } - m, err := NewModule(&ModuleConfig{ + m, err := NewModule(ctx, &ModuleConfig{ Logger: logger.Test(t), IsUncompressed: true, Fetch: func(ctx context.Context, req *FetchRequest) (*FetchResponse, error) { @@ -481,7 +481,7 @@ func Test_Compute_Fetch(t *testing.T) { Headers: map[string]string{}, } - m, err := NewModule(&ModuleConfig{ + m, err := NewModule(ctx, &ModuleConfig{ Logger: logger.Test(t), IsUncompressed: true, Fetch: func(ctx context.Context, req *FetchRequest) (*FetchResponse, error) { @@ -530,7 +530,7 @@ func Test_Compute_Fetch(t *testing.T) { ctx := t.Context() logger, logs := logger.TestObserved(t, zapcore.InfoLevel) - m, err := NewModule(&ModuleConfig{ + m, err := NewModule(ctx, &ModuleConfig{ 
Logger: logger, IsUncompressed: true, Fetch: func(ctx context.Context, req *FetchRequest) (*FetchResponse, error) { @@ -584,7 +584,7 @@ func Test_Compute_Fetch(t *testing.T) { Headers: map[string]string{}, } - m, err := NewModule(&ModuleConfig{ + m, err := NewModule(t.Context(), &ModuleConfig{ Logger: logger.Test(t), IsUncompressed: true, Fetch: func(ctx context.Context, req *FetchRequest) (*FetchResponse, error) { @@ -632,7 +632,7 @@ func Test_Compute_Fetch(t *testing.T) { t.Run("OK: context cancelation", func(t *testing.T) { t.Parallel() - m, err := NewModule(&ModuleConfig{ + m, err := NewModule(t.Context(), &ModuleConfig{ Logger: logger.Test(t), IsUncompressed: true, Fetch: func(ctx context.Context, req *FetchRequest) (*FetchResponse, error) { @@ -684,7 +684,7 @@ func Test_Compute_Fetch(t *testing.T) { Headers: map[string]string{}, } - m, err := NewModule(&ModuleConfig{ + m, err := NewModule(ctx, &ModuleConfig{ Logger: logger.Test(t), IsUncompressed: true, Fetch: func(ctx context.Context, req *FetchRequest) (*FetchResponse, error) { @@ -729,7 +729,7 @@ func Test_Compute_Fetch(t *testing.T) { Headers: map[string]string{}, } - m, err := NewModule(&ModuleConfig{ + m, err := NewModule(ctx, &ModuleConfig{ Logger: logger.Test(t), IsUncompressed: true, Fetch: func(ctx context.Context, req *FetchRequest) (*FetchResponse, error) { @@ -773,7 +773,7 @@ func Test_Compute_Fetch(t *testing.T) { Headers: map[string]string{}, } - m, err := NewModule(&ModuleConfig{ + m, err := NewModule(ctx, &ModuleConfig{ Logger: logger.Test(t), IsUncompressed: true, Fetch: func(ctx context.Context, req *FetchRequest) (*FetchResponse, error) { @@ -811,7 +811,6 @@ func Test_Compute_Fetch(t *testing.T) { t.Parallel() binary := createTestBinary(fetchlimitBinaryCmd, fetchlimitBinaryLocation, true, t) ctx := t.Context() - t.Context() expected := FetchResponse{ ExecutionError: false, Body: []byte("valid-response"), @@ -819,7 +818,7 @@ func Test_Compute_Fetch(t *testing.T) { Headers: 
map[string]string{}, } - m, err := NewModule(&ModuleConfig{ + m, err := NewModule(ctx, &ModuleConfig{ Logger: logger.Test(t), IsUncompressed: true, Fetch: func(ctx context.Context, req *FetchRequest) (*FetchResponse, error) { @@ -864,7 +863,7 @@ func TestModule_Errors(t *testing.T) { ctx := t.Context() binary := createTestBinary(successBinaryCmd, successBinaryLocation, true, t) - m, err := NewModule(&ModuleConfig{IsUncompressed: true, Logger: logger.Test(t)}, binary) + m, err := NewModule(ctx, &ModuleConfig{IsUncompressed: true, Logger: logger.Test(t)}, binary) require.NoError(t, err) _, err = m.Run(ctx, nil) @@ -911,7 +910,7 @@ func TestModule_Sandbox_Memory(t *testing.T) { ctx := t.Context() binary := createTestBinary(oomBinaryCmd, oomBinaryLocation, true, t) - m, err := NewModule(&ModuleConfig{IsUncompressed: true, Logger: logger.Test(t)}, binary) + m, err := NewModule(ctx, &ModuleConfig{IsUncompressed: true, Logger: logger.Test(t)}, binary) require.NoError(t, err) m.Start() @@ -930,7 +929,7 @@ func TestModule_CompressedBinarySize(t *testing.T) { t.Run("compressed binary size is smaller than the default 10mb limit", func(t *testing.T) { binary := createTestBinary(successBinaryCmd, successBinaryLocation, false, t) - _, err := NewModule(&ModuleConfig{IsUncompressed: false, Logger: logger.Test(t)}, binary) + _, err := NewModule(t.Context(), &ModuleConfig{IsUncompressed: false, Logger: logger.Test(t)}, binary) require.NoError(t, err) }) @@ -943,7 +942,7 @@ func TestModule_CompressedBinarySize(t *testing.T) { require.NoError(t, err) require.NoError(t, bwr.Close()) - _, err = NewModule(&ModuleConfig{IsUncompressed: false, Logger: logger.Test(t)}, binary) + _, err = NewModule(t.Context(), &ModuleConfig{IsUncompressed: false, Logger: logger.Test(t)}, binary) default10mbLimit := fmt.Sprintf("binary size exceeds the maximum allowed size of %d bytes", defaultMaxCompressedBinarySize) require.ErrorContains(t, err, default10mbLimit) }) @@ -958,7 +957,7 @@ func 
TestModule_CompressedBinarySize(t *testing.T) { require.NoError(t, err) require.NoError(t, bwr.Close()) - _, err = NewModule(&ModuleConfig{IsUncompressed: false, MaxCompressedBinarySize: customMaxCompressedBinarySize, Logger: logger.Test(t)}, binary) + _, err = NewModule(t.Context(), &ModuleConfig{IsUncompressed: false, MaxCompressedBinarySize: customMaxCompressedBinarySize, Logger: logger.Test(t)}, binary) default10mbLimit := fmt.Sprintf("binary size exceeds the maximum allowed size of %d bytes", customMaxCompressedBinarySize) require.ErrorContains(t, err, default10mbLimit) }) @@ -973,13 +972,13 @@ func TestModule_DecompressedBinarySize(t *testing.T) { require.NoError(t, err) t.Run("decompressed binary size is within the limit", func(t *testing.T) { customDecompressedBinarySize := uint64(len(decompedBinary)) - _, err := NewModule(&ModuleConfig{IsUncompressed: false, MaxDecompressedBinarySize: customDecompressedBinarySize, Logger: logger.Test(t)}, binary) + _, err := NewModule(t.Context(), &ModuleConfig{IsUncompressed: false, MaxDecompressedBinarySize: customDecompressedBinarySize, Logger: logger.Test(t)}, binary) require.NoError(t, err) }) t.Run("decompressed binary size is bigger than the limit", func(t *testing.T) { customDecompressedBinarySize := uint64(len(decompedBinary) - 1) - _, err := NewModule(&ModuleConfig{IsUncompressed: false, MaxDecompressedBinarySize: customDecompressedBinarySize, Logger: logger.Test(t)}, binary) + _, err := NewModule(t.Context(), &ModuleConfig{IsUncompressed: false, MaxDecompressedBinarySize: customDecompressedBinarySize, Logger: logger.Test(t)}, binary) decompressedSizeExceeded := fmt.Sprintf("decompressed binary size reached the maximum allowed size of %d bytes", customDecompressedBinarySize) require.ErrorContains(t, err, decompressedSizeExceeded) }) @@ -991,7 +990,7 @@ func TestModule_Sandbox_SleepIsStubbedOut(t *testing.T) { binary := createTestBinary(sleepBinaryCmd, sleepBinaryLocation, true, t) d := 1 * time.Millisecond - m, 
err := NewModule(&ModuleConfig{Timeout: &d, IsUncompressed: true, Logger: logger.Test(t)}, binary) + m, err := NewModule(ctx, &ModuleConfig{Timeout: &d, IsUncompressed: true, Logger: logger.Test(t)}, binary) require.NoError(t, err) m.Start() @@ -1017,7 +1016,7 @@ func TestModule_Sandbox_Timeout(t *testing.T) { binary := createTestBinary(sleepBinaryCmd, sleepBinaryLocation, true, t) tmt := 10 * time.Millisecond - m, err := NewModule(&ModuleConfig{IsUncompressed: true, Logger: logger.Test(t), Timeout: &tmt}, binary) + m, err := NewModule(ctx, &ModuleConfig{IsUncompressed: true, Logger: logger.Test(t), Timeout: &tmt}, binary) require.NoError(t, err) m.Start() @@ -1037,7 +1036,7 @@ func TestModule_Sandbox_CantReadFiles(t *testing.T) { ctx := t.Context() binary := createTestBinary(filesBinaryCmd, filesBinaryLocation, true, t) - m, err := NewModule(&ModuleConfig{IsUncompressed: true, Logger: logger.Test(t)}, binary) + m, err := NewModule(t.Context(), &ModuleConfig{IsUncompressed: true, Logger: logger.Test(t)}, binary) require.NoError(t, err) m.Start() @@ -1065,7 +1064,7 @@ func TestModule_Sandbox_CantCreateDir(t *testing.T) { ctx := t.Context() binary := createTestBinary(dirsBinaryCmd, dirsBinaryLocation, true, t) - m, err := NewModule(&ModuleConfig{IsUncompressed: true, Logger: logger.Test(t)}, binary) + m, err := NewModule(t.Context(), &ModuleConfig{IsUncompressed: true, Logger: logger.Test(t)}, binary) require.NoError(t, err) m.Start() @@ -1093,7 +1092,7 @@ func TestModule_Sandbox_HTTPRequest(t *testing.T) { ctx := t.Context() binary := createTestBinary(httpBinaryCmd, httpBinaryLocation, true, t) - m, err := NewModule(&ModuleConfig{IsUncompressed: true, Logger: logger.Test(t)}, binary) + m, err := NewModule(t.Context(), &ModuleConfig{IsUncompressed: true, Logger: logger.Test(t)}, binary) require.NoError(t, err) m.Start() @@ -1121,7 +1120,7 @@ func TestModule_Sandbox_ReadEnv(t *testing.T) { ctx := t.Context() binary := createTestBinary(envBinaryCmd, envBinaryLocation, 
true, t) - m, err := NewModule(&ModuleConfig{IsUncompressed: true, Logger: logger.Test(t)}, binary) + m, err := NewModule(ctx, &ModuleConfig{IsUncompressed: true, Logger: logger.Test(t)}, binary) require.NoError(t, err) m.Start() @@ -1168,7 +1167,7 @@ func TestModule_Sandbox_RandomGet(t *testing.T) { ctx := t.Context() binary := createTestBinary(randBinaryCmd, randBinaryLocation, true, t) - m, err := NewModule(&ModuleConfig{ + m, err := NewModule(ctx, &ModuleConfig{ Logger: logger.Test(t), IsUncompressed: true, Determinism: &DeterminismConfig{ @@ -1187,7 +1186,7 @@ func TestModule_Sandbox_RandomGet(t *testing.T) { ctx := t.Context() binary := createTestBinary(randBinaryCmd, randBinaryLocation, true, t) - m, err := NewModule(&ModuleConfig{ + m, err := NewModule(ctx, &ModuleConfig{ Logger: logger.Test(t), IsUncompressed: true, }, binary) @@ -1215,7 +1214,7 @@ func TestModule_MaxResponseSizeBytesLimit(t *testing.T) { } maxResponseSizeBytes := uint64(10 * 1024) - m, err := NewModule(&ModuleConfig{IsUncompressed: true, Logger: logger.Test(t), Fetch: fetchFn, MaxResponseSizeBytes: maxResponseSizeBytes}, binary) + m, err := NewModule(t.Context(), &ModuleConfig{IsUncompressed: true, Logger: logger.Test(t), Fetch: fetchFn, MaxResponseSizeBytes: maxResponseSizeBytes}, binary) require.NoError(t, err) m.Start() @@ -1249,7 +1248,7 @@ func TestModule_MaxResponseSizeBytesLimit(t *testing.T) { // setting a lower limit than the size of the fetch response maxResponseSizeBytes := uint64(1024) - m, err := NewModule(&ModuleConfig{IsUncompressed: true, Logger: logger.Test(t), Fetch: fetchFn, MaxResponseSizeBytes: maxResponseSizeBytes}, binary) + m, err := NewModule(t.Context(), &ModuleConfig{IsUncompressed: true, Logger: logger.Test(t), Fetch: fetchFn, MaxResponseSizeBytes: maxResponseSizeBytes}, binary) require.NoError(t, err) m.Start() @@ -1285,7 +1284,7 @@ func TestModule_MaxResponseSizeBytesLimit(t *testing.T) { // an emitter response with an error "some error" when marshaled is 14 
bytes // setting a maxResponseSizeBytes that should handle that payload maxResponseSizeBytes := uint64(14) - m, err := NewModule(&ModuleConfig{IsUncompressed: true, Logger: lggr, Labeler: emitter, MaxResponseSizeBytes: maxResponseSizeBytes}, binary) + m, err := NewModule(t.Context(), &ModuleConfig{IsUncompressed: true, Logger: lggr, Labeler: emitter, MaxResponseSizeBytes: maxResponseSizeBytes}, binary) require.NoError(t, err) m.Start() @@ -1339,7 +1338,7 @@ func TestModule_MaxResponseSizeBytesLimit(t *testing.T) { // setting a lower limit than the size of the emitted message maxResponseSizeBytes := uint64(1) - m, err := NewModule(&ModuleConfig{IsUncompressed: true, Logger: lggr, Labeler: emitter, MaxResponseSizeBytes: maxResponseSizeBytes}, binary) + m, err := NewModule(t.Context(), &ModuleConfig{IsUncompressed: true, Logger: lggr, Labeler: emitter, MaxResponseSizeBytes: maxResponseSizeBytes}, binary) require.NoError(t, err) m.Start()