Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .buildkite/code.pipeline.yml
Original file line number Diff line number Diff line change
Expand Up @@ -262,6 +262,7 @@ steps:
--scenario e2e/runtime/rofl
--scenario e2e/runtime/trust-root/.+
--scenario e2e/runtime/keymanager-.+
--scenario e2e/runtime/observer
Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

To be able to run this in non-sgx mode we would have to fix:
https://github.com/oasisprotocol/oasis-core/blob/master/go/worker/keymanager/secrets.go#L245:L264. I.e. in non-sgx mode access list obtained from the consensus registered nodes is not applied.

In my opinion this is out of scope for this PR. I would also be fine with the stronger test, although it is less practical to run locally.

env:
# Unsafe flags needed as the trust-root test rebuilds the enclave with embedded trust root data.
OASIS_UNSAFE_SKIP_AVR_VERIFY: "1"
Expand Down
Empty file added .changelog/6497.trivial.md
Empty file.
8 changes: 4 additions & 4 deletions go/oasis-net-runner/fixtures/default.go
Original file line number Diff line number Diff line change
Expand Up @@ -143,8 +143,8 @@ func newDefaultFixture() (*oasis.NetworkFixture, error) {
Kind: registry.KindKeyManager,
Entity: 0,
Keymanager: -1,
AdmissionPolicy: registry.RuntimeAdmissionPolicy{
AnyNode: &registry.AnyNodeRuntimeAdmissionPolicy{},
AdmissionPolicy: oasis.RuntimeAdmissionPolicyFixture{
AnyNode: true,
},
GovernanceModel: registry.GovernanceEntity,
Deployments: []oasis.DeploymentCfg{
Expand Down Expand Up @@ -234,8 +234,8 @@ func newDefaultFixture() (*oasis.NetworkFixture, error) {
BatchFlushTimeout: time.Second,
ProposerTimeout: 2 * time.Second,
},
AdmissionPolicy: registry.RuntimeAdmissionPolicy{
AnyNode: &registry.AnyNodeRuntimeAdmissionPolicy{},
AdmissionPolicy: oasis.RuntimeAdmissionPolicyFixture{
AnyNode: true,
},
GenesisRound: 0,
GovernanceModel: registry.GovernanceEntity,
Expand Down
127 changes: 125 additions & 2 deletions go/oasis-test-runner/oasis/fixture.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ import (
beacon "github.com/oasisprotocol/oasis-core/go/beacon/api"
"github.com/oasisprotocol/oasis-core/go/common"
"github.com/oasisprotocol/oasis-core/go/common/crypto/hash"
"github.com/oasisprotocol/oasis-core/go/common/crypto/signature"
"github.com/oasisprotocol/oasis-core/go/common/node"
"github.com/oasisprotocol/oasis-core/go/common/sgx"
"github.com/oasisprotocol/oasis-core/go/consensus/cometbft/config"
Expand All @@ -28,6 +29,7 @@ type NetworkFixture struct {
Keymanagers []KeymanagerFixture `json:"keymanagers,omitempty"`
KeymanagerPolicies []KeymanagerPolicyFixture `json:"keymanager_policies,omitempty"`
ComputeWorkers []ComputeWorkerFixture `json:"compute_workers,omitempty"`
Observers []ObserverFixture `json:"observers,omitempty"`
Sentries []SentryFixture `json:"sentries,omitempty"`
Clients []ClientFixture `json:"clients,omitempty"`
StatelessClients []StatelessClientFixture `json:"stateless_clients,omitempty"`
Expand Down Expand Up @@ -107,6 +109,13 @@ func (f *NetworkFixture) Create(env *env.Env) (*Network, error) {
}
}

// Provision the observers.
for _, fx := range f.Observers {
if _, err = fx.Create(net); err != nil {
return nil, err
}
}

// Provision the client nodes.
for _, fx := range f.Clients {
if _, err = fx.Create(net); err != nil {
Expand Down Expand Up @@ -241,7 +250,7 @@ type RuntimeFixture struct {
TxnScheduler registry.TxnSchedulerParameters `json:"txn_scheduler"`
Storage registry.StorageParameters `json:"storage"`

AdmissionPolicy registry.RuntimeAdmissionPolicy `json:"admission_policy"`
Comment thread
martintomazic marked this conversation as resolved.
AdmissionPolicy RuntimeAdmissionPolicyFixture `json:"admission_policy"`
Constraints map[scheduler.CommitteeKind]map[scheduler.Role]registry.SchedulingConstraints `json:"constraints,omitempty"`
Staking registry.RuntimeStakingParameters `json:"staking,omitempty"`

Expand All @@ -259,6 +268,10 @@ func (f *RuntimeFixture) Create(netFixture *NetworkFixture, net *Network) (*Runt
if err != nil {
return nil, err
}
admissionPolicy, err := f.AdmissionPolicy.Resolve(net)
if err != nil {
return nil, err
}

var km *Runtime
if f.Keymanager != -1 {
Expand All @@ -282,7 +295,7 @@ func (f *RuntimeFixture) Create(netFixture *NetworkFixture, net *Network) (*Runt
Executor: f.Executor,
TxnScheduler: f.TxnScheduler,
Storage: f.Storage,
AdmissionPolicy: f.AdmissionPolicy,
AdmissionPolicy: admissionPolicy,
Staking: f.Staking,
GenesisRound: f.GenesisRound,
GenesisStateRoot: f.GenesisStateRoot,
Expand All @@ -294,6 +307,69 @@ func (f *RuntimeFixture) Create(netFixture *NetworkFixture, net *Network) (*Runt
})
}

// RuntimeAdmissionPolicyFixture is a runtime admission policy fixture that is
// resolved into a registry.RuntimeAdmissionPolicy when the fixture is created.
//
// Entities are referenced by their fixture index rather than by public key so
// that fixtures can be written before any keys are generated.
type RuntimeAdmissionPolicyFixture struct {
	// AnyNode, if true, allows any node to register for the runtime.
	AnyNode bool `json:"any_node,omitempty"`
	// EntityWhitelist restricts registration to the listed entities.
	EntityWhitelist *EntityWhitelistRuntimeAdmissionPolicyFixture `json:"entity_whitelist,omitempty"`
	// PerRole contains per-role admission policies, keyed by role mask.
	PerRole map[node.RolesMask]PerRoleAdmissionPolicyFixture `json:"per_role,omitempty"`
}

// EntityWhitelistRuntimeAdmissionPolicyFixture is an entity whitelist runtime
// admission policy fixture with entities referenced by fixture index.
type EntityWhitelistRuntimeAdmissionPolicyFixture struct {
	// Entities maps entity fixture indexes to their whitelist configuration.
	Entities map[int]registry.EntityWhitelistConfig `json:"entities"`
}

// PerRoleAdmissionPolicyFixture is a per-role admission policy fixture.
type PerRoleAdmissionPolicyFixture struct {
	// EntityWhitelist restricts registration for the role to the listed entities.
	EntityWhitelist *EntityWhitelistRoleAdmissionPolicyFixture `json:"entity_whitelist,omitempty"`
}

// EntityWhitelistRoleAdmissionPolicyFixture is an entity whitelist role
// admission policy fixture with entities referenced by fixture index.
type EntityWhitelistRoleAdmissionPolicyFixture struct {
	// Entities maps entity fixture indexes to their role whitelist configuration.
	Entities map[int]registry.EntityWhitelistRoleConfig `json:"entities"`
}

// Resolve resolves the fixture into a registry runtime admission policy,
// mapping entity fixture indexes to the corresponding entity public keys.
//
// It returns an error if any referenced entity index cannot be resolved
// against the given network.
func (f RuntimeAdmissionPolicyFixture) Resolve(net *Network) (registry.RuntimeAdmissionPolicy, error) {
	var policy registry.RuntimeAdmissionPolicy

	if f.AnyNode {
		policy.AnyNode = &registry.AnyNodeRuntimeAdmissionPolicy{}
	}

	if ew := f.EntityWhitelist; ew != nil {
		entities := make(map[signature.PublicKey]registry.EntityWhitelistConfig, len(ew.Entities))
		for entIdx, entCfg := range ew.Entities {
			entity, err := resolveEntity(net, entIdx)
			if err != nil {
				return registry.RuntimeAdmissionPolicy{}, err
			}
			entities[entity.ID()] = entCfg
		}
		policy.EntityWhitelist = &registry.EntityWhitelistRuntimeAdmissionPolicy{
			Entities: entities,
		}
	}

	if f.PerRole != nil {
		policy.PerRole = make(map[node.RolesMask]registry.PerRoleAdmissionPolicy, len(f.PerRole))
		for role, rolePolicy := range f.PerRole {
			var resolved registry.PerRoleAdmissionPolicy
			if ew := rolePolicy.EntityWhitelist; ew != nil {
				entities := make(map[signature.PublicKey]registry.EntityWhitelistRoleConfig, len(ew.Entities))
				for entIdx, entCfg := range ew.Entities {
					entity, err := resolveEntity(net, entIdx)
					if err != nil {
						return registry.RuntimeAdmissionPolicy{}, err
					}
					entities[entity.ID()] = entCfg
				}
				resolved.EntityWhitelist = &registry.EntityWhitelistRoleAdmissionPolicy{
					Entities: entities,
				}
			}
			policy.PerRole[role] = resolved
		}
	}

	return policy, nil
}

// KeymanagerPolicyFixture is a key manager policy fixture.
type KeymanagerPolicyFixture struct {
Runtime int `json:"runtime"`
Expand Down Expand Up @@ -458,6 +534,53 @@ func (f *ComputeWorkerFixture) Create(net *Network) (*Compute, error) {
})
}

// ObserverFixture is an observer node fixture.
type ObserverFixture struct {
	NodeFixture

	// Entity is the index of the entity the node will be provisioned with.
	Entity int `json:"entity"`

	// AllowErrorTermination specifies whether the node is allowed to terminate
	// with an error.
	AllowErrorTermination bool `json:"allow_error_termination"`
	// AllowEarlyTermination specifies whether the node is allowed to terminate
	// early.
	AllowEarlyTermination bool `json:"allow_early_termination"`

	// Consensus contains configuration for the consensus backend.
	Consensus ConsensusFixture `json:"consensus"`

	// Runtimes contains the indexes of the runtimes to enable.
	Runtimes []int `json:"runtimes,omitempty"`

	// RuntimeProvisioner is the runtime provisioner configuration.
	RuntimeProvisioner runtimeConfig.RuntimeProvisioner `json:"runtime_provisioner"`

	// RuntimeConfig contains the per-runtime node-local configuration, keyed
	// by runtime index.
	RuntimeConfig map[int]map[string]any `json:"runtime_config,omitempty"`
}

// Create instantiates the observer node described by the fixture.
func (f *ObserverFixture) Create(net *Network) (*Observer, error) {
entity, err := resolveEntity(net, f.Entity)
if err != nil {
return nil, err
}

return net.NewObserver(&ObserverCfg{
NodeCfg: NodeCfg{
Name: f.Name,
Consensus: f.Consensus,
AllowErrorTermination: f.AllowErrorTermination,
AllowEarlyTermination: f.AllowEarlyTermination,
NoAutoStart: f.NoAutoStart,
SupplementarySanityInterval: f.Consensus.SupplementarySanityInterval,
Comment thread
martintomazic marked this conversation as resolved.
Entity: entity,
ExtraArgs: f.ExtraArgs,
},
Runtimes: f.Runtimes,
RuntimeProvisioner: f.RuntimeProvisioner,
RuntimeConfig: f.RuntimeConfig,
})
}

// SeedFixture is a seed node fixture.
type SeedFixture struct {
NodeFixture
Expand Down
12 changes: 11 additions & 1 deletion go/oasis-test-runner/oasis/network.go
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,7 @@ type Network struct {
runtimes []*Runtime
keymanagers []*Keymanager
computeWorkers []*Compute
observers []*Observer
sentries []*Sentry
clients []*Client
statelessClients []*StatelessClient
Expand Down Expand Up @@ -229,6 +230,11 @@ func (net *Network) ComputeWorkers() []*Compute {
return net.computeWorkers
}

// Observers returns the observer nodes associated with the network.
//
// The returned slice is the network's internal slice (not a copy), in the
// order the observers were provisioned.
func (net *Network) Observers() []*Observer {
	return net.observers
}

// Sentries returns the sentry nodes associated with the network.
func (net *Network) Sentries() []*Sentry {
return net.sentries
Expand All @@ -249,7 +255,7 @@ func (net *Network) Byzantine() []*Byzantine {
return net.byzantine
}

// Nodes returns all the validator, compute, storage, keymanager and client nodes associated with
// Nodes returns all the validator, compute, observer, keymanager and client nodes associated with
// the network.
//
// Seed, sentry, byzantine and IAS proxy nodes are omitted if they're only hosting these single features.
Expand All @@ -262,6 +268,9 @@ func (net *Network) Nodes() []*Node {
for _, c := range net.ComputeWorkers() {
nodes = append(nodes, c.Node)
}
for _, c := range net.Observers() {
nodes = append(nodes, c.Node)
}
for _, k := range net.Keymanagers() {
nodes = append(nodes, k.Node)
}
Expand Down Expand Up @@ -349,6 +358,7 @@ func (net *Network) NumRegisterNodes() int {
return len(net.validators) +
len(net.keymanagers) +
len(net.computeWorkers) +
len(net.observers) +
len(net.byzantine)
}

Expand Down
3 changes: 1 addition & 2 deletions go/oasis-test-runner/oasis/oasis.go
Original file line number Diff line number Diff line change
Expand Up @@ -458,8 +458,7 @@ func (n *Node) setProvisionedIdentity(seed string) error {
return err
}

if n.entity != nil {
// Client nodes may need a provisioned identity. They never need an entity, however.
if n.entity != nil { // client nodes don't have entity id.
if err := n.entity.addNode(nodeSigner); err != nil {
return err
}
Expand Down
121 changes: 121 additions & 0 deletions go/oasis-test-runner/oasis/observer.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,121 @@
package oasis

import (
"fmt"
"os"
"strconv"

"github.com/oasisprotocol/oasis-core/go/config"
"github.com/oasisprotocol/oasis-core/go/runtime/bundle"
runtimeConfig "github.com/oasisprotocol/oasis-core/go/runtime/config"
)

const (
	// observerIdentitySeedTemplate is the seed template used to deterministically
	// provision observer node identities (formatted with the observer's index).
	observerIdentitySeedTemplate = "ekiden node observer %d"
)

// Observer is an Oasis observer node.
type Observer struct {
	*Node

	// consensusPort is the port provisioned for the consensus service.
	consensusPort uint16
	// p2pPort is the port provisioned for P2P communication.
	p2pPort uint16

	// runtimes contains the indexes (into the network's runtime list) of the
	// runtimes to enable on this node.
	runtimes []int
	// runtimeConfig contains per-runtime node-local configuration, keyed by
	// runtime index.
	runtimeConfig map[int]map[string]any
	// runtimeProvisioner is the runtime provisioner to use.
	runtimeProvisioner runtimeConfig.RuntimeProvisioner
}

// ObserverCfg is the Oasis observer node provisioning configuration.
type ObserverCfg struct {
	NodeCfg

	// Runtimes contains the indexes of the runtimes to enable.
	Runtimes []int
	// RuntimeConfig contains per-runtime node-local configuration, keyed by
	// runtime index.
	RuntimeConfig map[int]map[string]any
	// RuntimeProvisioner is the runtime provisioner to use. If empty, the
	// sandboxed provisioner is used by default.
	RuntimeProvisioner runtimeConfig.RuntimeProvisioner
}

// UpdateRuntimes updates the observer node runtimes.
//
// The given slice is stored as-is (not copied). The change only takes effect
// the next time the node's arguments are generated (see AddArgs).
func (o *Observer) UpdateRuntimes(runtimes []int) {
	o.runtimes = runtimes
}

// AddArgs appends the observer node's command line arguments to the given
// argument builder, including the hosted runtime configuration for each of
// the enabled runtimes.
func (o *Observer) AddArgs(args *argBuilder) error {
	args.appendNetwork(o.net)

	if o.entity.isDebugTestEntity {
		args.appendDebugTestEntity()
	}

	for _, idx := range o.runtimes {
		v := o.net.runtimes[idx]
		// XXX: could support configurable binary idx if ever needed.
		o.addHostedRuntime(v, o.runtimeConfig[idx])
	}

	return nil
}

// ModifyConfig prepares the observer node's configuration: client mode,
// consensus and P2P addresses, optional supplementary sanity checks, entity
// registration and runtime provisioning settings.
func (o *Observer) ModifyConfig() error {
	cfg := &o.Config

	cfg.Mode = config.ModeClient

	consensusPort := strconv.Itoa(int(o.consensusPort))
	cfg.Consensus.ListenAddress = allInterfacesAddr + ":" + consensusPort
	cfg.Consensus.ExternalAddress = localhostAddr + ":" + consensusPort
	if o.supplementarySanityInterval > 0 {
		cfg.Consensus.SupplementarySanity.Enabled = true
		cfg.Consensus.SupplementarySanity.Interval = o.supplementarySanityInterval
	}

	cfg.P2P.Port = o.p2pPort

	if !o.entity.isDebugTestEntity {
		entityID, _ := o.entity.ID().MarshalText() // Cannot fail.
		cfg.Registration.EntityID = string(entityID)
	}

	cfg.Runtime.Provisioner = o.runtimeProvisioner
	cfg.Runtime.SGX.Loader = o.net.cfg.RuntimeSGXLoaderBinary
	cfg.Runtime.AttestInterval = o.net.cfg.RuntimeAttestInterval

	o.AddSeedNodesToConfig()

	return nil
}

// NewObserver provisions a new observer node and adds it to the network.
//
// The node is named "observer-<index>" and its identity is provisioned
// deterministically from the observer's index. If no runtime provisioner is
// specified in the configuration, the sandboxed provisioner is used.
func (net *Network) NewObserver(cfg *ObserverCfg) (*Observer, error) {
	observerName := fmt.Sprintf("observer-%d", len(net.observers))
	host, err := net.GetNamedNode(observerName, &cfg.NodeCfg)
	if err != nil {
		return nil, err
	}

	// Pre-provision the node identity so that we can identify the entity.
	err = host.setProvisionedIdentity(fmt.Sprintf(observerIdentitySeedTemplate, len(net.observers)))
	if err != nil {
		return nil, fmt.Errorf("oasis/observer: failed to provision node identity: %w", err)
	}

	// Default to the sandboxed runtime provisioner when none was specified.
	if cfg.RuntimeProvisioner == "" {
		cfg.RuntimeProvisioner = runtimeConfig.RuntimeProvisionerSandboxed
	}

	observer := &Observer{
		Node:               host,
		runtimes:           cfg.Runtimes,
		runtimeProvisioner: cfg.RuntimeProvisioner,
		runtimeConfig:      cfg.RuntimeConfig,
		consensusPort:      host.getProvisionedPort(nodePortConsensus),
		p2pPort:            host.getProvisionedPort(nodePortP2P),
	}

	// Remove any exploded bundles on cleanup.
	net.env.AddOnCleanup(func() {
		_ = os.RemoveAll(bundle.ExplodedPath(observer.dir.String()))
	})

	net.observers = append(net.observers, observer)
	host.features = append(host.features, observer)

	return observer, nil
}
Loading
Loading