vendor: github.com/containerd/nri v0.11.0

- adds compatibility with runtime-spec v1.3.0
- adds `nri_no_wasm` build-tag to compile without wasm support
- adds `ErrWasmDisabled` error

full diff: https://github.com/containerd/nri/compare/v0.10.0...v0.11.0

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Author: Sebastiaan van Stijn
Date:   2025-12-12 10:23:42 +01:00
parent 32d4f64a65
commit 01440122f2
33 changed files with 4727 additions and 1117 deletions
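
The new build tag and the ErrWasmDisabled sentinel are meant to be used together by embedders of the adaptation package. A minimal sketch of that pattern follows; only the nri_no_wasm tag and adaptation.ErrWasmDisabled come from this bump, the helper name tolerateDisabledWasm and the logging are illustrative.

// Build the embedding binary without WASM plugin support:
//
//	go build -tags nri_no_wasm ./...
package main

import (
	"errors"
	"fmt"
	"log"

	nri "github.com/containerd/nri/pkg/adaptation"
)

// tolerateDisabledWasm mirrors what adaptation.New() now does internally:
// ErrWasmDisabled (returned when the module was compiled with nri_no_wasm)
// is logged and ignored, while any other initialization error is fatal.
func tolerateDisabledWasm(err error) error {
	if err == nil {
		return nil
	}
	if errors.Is(err, nri.ErrWasmDisabled) {
		log.Printf("NRI WASM plugin support disabled at build time: %v", err)
		return nil
	}
	return err
}

func main() {
	// With the nri_no_wasm tag every WASM initialization attempt yields the sentinel.
	fmt.Println(tolerateDisabledWasm(nri.ErrWasmDisabled)) // <nil>
}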

go.mod

@@ -27,7 +27,7 @@ require (
github.com/containerd/errdefs v1.0.0
github.com/containerd/fifo v1.1.0
github.com/containerd/log v0.1.0
github.com/containerd/nri v0.10.0
github.com/containerd/nri v0.11.0
github.com/containerd/platforms v1.0.0-rc.2
github.com/containerd/typeurl/v2 v2.2.3
github.com/coreos/go-systemd/v22 v22.6.0

go.sum

@@ -154,8 +154,8 @@ github.com/containerd/go-runc v1.1.0 h1:OX4f+/i2y5sUT7LhmcJH7GYrjjhHa1QI4e8yO0gG
github.com/containerd/go-runc v1.1.0/go.mod h1:xJv2hFF7GvHtTJd9JqTS2UVxMkULUYw4JN5XAUZqH5U=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/containerd/nri v0.10.0 h1:bt2NzfvlY6OJE0i+fB5WVeGQEycxY7iFVQpEbh7J3Go=
github.com/containerd/nri v0.10.0/go.mod h1:5VyvLa/4uL8FjyO8nis1UjbCutXDpngil17KvBSL6BU=
github.com/containerd/nri v0.11.0 h1:26mcQwNG58AZn0YkOrlJQ0yxQVmyZooflnVWJTqQrqQ=
github.com/containerd/nri v0.11.0/go.mod h1:bjGTLdUA58WgghKHg8azFMGXr05n1wDHrt3NSVBHiGI=
github.com/containerd/nydus-snapshotter v0.15.4 h1:l59kGRVMtwMLDLh322HsWhEsBCkRKMkGWYV5vBeLYCE=
github.com/containerd/nydus-snapshotter v0.15.4/go.mod h1:eRJqnxQDr48HNop15kZdLZpFF5B6vf6Q11Aq1K0E4Ms=
github.com/containerd/platforms v1.0.0-rc.2 h1:0SPgaNZPVWGEi4grZdV8VRYQn78y+nm6acgLGv/QzE4=


@@ -32,8 +32,6 @@ import (
"github.com/containerd/nri/pkg/log"
validator "github.com/containerd/nri/plugins/default-validator/builtin"
"github.com/containerd/ttrpc"
"github.com/tetratelabs/wazero"
"github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1"
"google.golang.org/protobuf/proto"
)
@@ -43,7 +41,7 @@ const (
DefaultPluginPath = "/opt/nri/plugins"
// DefaultSocketPath is the default socket path for external plugins.
DefaultSocketPath = api.DefaultSocketPath
// PluginConfigDir is the drop-in directory for NRI-launched plugin configuration.
// DefaultPluginConfigPath is the drop-in directory for NRI-launched plugin configuration.
DefaultPluginConfigPath = "/etc/nri/conf.d"
)
@@ -80,6 +78,9 @@ type Adaptation struct {
var (
// Used instead of nil Context in logging.
noCtx = context.TODO()
// ErrWasmDisabled is returned for WASM initialization if WASM support is disabled.
ErrWasmDisabled = errors.New("WASM support is disabled (at build time)")
)
// Option to apply to the NRI runtime.
@@ -155,23 +156,12 @@ func New(name, version string, syncFn SyncFn, updateFn UpdateFn, opts ...Option)
return nil, fmt.Errorf("failed to create NRI adaptation, nil UpdateFn")
}
wasmWithCloseOnContextDone := func(ctx context.Context) (wazero.Runtime, error) {
var (
cfg = wazero.NewRuntimeConfig().WithCloseOnContextDone(true)
r = wazero.NewRuntimeWithConfig(ctx, cfg)
)
if _, err := wasi_snapshot_preview1.Instantiate(ctx, r); err != nil {
wasmService, err := getWasmService()
if err != nil {
log.Errorf(noCtx, "failed to initialize WASM support: %v", err)
if !errors.Is(err, ErrWasmDisabled) {
return nil, err
}
return r, nil
}
wasmPlugins, err := api.NewPluginPlugin(
context.Background(),
api.WazeroRuntime(wasmWithCloseOnContextDone),
)
if err != nil {
return nil, fmt.Errorf("unable to initialize WASM service: %w", err)
}
r := &Adaptation{
@@ -183,7 +173,7 @@ func New(name, version string, syncFn SyncFn, updateFn UpdateFn, opts ...Option)
dropinPath: DefaultPluginConfigPath,
socketPath: DefaultSocketPath,
syncLock: sync.RWMutex{},
wasmService: wasmPlugins,
wasmService: wasmService,
}
for _, o := range opts {
@@ -711,6 +701,7 @@ func (r *Adaptation) finishedPluginSync() {
r.syncLock.Unlock()
}
// PluginSyncBlock is a handle for blocking plugin synchronization.
type PluginSyncBlock struct {
r *Adaptation
}


@@ -25,7 +25,8 @@ import (
//
// Aliased request/response/event types for api/api.proto.
// nolint
//
//nolint:revive // revive thinks the comment is for the exported type below
type (
RegisterPluginRequest = api.RegisterPluginRequest
RegisterPluginResponse = api.Empty
@@ -91,18 +92,25 @@ type (
LinuxDeviceCgroup = api.LinuxDeviceCgroup
LinuxIOPriority = api.LinuxIOPriority
LinuxSeccomp = api.LinuxSeccomp
LinuxNetDevice = api.LinuxNetDevice
LinuxScheduler = api.LinuxScheduler
LinuxSchedulerPolicy = api.LinuxSchedulerPolicy
LinuxSchedulerFlag = api.LinuxSchedulerFlag
LinuxRdt = api.LinuxRdt
CDIDevice = api.CDIDevice
HugepageLimit = api.HugepageLimit
Hooks = api.Hooks
Hook = api.Hook
POSIXRlimit = api.POSIXRlimit
SecurityProfile = api.SecurityProfile
User = api.User
EventMask = api.EventMask
)
// Aliased consts for api/api.proto.
// nolint
//
//nolint:revive // ignore const naming from auto-generated code
const (
Event_UNKNOWN = api.Event_UNKNOWN
Event_RUN_POD_SANDBOX = api.Event_RUN_POD_SANDBOX
@@ -134,7 +142,8 @@ const (
)
// Aliased types for api/optional.go.
// nolint
//
//nolint:revive // revive thinks the comment is for the exported type below
type (
OptionalString = api.OptionalString
OptionalInt = api.OptionalInt
@@ -147,20 +156,19 @@ type (
)
// Aliased functions for api/optional.go.
// nolint
var (
String = api.String
Int = api.Int
Int32 = api.Int32
UInt32 = api.UInt32
Int64 = api.Int64
UInt64 = api.UInt64
Bool = api.Bool
FileMode = api.FileMode
String = api.String
RepeatedString = api.RepeatedString
Int = api.Int
Int32 = api.Int32
UInt32 = api.UInt32
Int64 = api.Int64
UInt64 = api.UInt64
Bool = api.Bool
FileMode = api.FileMode
)
// Aliased functions for api/types.go.
// nolint
var (
FromOCIMounts = api.FromOCIMounts
FromOCIHooks = api.FromOCIHooks


@@ -22,12 +22,19 @@ import (
"github.com/containerd/nri/pkg/api"
)
// BuiltinPlugin implements the NRI API and runs in-process
// within the container runtime.
//
//nolint:revive // tautology builtin.Builtin*
type BuiltinPlugin struct {
Base string
Index string
Handlers BuiltinHandlers
}
// BuiltinHandlers contains request handlers for the builtin plugin.
//
//nolint:revive // tautology builtin.Builtin*
type BuiltinHandlers struct {
Configure func(context.Context, *api.ConfigureRequest) (*api.ConfigureResponse, error)
Synchronize func(context.Context, *api.SynchronizeRequest) (*api.SynchronizeResponse, error)
@@ -48,6 +55,7 @@ type BuiltinHandlers struct {
ValidateContainerAdjustment func(context.Context, *api.ValidateContainerAdjustmentRequest) error
}
// Configure implements PluginService of the NRI API.
func (b *BuiltinPlugin) Configure(ctx context.Context, req *api.ConfigureRequest) (*api.ConfigureResponse, error) {
var (
rpl = &api.ConfigureResponse{}
@@ -110,6 +118,7 @@ func (b *BuiltinPlugin) Configure(ctx context.Context, req *api.ConfigureRequest
return rpl, err
}
// Synchronize implements PluginService of the NRI API.
func (b *BuiltinPlugin) Synchronize(ctx context.Context, req *api.SynchronizeRequest) (*api.SynchronizeResponse, error) {
if b.Handlers.Synchronize != nil {
return b.Handlers.Synchronize(ctx, req)
@@ -117,10 +126,12 @@ func (b *BuiltinPlugin) Synchronize(ctx context.Context, req *api.SynchronizeReq
return &api.SynchronizeResponse{}, nil
}
// Shutdown implements PluginService of the NRI API.
func (b *BuiltinPlugin) Shutdown(context.Context, *api.ShutdownRequest) (*api.ShutdownResponse, error) {
return &api.ShutdownResponse{}, nil
}
// CreateContainer implements PluginService of the NRI API.
func (b *BuiltinPlugin) CreateContainer(ctx context.Context, req *api.CreateContainerRequest) (*api.CreateContainerResponse, error) {
if b.Handlers.CreateContainer != nil {
return b.Handlers.CreateContainer(ctx, req)
@@ -128,6 +139,7 @@ func (b *BuiltinPlugin) CreateContainer(ctx context.Context, req *api.CreateCont
return &api.CreateContainerResponse{}, nil
}
// UpdateContainer implements PluginService of the NRI API.
func (b *BuiltinPlugin) UpdateContainer(ctx context.Context, req *api.UpdateContainerRequest) (*api.UpdateContainerResponse, error) {
if b.Handlers.UpdateContainer != nil {
return b.Handlers.UpdateContainer(ctx, req)
@@ -135,6 +147,7 @@ func (b *BuiltinPlugin) UpdateContainer(ctx context.Context, req *api.UpdateCont
return &api.UpdateContainerResponse{}, nil
}
// StopContainer implements PluginService of the NRI API.
func (b *BuiltinPlugin) StopContainer(ctx context.Context, req *api.StopContainerRequest) (*api.StopContainerResponse, error) {
if b.Handlers.StopContainer != nil {
return b.Handlers.StopContainer(ctx, req)
@@ -142,6 +155,7 @@ func (b *BuiltinPlugin) StopContainer(ctx context.Context, req *api.StopContaine
return &api.StopContainerResponse{}, nil
}
// StateChange implements PluginService of the NRI API.
func (b *BuiltinPlugin) StateChange(ctx context.Context, evt *api.StateChangeEvent) (*api.StateChangeResponse, error) {
var err error
switch evt.Event {
@@ -182,6 +196,7 @@ func (b *BuiltinPlugin) StateChange(ctx context.Context, evt *api.StateChangeEve
return &api.StateChangeResponse{}, err
}
// UpdatePodSandbox implements PluginService of the NRI API.
func (b *BuiltinPlugin) UpdatePodSandbox(ctx context.Context, req *api.UpdatePodSandboxRequest) (*api.UpdatePodSandboxResponse, error) {
if b.Handlers.UpdatePodSandbox != nil {
return b.Handlers.UpdatePodSandbox(ctx, req)
@@ -189,6 +204,7 @@ func (b *BuiltinPlugin) UpdatePodSandbox(ctx context.Context, req *api.UpdatePod
return &api.UpdatePodSandboxResponse{}, nil
}
// PostUpdatePodSandbox is a handler for the PostUpdatePodSandbox event.
func (b *BuiltinPlugin) PostUpdatePodSandbox(ctx context.Context, req *api.PostUpdatePodSandboxRequest) error {
if b.Handlers.PostUpdatePodSandbox != nil {
return b.Handlers.PostUpdatePodSandbox(ctx, req)
@@ -196,6 +212,7 @@ func (b *BuiltinPlugin) PostUpdatePodSandbox(ctx context.Context, req *api.PostU
return nil
}
// ValidateContainerAdjustment implements PluginService of the NRI API.
func (b *BuiltinPlugin) ValidateContainerAdjustment(ctx context.Context, req *api.ValidateContainerAdjustmentRequest) (*api.ValidateContainerAdjustmentResponse, error) {
if b.Handlers.ValidateContainerAdjustment != nil {
if err := b.Handlers.ValidateContainerAdjustment(ctx, req); err != nil {


@@ -104,6 +104,10 @@ func (r *Adaptation) newLaunchedPlugin(dir, idx, base, cfg string) (p *plugin, r
fullPath := filepath.Join(dir, name)
if isWasm(fullPath) {
if r.wasmService == nil {
return nil, fmt.Errorf("can't load WASM plugin %s: no WASM support", fullPath)
}
log.Infof(noCtx, "Found WASM plugin: %s", fullPath)
wasm, err := r.wasmService.Load(context.Background(), fullPath, wasmHostFunctions{})
if err != nil {


@@ -82,6 +82,9 @@ func collectCreateContainerResult(request *CreateContainerRequest) *result {
if request.Container.Linux.Namespaces == nil {
request.Container.Linux.Namespaces = []*LinuxNamespace{}
}
if request.Container.Linux.NetDevices == nil {
request.Container.Linux.NetDevices = map[string]*LinuxNetDevice{}
}
return &result{
request: resultRequest{
@@ -104,6 +107,7 @@ func collectCreateContainerResult(request *CreateContainerRequest) *result {
Unified: map[string]string{},
},
Namespaces: []*LinuxNamespace{},
NetDevices: map[string]*LinuxNetDevice{},
},
},
},
@@ -235,7 +239,20 @@ func (r *result) adjust(rpl *ContainerAdjustment, plugin string) error {
if err := r.adjustNamespaces(rpl.Linux.Namespaces, plugin); err != nil {
return err
}
if err := r.adjustSysctl(rpl.Linux.Sysctl, plugin); err != nil {
return err
}
if err := r.adjustLinuxNetDevices(rpl.Linux.NetDevices, plugin); err != nil {
return err
}
if err := r.adjustLinuxScheduler(rpl.Linux.Scheduler, plugin); err != nil {
return err
}
if err := r.adjustRdt(rpl.Linux.Rdt, plugin); err != nil {
return err
}
}
if err := r.adjustRlimits(rpl.Rlimits, plugin); err != nil {
return err
}
@@ -451,6 +468,83 @@ func (r *result) adjustNamespaces(namespaces []*LinuxNamespace, plugin string) e
return nil
}
func (r *result) adjustSysctl(sysctl map[string]string, plugin string) error {
if len(sysctl) == 0 {
return nil
}
create, id := r.request.create, r.request.create.Container.Id
del := map[string]struct{}{}
for k := range sysctl {
if key, marked := IsMarkedForRemoval(k); marked {
del[key] = struct{}{}
delete(sysctl, k)
}
}
for k, v := range sysctl {
if _, ok := del[k]; ok {
r.owners.ClearSysctl(id, k, plugin)
delete(create.Container.Linux.Sysctl, k)
r.reply.adjust.Linux.Sysctl[MarkForRemoval(k)] = ""
}
if err := r.owners.ClaimSysctl(id, k, plugin); err != nil {
return err
}
create.Container.Linux.Sysctl[k] = v
r.reply.adjust.Linux.Sysctl[k] = v
delete(del, k)
}
for k := range del {
r.reply.adjust.Annotations[MarkForRemoval(k)] = ""
}
return nil
}
func (r *result) adjustRdt(rdt *LinuxRdt, plugin string) error {
if r == nil {
return nil
}
r.initAdjustRdt()
id := r.request.create.Container.Id
if rdt.GetRemove() {
r.owners.ClearRdt(id, plugin)
r.reply.adjust.Linux.Rdt = &LinuxRdt{
// Propagate the remove request (if not overridden below).
Remove: true,
}
}
if v := rdt.GetClosId(); v != nil {
if err := r.owners.ClaimRdtClosID(id, plugin); err != nil {
return err
}
r.reply.adjust.Linux.Rdt.ClosId = String(v.GetValue())
r.reply.adjust.Linux.Rdt.Remove = false
}
if v := rdt.GetSchemata(); v != nil {
if err := r.owners.ClaimRdtSchemata(id, plugin); err != nil {
return err
}
r.reply.adjust.Linux.Rdt.Schemata = RepeatedString(v.GetValue())
r.reply.adjust.Linux.Rdt.Remove = false
}
if v := rdt.GetEnableMonitoring(); v != nil {
if err := r.owners.ClaimRdtEnableMonitoring(id, plugin); err != nil {
return err
}
r.reply.adjust.Linux.Rdt.EnableMonitoring = Bool(v.GetValue())
r.reply.adjust.Linux.Rdt.Remove = false
}
return nil
}
func (r *result) adjustCDIDevices(devices []*CDIDevice, plugin string) error {
if len(devices) == 0 {
return nil
@@ -622,6 +716,159 @@ func (r *result) adjustHooks(hooks *Hooks, plugin string) error {
return nil
}
func (r *result) adjustMemoryResource(mem, targetContainer, targetReply *LinuxMemory, id, plugin string) error {
if mem == nil {
return nil
}
if v := mem.GetLimit(); v != nil {
if err := r.owners.ClaimMemLimit(id, plugin); err != nil {
return err
}
targetContainer.Limit = Int64(v.GetValue())
if targetReply != nil {
targetReply.Limit = Int64(v.GetValue())
}
}
if v := mem.GetReservation(); v != nil {
if err := r.owners.ClaimMemReservation(id, plugin); err != nil {
return err
}
targetContainer.Reservation = Int64(v.GetValue())
if targetReply != nil {
targetReply.Reservation = Int64(v.GetValue())
}
}
if v := mem.GetSwap(); v != nil {
if err := r.owners.ClaimMemSwapLimit(id, plugin); err != nil {
return err
}
targetContainer.Swap = Int64(v.GetValue())
if targetReply != nil {
targetReply.Swap = Int64(v.GetValue())
}
}
if v := mem.GetKernel(); v != nil {
if err := r.owners.ClaimMemKernelLimit(id, plugin); err != nil {
return err
}
targetContainer.Kernel = Int64(v.GetValue())
if targetReply != nil {
targetReply.Kernel = Int64(v.GetValue())
}
}
if v := mem.GetKernelTcp(); v != nil {
if err := r.owners.ClaimMemTCPLimit(id, plugin); err != nil {
return err
}
targetContainer.KernelTcp = Int64(v.GetValue())
if targetReply != nil {
targetReply.KernelTcp = Int64(v.GetValue())
}
}
if v := mem.GetSwappiness(); v != nil {
if err := r.owners.ClaimMemSwappiness(id, plugin); err != nil {
return err
}
targetContainer.Swappiness = UInt64(v.GetValue())
if targetReply != nil {
targetReply.Swappiness = UInt64(v.GetValue())
}
}
if v := mem.GetDisableOomKiller(); v != nil {
if err := r.owners.ClaimMemDisableOomKiller(id, plugin); err != nil {
return err
}
targetContainer.DisableOomKiller = Bool(v.GetValue())
if targetReply != nil {
targetReply.DisableOomKiller = Bool(v.GetValue())
}
}
if v := mem.GetUseHierarchy(); v != nil {
if err := r.owners.ClaimMemUseHierarchy(id, plugin); err != nil {
return err
}
targetContainer.UseHierarchy = Bool(v.GetValue())
if targetReply != nil {
targetReply.UseHierarchy = Bool(v.GetValue())
}
}
return nil
}
func (r *result) adjustCPUResource(cpu, targetContainer, targetReply *LinuxCPU, id, plugin string) error {
if cpu == nil {
return nil
}
if v := cpu.GetShares(); v != nil {
if err := r.owners.ClaimCPUShares(id, plugin); err != nil {
return err
}
targetContainer.Shares = UInt64(v.GetValue())
if targetReply != nil {
targetReply.Shares = UInt64(v.GetValue())
}
}
if v := cpu.GetQuota(); v != nil {
if err := r.owners.ClaimCPUQuota(id, plugin); err != nil {
return err
}
targetContainer.Quota = Int64(v.GetValue())
if targetReply != nil {
targetReply.Quota = Int64(v.GetValue())
}
}
if v := cpu.GetPeriod(); v != nil {
if err := r.owners.ClaimCPUPeriod(id, plugin); err != nil {
return err
}
targetContainer.Period = UInt64(v.GetValue())
if targetReply != nil {
targetReply.Period = UInt64(v.GetValue())
}
}
if v := cpu.GetRealtimeRuntime(); v != nil {
if err := r.owners.ClaimCPURealtimeRuntime(id, plugin); err != nil {
return err
}
targetContainer.RealtimeRuntime = Int64(v.GetValue())
if targetReply != nil {
targetReply.RealtimeRuntime = Int64(v.GetValue())
}
}
if v := cpu.GetRealtimePeriod(); v != nil {
if err := r.owners.ClaimCPURealtimePeriod(id, plugin); err != nil {
return err
}
targetContainer.RealtimePeriod = UInt64(v.GetValue())
if targetReply != nil {
targetReply.RealtimePeriod = UInt64(v.GetValue())
}
}
if v := cpu.GetCpus(); v != "" {
if err := r.owners.ClaimCPUSetCPUs(id, plugin); err != nil {
return err
}
targetContainer.Cpus = v
if targetReply != nil {
targetReply.Cpus = v
}
}
if v := cpu.GetMems(); v != "" {
if err := r.owners.ClaimCPUSetMems(id, plugin); err != nil {
return err
}
targetContainer.Mems = v
if targetReply != nil {
targetReply.Mems = v
}
}
return nil
}
func (r *result) adjustResources(resources *LinuxResources, plugin string) error {
if resources == nil {
return nil
@@ -631,114 +878,12 @@ func (r *result) adjustResources(resources *LinuxResources, plugin string) error
container := create.Container.Linux.Resources
reply := r.reply.adjust.Linux.Resources
if mem := resources.Memory; mem != nil {
if v := mem.GetLimit(); v != nil {
if err := r.owners.ClaimMemLimit(id, plugin); err != nil {
return err
}
container.Memory.Limit = Int64(v.GetValue())
reply.Memory.Limit = Int64(v.GetValue())
}
if v := mem.GetReservation(); v != nil {
if err := r.owners.ClaimMemReservation(id, plugin); err != nil {
return err
}
container.Memory.Reservation = Int64(v.GetValue())
reply.Memory.Reservation = Int64(v.GetValue())
}
if v := mem.GetSwap(); v != nil {
if err := r.owners.ClaimMemSwapLimit(id, plugin); err != nil {
return err
}
container.Memory.Swap = Int64(v.GetValue())
reply.Memory.Swap = Int64(v.GetValue())
}
if v := mem.GetKernel(); v != nil {
if err := r.owners.ClaimMemKernelLimit(id, plugin); err != nil {
return err
}
container.Memory.Kernel = Int64(v.GetValue())
reply.Memory.Kernel = Int64(v.GetValue())
}
if v := mem.GetKernelTcp(); v != nil {
if err := r.owners.ClaimMemTCPLimit(id, plugin); err != nil {
return err
}
container.Memory.KernelTcp = Int64(v.GetValue())
reply.Memory.KernelTcp = Int64(v.GetValue())
}
if v := mem.GetSwappiness(); v != nil {
if err := r.owners.ClaimMemSwappiness(id, plugin); err != nil {
return err
}
container.Memory.Swappiness = UInt64(v.GetValue())
reply.Memory.Swappiness = UInt64(v.GetValue())
}
if v := mem.GetDisableOomKiller(); v != nil {
if err := r.owners.ClaimMemDisableOomKiller(id, plugin); err != nil {
return err
}
container.Memory.DisableOomKiller = Bool(v.GetValue())
reply.Memory.DisableOomKiller = Bool(v.GetValue())
}
if v := mem.GetUseHierarchy(); v != nil {
if err := r.owners.ClaimMemUseHierarchy(id, plugin); err != nil {
return err
}
container.Memory.UseHierarchy = Bool(v.GetValue())
reply.Memory.UseHierarchy = Bool(v.GetValue())
}
if err := r.adjustMemoryResource(resources.Memory, container.Memory, reply.Memory, id, plugin); err != nil {
return err
}
if cpu := resources.Cpu; cpu != nil {
if v := cpu.GetShares(); v != nil {
if err := r.owners.ClaimCPUShares(id, plugin); err != nil {
return err
}
container.Cpu.Shares = UInt64(v.GetValue())
reply.Cpu.Shares = UInt64(v.GetValue())
}
if v := cpu.GetQuota(); v != nil {
if err := r.owners.ClaimCPUQuota(id, plugin); err != nil {
return err
}
container.Cpu.Quota = Int64(v.GetValue())
reply.Cpu.Quota = Int64(v.GetValue())
}
if v := cpu.GetPeriod(); v != nil {
if err := r.owners.ClaimCPUPeriod(id, plugin); err != nil {
return err
}
container.Cpu.Period = UInt64(v.GetValue())
reply.Cpu.Period = UInt64(v.GetValue())
}
if v := cpu.GetRealtimeRuntime(); v != nil {
if err := r.owners.ClaimCPURealtimeRuntime(id, plugin); err != nil {
return err
}
container.Cpu.RealtimeRuntime = Int64(v.GetValue())
reply.Cpu.RealtimeRuntime = Int64(v.GetValue())
}
if v := cpu.GetRealtimePeriod(); v != nil {
if err := r.owners.ClaimCPURealtimePeriod(id, plugin); err != nil {
return err
}
container.Cpu.RealtimePeriod = UInt64(v.GetValue())
reply.Cpu.RealtimePeriod = UInt64(v.GetValue())
}
if v := cpu.GetCpus(); v != "" {
if err := r.owners.ClaimCPUSetCPUs(id, plugin); err != nil {
return err
}
container.Cpu.Cpus = v
reply.Cpu.Cpus = v
}
if v := cpu.GetMems(); v != "" {
if err := r.owners.ClaimCPUSetMems(id, plugin); err != nil {
return err
}
container.Cpu.Mems = v
reply.Cpu.Mems = v
}
if err := r.adjustCPUResource(resources.Cpu, container.Cpu, reply.Cpu, id, plugin); err != nil {
return err
}
for _, l := range resources.HugepageLimits {
@@ -858,6 +1003,23 @@ func (r *result) adjustSeccompPolicy(adjustment *LinuxSeccomp, plugin string) er
return nil
}
func (r *result) adjustLinuxScheduler(sch *LinuxScheduler, plugin string) error {
if sch == nil {
return nil
}
create, id := r.request.create, r.request.create.Container.Id
if err := r.owners.ClaimLinuxScheduler(id, plugin); err != nil {
return err
}
create.Container.Linux.Scheduler = sch
r.reply.adjust.Linux.Scheduler = sch
return nil
}
func (r *result) adjustRlimits(rlimits []*POSIXRlimit, plugin string) error {
create, id, adjust := r.request.create, r.request.create.Container.Id, r.reply.adjust
for _, l := range rlimits {
@@ -871,6 +1033,41 @@ func (r *result) adjustRlimits(rlimits []*POSIXRlimit, plugin string) error {
return nil
}
func (r *result) adjustLinuxNetDevices(devices map[string]*LinuxNetDevice, plugin string) error {
if len(devices) == 0 {
return nil
}
create, id := r.request.create, r.request.create.Container.Id
del := map[string]struct{}{}
for k := range devices {
if key, marked := IsMarkedForRemoval(k); marked {
del[key] = struct{}{}
delete(devices, k)
}
}
for k, v := range devices {
if _, ok := del[k]; ok {
r.owners.ClearLinuxNetDevice(id, k, plugin)
delete(create.Container.Linux.NetDevices, k)
r.reply.adjust.Linux.NetDevices[MarkForRemoval(k)] = nil
}
if err := r.owners.ClaimLinuxNetDevice(id, k, plugin); err != nil {
return err
}
create.Container.Linux.NetDevices[k] = v
r.reply.adjust.Linux.NetDevices[k] = v
delete(del, k)
}
for k := range del {
r.reply.adjust.Linux.NetDevices[MarkForRemoval(k)] = nil
}
return nil
}
func (r *result) updateResources(reply, u *ContainerUpdate, plugin string) error {
if u.Linux == nil || u.Linux.Resources == nil {
return nil
@@ -886,99 +1083,12 @@ func (r *result) updateResources(reply, u *ContainerUpdate, plugin string) error
resources = reply.Linux.Resources.Copy()
}
if mem := u.Linux.Resources.Memory; mem != nil {
if v := mem.GetLimit(); v != nil {
if err := r.owners.ClaimMemLimit(id, plugin); err != nil {
return err
}
resources.Memory.Limit = Int64(v.GetValue())
}
if v := mem.GetReservation(); v != nil {
if err := r.owners.ClaimMemReservation(id, plugin); err != nil {
return err
}
resources.Memory.Reservation = Int64(v.GetValue())
}
if v := mem.GetSwap(); v != nil {
if err := r.owners.ClaimMemSwapLimit(id, plugin); err != nil {
return err
}
resources.Memory.Swap = Int64(v.GetValue())
}
if v := mem.GetKernel(); v != nil {
if err := r.owners.ClaimMemKernelLimit(id, plugin); err != nil {
return err
}
resources.Memory.Kernel = Int64(v.GetValue())
}
if v := mem.GetKernelTcp(); v != nil {
if err := r.owners.ClaimMemTCPLimit(id, plugin); err != nil {
return err
}
resources.Memory.KernelTcp = Int64(v.GetValue())
}
if v := mem.GetSwappiness(); v != nil {
if err := r.owners.ClaimMemSwappiness(id, plugin); err != nil {
return err
}
resources.Memory.Swappiness = UInt64(v.GetValue())
}
if v := mem.GetDisableOomKiller(); v != nil {
if err := r.owners.ClaimMemDisableOomKiller(id, plugin); err != nil {
return err
}
resources.Memory.DisableOomKiller = Bool(v.GetValue())
}
if v := mem.GetUseHierarchy(); v != nil {
if err := r.owners.ClaimMemUseHierarchy(id, plugin); err != nil {
return err
}
resources.Memory.UseHierarchy = Bool(v.GetValue())
}
if err := r.adjustMemoryResource(u.Linux.Resources.Memory, resources.Memory, nil, id, plugin); err != nil {
return err
}
if cpu := u.Linux.Resources.Cpu; cpu != nil {
if v := cpu.GetShares(); v != nil {
if err := r.owners.ClaimCPUShares(id, plugin); err != nil {
return err
}
resources.Cpu.Shares = UInt64(v.GetValue())
}
if v := cpu.GetQuota(); v != nil {
if err := r.owners.ClaimCPUQuota(id, plugin); err != nil {
return err
}
resources.Cpu.Quota = Int64(v.GetValue())
}
if v := cpu.GetPeriod(); v != nil {
if err := r.owners.ClaimCPUPeriod(id, plugin); err != nil {
return err
}
resources.Cpu.Period = UInt64(v.GetValue())
}
if v := cpu.GetRealtimeRuntime(); v != nil {
if err := r.owners.ClaimCPURealtimeRuntime(id, plugin); err != nil {
return err
}
resources.Cpu.RealtimeRuntime = Int64(v.GetValue())
}
if v := cpu.GetRealtimePeriod(); v != nil {
if err := r.owners.ClaimCPURealtimePeriod(id, plugin); err != nil {
return err
}
resources.Cpu.RealtimePeriod = UInt64(v.GetValue())
}
if v := cpu.GetCpus(); v != "" {
if err := r.owners.ClaimCPUSetCPUs(id, plugin); err != nil {
return err
}
resources.Cpu.Cpus = v
}
if v := cpu.GetMems(); v != "" {
if err := r.owners.ClaimCPUSetMems(id, plugin); err != nil {
return err
}
resources.Cpu.Mems = v
}
if err := r.adjustCPUResource(u.Linux.Resources.Cpu, resources.Cpu, nil, id, plugin); err != nil {
return err
}
for _, l := range u.Linux.Resources.HugepageLimits {
@@ -1067,3 +1177,23 @@ func (r *result) getContainerUpdate(u *ContainerUpdate, plugin string) (*Contain
return update, nil
}
func (r *result) initAdjust() {
if r.reply.adjust == nil {
r.reply.adjust = &ContainerAdjustment{}
}
}
func (r *result) initAdjustLinux() {
r.initAdjust()
if r.reply.adjust.Linux == nil {
r.reply.adjust.Linux = &LinuxContainerAdjustment{}
}
}
func (r *result) initAdjustRdt() {
r.initAdjustLinux()
if r.reply.adjust.Linux.Rdt == nil {
r.reply.adjust.Linux.Rdt = &LinuxRdt{}
}
}


@@ -0,0 +1,27 @@
//go:build nri_no_wasm
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package adaptation
import (
"github.com/containerd/nri/pkg/api"
)
func getWasmService() (*api.PluginPlugin, error) {
return nil, ErrWasmDisabled
}


@@ -0,0 +1,51 @@
//go:build !nri_no_wasm
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package adaptation
import (
"context"
"fmt"
"github.com/containerd/nri/pkg/api"
"github.com/tetratelabs/wazero"
"github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1"
)
func getWasmService() (*api.PluginPlugin, error) {
wasmWithCloseOnContextDone := func(ctx context.Context) (wazero.Runtime, error) {
var (
cfg = wazero.NewRuntimeConfig().WithCloseOnContextDone(true)
r = wazero.NewRuntimeWithConfig(ctx, cfg)
)
if _, err := wasi_snapshot_preview1.Instantiate(ctx, r); err != nil {
return nil, err
}
return r, nil
}
wasmPlugins, err := api.NewPluginPlugin(
context.Background(),
api.WazeroRuntime(wasmWithCloseOnContextDone),
)
if err != nil {
return nil, fmt.Errorf("unable to initialize WASM service: %w", err)
}
return wasmPlugins, nil
}


@@ -116,6 +116,7 @@ func (a *ContainerAdjustment) AddHooks(h *Hooks) {
}
}
// AddRlimit records the addition of rlimit (POSIX resource limits) to a container.
func (a *ContainerAdjustment) AddRlimit(typ string, hard, soft uint64) {
a.initRlimits()
a.Rlimits = append(a.Rlimits, &POSIXRlimit{
@@ -161,6 +162,24 @@ func (a *ContainerAdjustment) RemoveNamespace(n *LinuxNamespace) {
})
}
// AddLinuxNetDevice records the addition of the given network device to a container.
func (a *ContainerAdjustment) AddLinuxNetDevice(hostDev string, d *LinuxNetDevice) {
if d == nil {
return
}
a.initLinuxNetDevices()
a.Linux.NetDevices[hostDev] = d
}
// RemoveLinuxNetDevice records the removal of a network device from a container.
// Normally it is an error for a plugin to try and alter a network device
// touched by another container. However, this is not an error if
// the plugin removes that device prior to touching it.
func (a *ContainerAdjustment) RemoveLinuxNetDevice(hostDev string) {
a.initLinuxNetDevices()
a.Linux.NetDevices[MarkForRemoval(hostDev)] = nil
}
// SetLinuxMemoryLimit records setting the memory limit for a container.
func (a *ContainerAdjustment) SetLinuxMemoryLimit(value int64) {
a.initLinuxResourcesMemory()
@@ -279,6 +298,30 @@ func (a *ContainerAdjustment) SetLinuxRDTClass(value string) {
a.Linux.Resources.RdtClass = String(value)
}
// SetLinuxRDTClosID records setting the RDT CLOS id for a container.
func (a *ContainerAdjustment) SetLinuxRDTClosID(value string) {
a.initLinuxRdt()
a.Linux.Rdt.ClosId = String(value)
}
// SetLinuxRDTSchemata records setting the RDT schemata for a container.
func (a *ContainerAdjustment) SetLinuxRDTSchemata(value []string) {
a.initLinuxRdt()
a.Linux.Rdt.Schemata = RepeatedString(value)
}
// SetLinuxRDTEnableMonitoring records enabling RDT monitoring for a container.
func (a *ContainerAdjustment) SetLinuxRDTEnableMonitoring(value bool) {
a.initLinuxRdt()
a.Linux.Rdt.EnableMonitoring = Bool(value)
}
// RemoveLinuxRDT records the removal of the RDT configuration.
func (a *ContainerAdjustment) RemoveLinuxRDT() {
a.initLinuxRdt()
a.Linux.Rdt.Remove = true
}
// AddLinuxUnified sets a cgroupv2 unified resource.
func (a *ContainerAdjustment) AddLinuxUnified(key, value string) {
a.initLinuxResourcesUnified()
@@ -309,6 +352,21 @@ func (a *ContainerAdjustment) SetLinuxSeccompPolicy(seccomp *LinuxSeccomp) {
a.Linux.SeccompPolicy = seccomp
}
// SetLinuxSysctl records setting a sysctl for a container.
func (a *ContainerAdjustment) SetLinuxSysctl(key, value string) {
a.initLinux()
if a.Linux.Sysctl == nil {
a.Linux.Sysctl = make(map[string]string)
}
a.Linux.Sysctl[key] = value
}
// SetLinuxScheduler records setting the Linux scheduler attributes for a container.
func (a *ContainerAdjustment) SetLinuxScheduler(sch *LinuxScheduler) {
a.initLinux()
a.Linux.Scheduler = sch
}
//
// Initializing a container adjustment and container update.
//
@@ -378,3 +436,17 @@ func (a *ContainerAdjustment) initLinuxResourcesUnified() {
a.Linux.Resources.Unified = make(map[string]string)
}
}
func (a *ContainerAdjustment) initLinuxNetDevices() {
a.initLinux()
if a.Linux.NetDevices == nil {
a.Linux.NetDevices = make(map[string]*LinuxNetDevice)
}
}
func (a *ContainerAdjustment) initLinuxRdt() {
a.initLinux()
if a.Linux.Rdt == nil {
a.Linux.Rdt = &LinuxRdt{}
}
}
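
Taken together, the adjustment helpers added above (SetLinuxSysctl, AddLinuxNetDevice, SetLinuxScheduler and the RDT setters) let a plugin request the new runtime-spec features from its create-container hook. A sketch follows; the handler signature is assumed to match the stub's CreateContainer hook, and every key and value is made up.

package main

import (
	"context"

	"github.com/containerd/nri/pkg/api"
)

// createContainer sketches the adjustment helpers added in this bump:
// sysctls, host network devices, scheduling attributes and RDT settings.
func createContainer(_ context.Context, _ *api.PodSandbox, _ *api.Container) (*api.ContainerAdjustment, []*api.ContainerUpdate, error) {
	adjust := &api.ContainerAdjustment{}

	adjust.SetLinuxSysctl("net.ipv4.ip_unprivileged_port_start", "0")
	adjust.AddLinuxNetDevice("eth1", &api.LinuxNetDevice{Name: "ctr-eth1"})
	adjust.SetLinuxScheduler(&api.LinuxScheduler{
		Policy: api.LinuxSchedulerPolicy_SCHED_BATCH,
		Nice:   10,
	})
	adjust.SetLinuxRDTClosID("low-priority")
	// adjust.RemoveLinuxNetDevice("eth1") would record a removal instead.

	return adjust, nil, nil
}

func main() {} // wiring createContainer into an NRI stub is omitted here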

File diff suppressed because it is too large.


@@ -329,6 +329,7 @@ message Container {
string status_reason = 18;
string status_message = 19;
repeated CDIDevice CDI_devices = 20;
User user = 21;
}
// Possible container states.
@@ -376,6 +377,10 @@ message LinuxContainer {
LinuxIOPriority io_priority = 6;
SecurityProfile seccomp_profile = 7;
LinuxSeccomp seccomp_policy = 8;
map<string, string> sysctl = 9;
map<string, LinuxNetDevice> net_devices = 10;
LinuxScheduler scheduler = 11;
LinuxRdt rdt = 12;
}
// A linux namespace.
@@ -409,6 +414,13 @@ message CDIDevice {
string name = 1;
}
// User and group IDs for the container.
message User {
uint32 uid = 1;
uint32 gid = 2;
repeated uint32 additional_gids = 3;
}
// Container (linux) resources.
message LinuxResources {
LinuxMemory memory = 1;
@@ -487,6 +499,44 @@ enum IOPrioClass {
IOPRIO_CLASS_IDLE = 3;
}
// A linux network device.
message LinuxNetDevice {
string name = 1;
}
// Linux process scheduling attributes.
message LinuxScheduler {
LinuxSchedulerPolicy policy = 1;
int32 nice = 2;
int32 priority = 3;
repeated LinuxSchedulerFlag flags = 4;
uint64 runtime = 5;
uint64 deadline = 6;
uint64 period = 7;
}
// Linux scheduling policies.
enum LinuxSchedulerPolicy {
SCHED_NONE = 0;
SCHED_OTHER = 1;
SCHED_FIFO = 2;
SCHED_RR = 3;
SCHED_BATCH = 4;
SCHED_ISO = 5;
SCHED_IDLE = 6;
SCHED_DEADLINE = 7;
}
enum LinuxSchedulerFlag {
SCHED_FLAG_RESET_ON_FORK = 0;
SCHED_FLAG_RECLAIM = 1;
SCHED_FLAG_DL_OVERRUN = 2;
SCHED_FLAG_KEEP_POLICY = 3;
SCHED_FLAG_KEEP_PARAMS = 4;
SCHED_FLAG_UTIL_CLAMP_MIN = 5;
SCHED_FLAG_UTIL_CLAMP_MAX = 6;
}
// Requested adjustments to a container being created.
message ContainerAdjustment {
map<string, string> annotations = 2;
@@ -508,6 +558,10 @@ message LinuxContainerAdjustment {
LinuxIOPriority io_priority = 5;
LinuxSeccomp seccomp_policy = 6;
repeated LinuxNamespace namespaces = 7;
map<string, string> sysctl = 8;
map<string,LinuxNetDevice> net_devices = 9;
LinuxScheduler scheduler = 10;
LinuxRdt rdt = 11;
}
message LinuxSeccomp {
@@ -554,6 +608,14 @@ message ContainerEviction {
string reason = 2;
}
message LinuxRdt {
OptionalString clos_id = 1;
OptionalRepeatedString schemata = 2;
OptionalBool enable_monitoring = 3;
// NRI specific field to mark the RDT config for removal.
bool remove = 4;
}
// KeyValue represents an environment variable.
message KeyValue {
string key = 1;
@@ -565,6 +627,11 @@ message OptionalString {
string value = 1;
}
// An optional collection of strings.
message OptionalRepeatedString {
repeated string value = 1;
}
// An optional signed integer value.
message OptionalInt {
int64 value = 1;
@@ -665,4 +732,11 @@ enum Field {
IoPriority = 31;
SeccompPolicy = 32;
Namespace = 33;
Sysctl = 34;
LinuxNetDevices = 35;
// protoc scoping rules: calling this LinuxScheduler would conflict with message.
LinuxSched = 36;
RdtClosID = 37;
RdtSchemata = 38;
RdtEnableMonitoring = 39;
}


@@ -243,17 +243,17 @@ func RegisterHostFunctionsService(srv *ttrpc.Server, svc HostFunctionsService) {
})
}
type hostFunctionsClient struct {
type hostfunctionsClient struct {
client *ttrpc.Client
}
func NewHostFunctionsClient(client *ttrpc.Client) HostFunctionsService {
return &hostFunctionsClient{
return &hostfunctionsClient{
client: client,
}
}
func (c *hostFunctionsClient) Log(ctx context.Context, req *LogRequest) (*Empty, error) {
func (c *hostfunctionsClient) Log(ctx context.Context, req *LogRequest) (*Empty, error) {
var resp Empty
if err := c.client.Call(ctx, "nri.pkg.api.v1alpha1.HostFunctions", "Log", req, &resp); err != nil {
return nil, err

File diff suppressed because it is too large.


@@ -18,6 +18,7 @@ package api
import "time"
// GetCreatedAtTime returns the time the container was created at as time.Time.
func (x *Container) GetCreatedAtTime() time.Time {
t := time.Time{}
if x != nil {
@@ -26,6 +27,7 @@ func (x *Container) GetCreatedAtTime() time.Time {
return t
}
// GetStartedAtTime returns the time the container was started at as time.Time.
func (x *Container) GetStartedAtTime() time.Time {
t := time.Time{}
if x != nil {
@@ -34,6 +36,7 @@ func (x *Container) GetStartedAtTime() time.Time {
return t
}
// GetFinishedAtTime returns the time the container was finished at as time.Time.
func (x *Container) GetFinishedAtTime() time.Time {
t := time.Time{}
if x != nil {


@@ -26,7 +26,7 @@ const (
ValidEvents = EventMask((1 << (Event_LAST - 1)) - 1)
)
// nolint
//nolint:revive // exported type should have comment
type (
// Define *Request/*Response type aliases for *Event/Empty pairs.


@@ -25,7 +25,7 @@ func (hooks *Hooks) Append(h *Hooks) *Hooks {
if h == nil {
return hooks
}
hooks.Prestart = append(hooks.Prestart, h.Prestart...)
hooks.Prestart = append(hooks.Prestart, h.Prestart...) //nolint:staticcheck // ignore SA1019: o.Prestart is deprecated
hooks.CreateRuntime = append(hooks.CreateRuntime, h.CreateRuntime...)
hooks.CreateContainer = append(hooks.CreateContainer, h.CreateContainer...)
hooks.StartContainer = append(hooks.StartContainer, h.StartContainer...)
@@ -79,7 +79,7 @@ func FromOCIHooks(o *rspec.Hooks) *Hooks {
return nil
}
return &Hooks{
Prestart: FromOCIHookSlice(o.Prestart),
Prestart: FromOCIHookSlice(o.Prestart), //nolint:staticcheck // ignore SA1019: o.Prestart is deprecated
CreateRuntime: FromOCIHookSlice(o.CreateRuntime),
CreateContainer: FromOCIHookSlice(o.CreateContainer),
StartContainer: FromOCIHookSlice(o.StartContainer),


@@ -47,7 +47,7 @@ func (ioprio *LinuxIOPriority) ToOCI() *rspec.LinuxIOPriority {
}
}
// FromOCIIOPrioClass returns the IOPrioClass corresponding the the given
// FromOCIIOPriorityClass returns the IOPrioClass corresponding the the given
// OCI IOPriorityClass.
func FromOCIIOPriorityClass(o rspec.IOPriorityClass) IOPrioClass {
return IOPrioClass(IOPrioClass_value[string(o)])


@@ -0,0 +1,107 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api
import (
rspec "github.com/opencontainers/runtime-spec/specs-go"
)
// FromOCILinuxScheduler returns a LinuxScheduler corresponding to the OCI
// Scheduler.
func FromOCILinuxScheduler(o *rspec.Scheduler) *LinuxScheduler {
if o == nil {
return nil
}
sch := &LinuxScheduler{
Policy: FromOCISchedulerPolicy(o.Policy),
Nice: o.Nice,
Priority: o.Priority,
Flags: FromOCILinuxSchedulerFlags(o.Flags),
Runtime: o.Runtime,
Deadline: o.Deadline,
Period: o.Period,
}
return sch
}
// ToOCI returns the OCI Scheduler corresponding to the LinuxScheduler.
func (sch *LinuxScheduler) ToOCI() *rspec.Scheduler {
if sch == nil {
return nil
}
if sch.Policy == LinuxSchedulerPolicy_SCHED_NONE {
return nil
}
return &rspec.Scheduler{
Policy: sch.Policy.ToOCI(),
Nice: sch.Nice,
Priority: sch.Priority,
Flags: ToOCILinuxSchedulerFlags(sch.Flags),
Runtime: sch.Runtime,
Deadline: sch.Deadline,
Period: sch.Period,
}
}
// FromOCISchedulerPolicy returns the SchedulerPolicy corresponding to the
// given OCI SchedulerPolicy.
func FromOCISchedulerPolicy(o rspec.LinuxSchedulerPolicy) LinuxSchedulerPolicy {
return LinuxSchedulerPolicy(LinuxSchedulerPolicy_value[string(o)])
}
// ToOCI returns the OCI SchedulerPolicy corresponding to the given
// SchedulerPolicy.
func (p LinuxSchedulerPolicy) ToOCI() rspec.LinuxSchedulerPolicy {
if p == LinuxSchedulerPolicy_SCHED_NONE {
return rspec.LinuxSchedulerPolicy("")
}
return rspec.LinuxSchedulerPolicy(LinuxSchedulerPolicy_name[int32(p)])
}
// FromOCILinuxSchedulerFlags returns the LinuxSchedulerFlags corresponding to
// the given OCI LinuxSchedulerFlags.
func FromOCILinuxSchedulerFlags(o []rspec.LinuxSchedulerFlag) []LinuxSchedulerFlag {
if o == nil {
return nil
}
flags := make([]LinuxSchedulerFlag, len(o))
for i, f := range o {
flags[i] = LinuxSchedulerFlag(LinuxSchedulerFlag_value[string(f)])
}
return flags
}
// ToOCILinuxSchedulerFlags returns the OCI LinuxSchedulerFlags corresponding
// to the LinuxSchedulerFlags.
func ToOCILinuxSchedulerFlags(f []LinuxSchedulerFlag) []rspec.LinuxSchedulerFlag {
if f == nil {
return nil
}
flags := make([]rspec.LinuxSchedulerFlag, len(f))
for i, f := range f {
flags[i] = rspec.LinuxSchedulerFlag(LinuxSchedulerFlag_name[int32(f)])
}
return flags
}
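
A quick round trip through the new scheduler conversion helpers; the rspec.Scheduler literal and its values are purely illustrative.

package main

import (
	"fmt"

	"github.com/containerd/nri/pkg/api"
	rspec "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	oci := &rspec.Scheduler{
		Policy:   rspec.LinuxSchedulerPolicy("SCHED_FIFO"),
		Priority: 10,
	}
	sch := api.FromOCILinuxScheduler(oci)
	fmt.Println(sch.Policy, sch.Priority) // SCHED_FIFO 10

	back := sch.ToOCI()
	fmt.Println(back.Policy == oci.Policy) // true
}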

vendor/github.com/containerd/nri/pkg/api/net-device.go (new file, generated, vendored)

@@ -0,0 +1,67 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api
import (
rspec "github.com/opencontainers/runtime-spec/specs-go"
)
// FromOCILinuxNetDevice returns a LinuxNetDevice for the given OCI LinuxNetDevice.
func FromOCILinuxNetDevice(o rspec.LinuxNetDevice) *LinuxNetDevice {
return &LinuxNetDevice{
Name: o.Name,
}
}
// FromOCILinuxNetDevices returns LinuxNetDevice's for the given OCI LinuxNetDevice's.
func FromOCILinuxNetDevices(o map[string]rspec.LinuxNetDevice) map[string]*LinuxNetDevice {
if len(o) == 0 {
return nil
}
devices := make(map[string]*LinuxNetDevice, len(o))
for host, dev := range o {
devices[host] = FromOCILinuxNetDevice(dev)
}
return devices
}
// ToOCI returns the OCI LinuxNetDevice corresponding to the LinuxNetDevice.
func (d *LinuxNetDevice) ToOCI() rspec.LinuxNetDevice {
if d == nil {
return rspec.LinuxNetDevice{}
}
return rspec.LinuxNetDevice{
Name: d.Name,
}
}
// ToOCILinuxNetDevices returns the OCI LinuxNetDevice's corresponding to the LinuxNetDevice's.
func ToOCILinuxNetDevices(devices map[string]*LinuxNetDevice) map[string]rspec.LinuxNetDevice {
if devices == nil {
return nil
}
o := make(map[string]rspec.LinuxNetDevice, len(devices))
for host, dev := range devices {
o[host] = dev.ToOCI()
}
return o
}
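
The network-device conversions follow the same pattern; the device names below are made up, with the map key naming the host interface and Name its identity inside the container.

package main

import (
	"fmt"

	"github.com/containerd/nri/pkg/api"
	rspec "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	oci := map[string]rspec.LinuxNetDevice{
		"eth1": {Name: "ctr-eth1"},
	}
	devs := api.FromOCILinuxNetDevices(oci)
	fmt.Println(devs["eth1"].Name) // ctr-eth1

	back := api.ToOCILinuxNetDevices(devs)
	fmt.Println(back["eth1"].Name) // ctr-eth1
}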


@@ -18,6 +18,7 @@ package api
import (
"os"
"slices"
)
//
@@ -71,6 +72,41 @@ func (o *OptionalString) Get() *string {
return &v
}
// RepeatedString creates an Optional wrapper from its argument.
func RepeatedString(v interface{}) *OptionalRepeatedString {
var value []string
switch o := v.(type) {
case []string:
value = o
case *[]string:
if o == nil {
return nil
}
value = *o
case *OptionalRepeatedString:
if o == nil {
return nil
}
value = o.Value
default:
return nil
}
return &OptionalRepeatedString{
Value: slices.Clone(value),
}
}
// Get returns nil if its value is unset or a pointer to a copy of the value.
func (o *OptionalRepeatedString) Get() *[]string {
if o == nil {
return nil
}
v := slices.Clone(o.Value)
return &v
}
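
The new OptionalRepeatedString wrapper behaves like the other optional types: nil input stays nil and Get() returns a cloned slice. A tiny illustration, with made-up RDT schemata strings:

package main

import (
	"fmt"

	"github.com/containerd/nri/pkg/api"
)

func main() {
	s := api.RepeatedString([]string{"L3:0=fff", "MB:0=100"})
	fmt.Println(*s.Get())                       // [L3:0=fff MB:0=100]
	fmt.Println(api.RepeatedString(nil) == nil) // true: unset stays nil
}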
// Int creates an Optional wrapper from its argument.
func Int(v interface{}) *OptionalInt {
var value int64


@@ -14,6 +14,9 @@
limitations under the License.
*/
// TODO: Add comments to exported methods and functions.
//
//nolint:revive // exported symbols should have comments
package api
import (
@@ -145,6 +148,18 @@ func (o *OwningPlugins) ClaimRdtClass(id, plugin string) error {
return o.mustOwnersFor(id).ClaimRdtClass(plugin)
}
func (o *OwningPlugins) ClaimRdtClosID(id, plugin string) error {
return o.mustOwnersFor(id).ClaimRdtClosID(plugin)
}
func (o *OwningPlugins) ClaimRdtSchemata(id, plugin string) error {
return o.mustOwnersFor(id).ClaimRdtSchemata(plugin)
}
func (o *OwningPlugins) ClaimRdtEnableMonitoring(id, plugin string) error {
return o.mustOwnersFor(id).ClaimRdtEnableMonitoring(plugin)
}
func (o *OwningPlugins) ClaimCgroupsUnified(id, key, plugin string) error {
return o.mustOwnersFor(id).ClaimCgroupsUnified(key, plugin)
}
@@ -157,6 +172,10 @@ func (o *OwningPlugins) ClaimOomScoreAdj(id, plugin string) error {
return o.mustOwnersFor(id).ClaimOomScoreAdj(plugin)
}
func (o *OwningPlugins) ClaimLinuxScheduler(id, plugin string) error {
return o.mustOwnersFor(id).ClaimLinuxScheduler(plugin)
}
func (o *OwningPlugins) ClaimRlimit(id, typ, plugin string) error {
return o.mustOwnersFor(id).ClaimRlimit(typ, plugin)
}
@@ -169,6 +188,14 @@ func (o *OwningPlugins) ClaimSeccompPolicy(id, plugin string) error {
return o.mustOwnersFor(id).ClaimSeccompPolicy(plugin)
}
func (o *OwningPlugins) ClaimSysctl(id, key, plugin string) error {
return o.mustOwnersFor(id).ClaimSysctl(key, plugin)
}
func (o *OwningPlugins) ClaimLinuxNetDevice(id, path, plugin string) error {
return o.mustOwnersFor(id).ClaimLinuxNetDevice(path, plugin)
}
func (o *OwningPlugins) ClearAnnotation(id, key, plugin string) {
o.mustOwnersFor(id).ClearAnnotation(key, plugin)
}
@@ -189,6 +216,18 @@ func (o *OwningPlugins) ClearArgs(id, plugin string) {
o.mustOwnersFor(id).ClearArgs(plugin)
}
func (o *OwningPlugins) ClearSysctl(id, key, plugin string) {
o.mustOwnersFor(id).ClearSysctl(key, plugin)
}
func (o *OwningPlugins) ClearLinuxNetDevice(id, path, plugin string) {
o.mustOwnersFor(id).ClearLinuxNetDevice(path, plugin)
}
func (o *OwningPlugins) ClearRdt(id, plugin string) {
o.mustOwnersFor(id).ClearRdt(plugin)
}
func (o *OwningPlugins) AnnotationOwner(id, key string) (string, bool) {
return o.ownersFor(id).compoundOwner(Field_Annotations.Key(), key)
}
@@ -297,6 +336,18 @@ func (o *OwningPlugins) RdtClassOwner(id string) (string, bool) {
return o.ownersFor(id).simpleOwner(Field_RdtClass.Key())
}
func (o *OwningPlugins) RdtClosIDOwner(id string) (string, bool) {
return o.ownersFor(id).simpleOwner(Field_RdtClosID.Key())
}
func (o *OwningPlugins) RdtSchemataOwner(id string) (string, bool) {
return o.ownersFor(id).simpleOwner(Field_RdtSchemata.Key())
}
func (o *OwningPlugins) RdtEnableMonitoringOwner(id string) (string, bool) {
return o.ownersFor(id).simpleOwner(Field_RdtEnableMonitoring.Key())
}
func (o *OwningPlugins) CgroupsUnifiedOwner(id, key string) (string, bool) {
return o.ownersFor(id).compoundOwner(Field_CgroupsUnified.Key(), key)
}
@@ -309,6 +360,10 @@ func (o *OwningPlugins) OomScoreAdjOwner(id string) (string, bool) {
return o.ownersFor(id).simpleOwner(Field_OomScoreAdj.Key())
}
func (o *OwningPlugins) LinuxScheduler(id string) (string, bool) {
return o.ownersFor(id).simpleOwner(Field_LinuxSched.Key())
}
func (o *OwningPlugins) RlimitOwner(id, typ string) (string, bool) {
return o.ownersFor(id).compoundOwner(Field_Rlimits.Key(), typ)
}
@@ -321,6 +376,14 @@ func (o *OwningPlugins) SeccompPolicyOwner(id string) (string, bool) {
return o.ownersFor(id).simpleOwner(Field_SeccompPolicy.Key())
}
func (o *OwningPlugins) SysctlOwner(id, key string) (string, bool) {
return o.ownersFor(id).compoundOwner(Field_Sysctl.Key(), key)
}
func (o *OwningPlugins) LinuxNetDeviceOwner(id, path string) (string, bool) {
return o.ownersFor(id).compoundOwner(Field_LinuxNetDevices.Key(), path)
}
func (o *OwningPlugins) mustOwnersFor(id string) *FieldOwners {
f, ok := o.Owners[id]
if !ok {
@@ -519,6 +582,18 @@ func (f *FieldOwners) ClaimRdtClass(plugin string) error {
return f.claimSimple(Field_RdtClass.Key(), plugin)
}
func (f *FieldOwners) ClaimRdtClosID(plugin string) error {
return f.claimSimple(Field_RdtClosID.Key(), plugin)
}
func (f *FieldOwners) ClaimRdtSchemata(plugin string) error {
return f.claimSimple(Field_RdtSchemata.Key(), plugin)
}
func (f *FieldOwners) ClaimRdtEnableMonitoring(plugin string) error {
return f.claimSimple(Field_RdtEnableMonitoring.Key(), plugin)
}
func (f *FieldOwners) ClaimCgroupsUnified(key, plugin string) error {
return f.claimCompound(Field_CgroupsUnified.Key(), key, plugin)
}
@@ -531,6 +606,10 @@ func (f *FieldOwners) ClaimOomScoreAdj(plugin string) error {
return f.claimSimple(Field_OomScoreAdj.Key(), plugin)
}
func (f *FieldOwners) ClaimLinuxScheduler(plugin string) error {
return f.claimSimple(Field_LinuxSched.Key(), plugin)
}
func (f *FieldOwners) ClaimRlimit(typ, plugin string) error {
return f.claimCompound(Field_Rlimits.Key(), typ, plugin)
}
@@ -543,6 +622,14 @@ func (f *FieldOwners) ClaimSeccompPolicy(plugin string) error {
return f.claimSimple(Field_SeccompPolicy.Key(), plugin)
}
func (f *FieldOwners) ClaimSysctl(key, plugin string) error {
return f.claimCompound(Field_Sysctl.Key(), key, plugin)
}
func (f *FieldOwners) ClaimLinuxNetDevice(path, plugin string) error {
return f.claimCompound(Field_LinuxNetDevices.Key(), path, plugin)
}
func (f *FieldOwners) clearCompound(field int32, key, plugin string) {
m, ok := f.Compound[field]
if !ok {
@@ -577,6 +664,20 @@ func (f *FieldOwners) ClearArgs(plugin string) {
f.clearSimple(Field_Args.Key(), plugin)
}
func (f *FieldOwners) ClearSysctl(key, plugin string) {
f.clearCompound(Field_Sysctl.Key(), key, plugin)
}
func (f *FieldOwners) ClearLinuxNetDevice(key, plugin string) {
f.clearCompound(Field_LinuxNetDevices.Key(), key, plugin)
}
func (f *FieldOwners) ClearRdt(plugin string) {
f.clearSimple(Field_RdtClosID.Key(), plugin)
f.clearSimple(Field_RdtSchemata.Key(), plugin)
f.clearSimple(Field_RdtEnableMonitoring.Key(), plugin)
}
func (f *FieldOwners) Conflict(field int32, plugin, other string, qualifiers ...string) error {
return fmt.Errorf("plugins %q and %q both tried to set %s",
plugin, other, qualify(field, qualifiers...))
@@ -718,6 +819,14 @@ func (f *FieldOwners) RdtClassOwner() (string, bool) {
return f.simpleOwner(Field_RdtClass.Key())
}
func (f *FieldOwners) RdtSchemataOwner() (string, bool) {
return f.simpleOwner(Field_RdtSchemata.Key())
}
func (f *FieldOwners) RdtEnableMonitoringOwner() (string, bool) {
return f.simpleOwner(Field_RdtEnableMonitoring.Key())
}
func (f *FieldOwners) CgroupsUnifiedOwner(key string) (string, bool) {
return f.compoundOwner(Field_CgroupsUnified.Key(), key)
}


@@ -51,6 +51,7 @@ func CheckPluginIndex(idx string) error {
if len(idx) != 2 {
return fmt.Errorf("invalid plugin index %q, must be 2 digits", idx)
}
//nolint:staticcheck // could apply De Morgan's law
if !('0' <= idx[0] && idx[0] <= '9') || !('0' <= idx[1] && idx[1] <= '9') {
return fmt.Errorf("invalid plugin index %q (not [0-9][0-9])", idx)
}


@@ -22,6 +22,11 @@ import (
rspec "github.com/opencontainers/runtime-spec/specs-go"
)
const (
// UnlimitedPidsLimit indicates unlimited Linux PIDs limit.
UnlimitedPidsLimit = -1
)
// FromOCILinuxResources returns resources from an OCI runtime Spec.
func FromOCILinuxResources(o *rspec.LinuxResources, _ map[string]string) *LinuxResources {
if o == nil {
@@ -33,7 +38,7 @@ func FromOCILinuxResources(o *rspec.LinuxResources, _ map[string]string) *LinuxR
Limit: Int64(m.Limit),
Reservation: Int64(m.Reservation),
Swap: Int64(m.Swap),
Kernel: Int64(m.Kernel),
Kernel: Int64(m.Kernel), //nolint:staticcheck // ignore SA1019: m.Kernel is deprecated
KernelTcp: Int64(m.KernelTCP),
Swappiness: UInt64(m.Swappiness),
DisableOomKiller: Bool(m.DisableOOMKiller),
@@ -67,8 +72,9 @@ func FromOCILinuxResources(o *rspec.LinuxResources, _ map[string]string) *LinuxR
})
}
if p := o.Pids; p != nil {
l.Pids = &LinuxPids{
Limit: p.Limit,
l.Pids = &LinuxPids{}
if p.Limit != nil && *p.Limit != 0 {
l.Pids.Limit = *p.Limit
}
}
if len(o.Unified) != 0 {
@@ -134,8 +140,10 @@ func (r *LinuxResources) ToOCI() *rspec.LinuxResources {
})
}
if r.Pids != nil {
o.Pids = &rspec.LinuxPids{
Limit: r.Pids.Limit,
o.Pids = &rspec.LinuxPids{}
if r.Pids.Limit > UnlimitedPidsLimit {
limit := r.Pids.Limit
o.Pids.Limit = &limit
}
}
return o
@@ -189,6 +197,7 @@ func (r *LinuxResources) Copy() *LinuxResources {
}
o.BlockioClass = String(r.BlockioClass)
o.RdtClass = String(r.RdtClass)
for _, d := range r.Devices {
o.Devices = append(o.Devices, &LinuxDeviceCgroup{
Allow: d.Allow,
@@ -201,3 +210,15 @@ func (r *LinuxResources) Copy() *LinuxResources {
return o
}
// Copy creates a copy of the RDT configuration.
func (r *LinuxRdt) Copy() *LinuxRdt {
if r == nil {
return nil
}
return &LinuxRdt{
ClosId: String(r.ClosId),
Schemata: RepeatedString(r.Schemata),
EnableMonitoring: Bool(r.EnableMonitoring),
}
}


@@ -20,6 +20,7 @@ import (
rspec "github.com/opencontainers/runtime-spec/specs-go"
)
// FromOCILinuxSeccomp converts an seccomp configuration from an OCI runtime spec.
func FromOCILinuxSeccomp(o *rspec.LinuxSeccomp) *LinuxSeccomp {
var errno *OptionalUInt32
if o.DefaultErrnoRet != nil {
@@ -47,6 +48,7 @@ func FromOCILinuxSeccomp(o *rspec.LinuxSeccomp) *LinuxSeccomp {
}
}
// FromOCILinuxSyscalls converts seccomp syscalls configuration from an OCI runtime spec.
func FromOCILinuxSyscalls(o []rspec.LinuxSyscall) []*LinuxSyscall {
syscalls := make([]*LinuxSyscall, len(o))
@@ -67,6 +69,7 @@ func FromOCILinuxSyscalls(o []rspec.LinuxSyscall) []*LinuxSyscall {
return syscalls
}
// FromOCILinuxSeccompArgs converts seccomp syscall args from an OCI runtime spec.
func FromOCILinuxSeccompArgs(o []rspec.LinuxSeccompArg) []*LinuxSeccompArg {
args := make([]*LinuxSeccompArg, len(o))
@@ -82,6 +85,7 @@ func FromOCILinuxSeccompArgs(o []rspec.LinuxSeccompArg) []*LinuxSeccompArg {
return args
}
// ToOCILinuxSyscalls converts seccomp syscalls configuration to an OCI runtime spec.
func ToOCILinuxSyscalls(o []*LinuxSyscall) []rspec.LinuxSyscall {
syscalls := make([]rspec.LinuxSyscall, len(o))
@@ -103,6 +107,7 @@ func ToOCILinuxSyscalls(o []*LinuxSyscall) []rspec.LinuxSyscall {
return syscalls
}
// ToOCILinuxSeccompArgs converts seccomp syscall args to an OCI runtime spec.
func ToOCILinuxSeccompArgs(o []*LinuxSeccompArg) []rspec.LinuxSeccompArg {
args := make([]rspec.LinuxSeccompArg, len(o))


@@ -112,6 +112,10 @@ func (l *LinuxContainerAdjustment) Strip() *LinuxContainerAdjustment {
empty = false
}
if l.Rdt = l.Rdt.Strip(); l.Rdt != nil {
empty = false
}
if empty {
return nil
}
@@ -223,7 +227,7 @@ func (c *LinuxCPU) Strip() *LinuxCPU {
return nil
}
empty := true
empty := true //nolint:staticcheck // could merge conditional assignment below to variable definition
if c.Shares != nil {
empty = false
@@ -262,7 +266,7 @@ func (m *LinuxMemory) Strip() *LinuxMemory {
return nil
}
empty := true
empty := true //nolint:staticcheck // could merge conditional assignment below to variable definition
if m.Limit != nil {
empty = false
@@ -296,6 +300,23 @@ func (m *LinuxMemory) Strip() *LinuxMemory {
return m
}
// Strip empty fields from a linux RDT configuration, reducing a fully empty one
// to nil. Strip allows comparison of two sets of resources for semantic
// equality using go-cmp.
func (r *LinuxRdt) Strip() *LinuxRdt {
if r == nil {
return nil
}
switch {
case r.ClosId != nil, r.Schemata != nil, r.EnableMonitoring != nil:
// non-empty
return r
}
return nil
}
// Strip empty fields from a container update, reducing a fully empty one
// to nil. Strip allows comparison of two updates for semantic equality
// using go-cmp.


@@ -16,8 +16,9 @@
package api
//nolint
// SetContainerId sets the id of the container to update.
//
//nolint:revive
func (u *ContainerUpdate) SetContainerId(id string) {
u.ContainerId = id
}


@@ -20,6 +20,7 @@ import (
"fmt"
)
// AddPlugin records a plugin for the validation request.
func (v *ValidateContainerAdjustmentRequest) AddPlugin(name, index string) {
v.Plugins = append(v.Plugins, &PluginInstance{
Name: name,
@@ -27,15 +28,18 @@ func (v *ValidateContainerAdjustmentRequest) AddPlugin(name, index string) {
})
}
// AddResponse records the container adjustments and updates to validate from a CreateContainerResponse.
func (v *ValidateContainerAdjustmentRequest) AddResponse(rpl *CreateContainerResponse) {
v.Adjust = rpl.Adjust
v.Update = rpl.Update
}
// AddOwners sets the owning plugins for the container adjustment request.
func (v *ValidateContainerAdjustmentRequest) AddOwners(owners *OwningPlugins) {
v.Owners = owners
}
// ValidationResult returns the validation result as an error (non-nil if rejected).
func (v *ValidateContainerAdjustmentResponse) ValidationResult(plugin string) error {
if !v.Reject {
return nil
@@ -49,6 +53,7 @@ func (v *ValidateContainerAdjustmentResponse) ValidationResult(plugin string) er
return fmt.Errorf("validator %q rejected container adjustment, reason: %s", plugin, reason)
}
// GetPluginMap returns a map of plugin name to PluginInstance.
func (v *ValidateContainerAdjustmentRequest) GetPluginMap() map[string]*PluginInstance {
if v == nil {
return nil


@@ -363,7 +363,6 @@ func (m *mux) setError(err error) {
})
}
// nolint
func (m *mux) error() error {
m.errOnce.Do(func() {
if m.err == nil {

View File

@@ -178,6 +178,9 @@ type Stub interface {
// This is the default timeout if the plugin has not been started or
// the timeout received in the Configure request otherwise.
RequestTimeout() time.Duration
// Logger returns the logger used by the stub.
Logger() nrilog.Logger
}
const (
@@ -188,9 +191,6 @@ const (
)
var (
// Logger for messages generated internally by the stub itself.
log = nrilog.Get()
// Used instead of a nil Context in logging.
noCtx = context.TODO()
@@ -268,6 +268,14 @@ func WithTTRPCOptions(clientOpts []ttrpc.ClientOpts, serverOpts []ttrpc.ServerOp
}
}
// WithLogger sets the logger to be used by the stub.
func WithLogger(logger nrilog.Logger) Option {
return func(s *stub) error {
s.logger = logger
return nil
}
}
// stub implements Stub.
type stub struct {
sync.Mutex
@@ -295,6 +303,7 @@ type stub struct {
registrationTimeout time.Duration
requestTimeout time.Duration
logger nrilog.Logger
}
// Handlers for NRI plugin event and request.
@@ -329,6 +338,7 @@ func New(p interface{}, opts ...Option) (Stub, error) {
registrationTimeout: DefaultRegistrationTimeout,
requestTimeout: DefaultRequestTimeout,
logger: nrilog.Get(),
}
for _, o := range opts {
@@ -345,7 +355,7 @@ func New(p interface{}, opts ...Option) (Stub, error) {
return nil, err
}
log.Infof(noCtx, "Created plugin %s (%s, handles %s)", stub.Name(),
stub.logger.Infof(noCtx, "Created plugin %s (%s, handles %s)", stub.Name(),
filepath.Base(os.Args[0]), stub.events.PrettyString())
return stub, nil
@@ -440,7 +450,7 @@ func (stub *stub) Start(ctx context.Context) (retErr error) {
return err
}
log.Infof(ctx, "Started plugin %s...", stub.Name())
stub.logger.Infof(ctx, "Started plugin %s...", stub.Name())
stub.started = true
return nil
@@ -448,7 +458,7 @@ func (stub *stub) Start(ctx context.Context) (retErr error) {
// Stop the plugin.
func (stub *stub) Stop() {
log.Infof(noCtx, "Stopping plugin %s...", stub.Name())
stub.logger.Infof(noCtx, "Stopping plugin %s...", stub.Name())
stub.Lock()
defer stub.Unlock()
@@ -504,7 +514,7 @@ func (stub *stub) Run(ctx context.Context) error {
err = <-stub.srvErrC
if err == ttrpc.ErrServerClosed {
log.Infof(noCtx, "ttrpc server closed %s : %v", stub.Name(), err)
stub.logger.Infof(noCtx, "ttrpc server closed %s : %v", stub.Name(), err)
}
return err
@@ -522,6 +532,10 @@ func (stub *stub) Name() string {
return stub.idx + "-" + stub.name
}
func (stub *stub) Logger() nrilog.Logger {
return stub.logger
}
func (stub *stub) RegistrationTimeout() time.Duration {
return stub.registrationTimeout
}
@@ -533,12 +547,12 @@ func (stub *stub) RequestTimeout() time.Duration {
// Connect the plugin to NRI.
func (stub *stub) connect() error {
if stub.conn != nil {
log.Infof(noCtx, "Using given plugin connection...")
stub.logger.Infof(noCtx, "Using given plugin connection...")
return nil
}
if env := os.Getenv(api.PluginSocketEnvVar); env != "" {
log.Infof(noCtx, "Using connection %q from environment...", env)
stub.logger.Infof(noCtx, "Using connection %q from environment...", env)
fd, err := strconv.Atoi(env)
if err != nil {
@@ -566,7 +580,7 @@ func (stub *stub) connect() error {
// Register the plugin with NRI.
func (stub *stub) register(ctx context.Context) error {
log.Infof(ctx, "Registering plugin %s...", stub.Name())
stub.logger.Infof(ctx, "Registering plugin %s...", stub.Name())
ctx, cancel := context.WithTimeout(ctx, stub.registrationTimeout)
defer cancel()
@@ -621,7 +635,7 @@ func (stub *stub) Configure(ctx context.Context, req *api.ConfigureRequest) (rpl
err error
)
log.Infof(ctx, "Configuring plugin %s for runtime %s/%s...", stub.Name(),
stub.logger.Infof(ctx, "Configuring plugin %s for runtime %s/%s...", stub.Name(),
req.RuntimeName, req.RuntimeVersion)
stub.registrationTimeout = time.Duration(req.RegistrationTimeout * int64(time.Millisecond))
@@ -636,7 +650,7 @@ func (stub *stub) Configure(ctx context.Context, req *api.ConfigureRequest) (rpl
} else {
events, err = handler(ctx, req.Config, req.RuntimeName, req.RuntimeVersion)
if err != nil {
log.Errorf(ctx, "Plugin configuration failed: %v", err)
stub.logger.Errorf(ctx, "Plugin configuration failed: %v", err)
return nil, err
}
@@ -646,13 +660,13 @@ func (stub *stub) Configure(ctx context.Context, req *api.ConfigureRequest) (rpl
// Only allow plugins to subscribe to events they can handle.
if extra := events & ^stub.events; extra != 0 {
log.Errorf(ctx, "Plugin subscribed for unhandled events %s (0x%x)",
stub.logger.Errorf(ctx, "Plugin subscribed for unhandled events %s (0x%x)",
extra.PrettyString(), extra)
return nil, fmt.Errorf("internal error: unhandled events %s (0x%x)",
extra.PrettyString(), extra)
}
log.Infof(ctx, "Subscribing plugin %s (%s) for events %s", stub.Name(),
stub.logger.Infof(ctx, "Subscribing plugin %s (%s) for events %s", stub.Name(),
filepath.Base(os.Args[0]), events.PrettyString())
}
@@ -679,7 +693,7 @@ func (stub *stub) collectSync(req *api.SynchronizeRequest) (*api.SynchronizeResp
stub.Lock()
defer stub.Unlock()
log.Debugf(noCtx, "collecting sync req with %d pods, %d containers...",
stub.logger.Debugf(noCtx, "collecting sync req with %d pods, %d containers...",
len(req.Pods), len(req.Containers))
if stub.syncReq == nil {
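The new WithLogger option and Logger accessor let a plugin route the stub's internal messages through its own logger. A minimal sketch follows; it assumes nrilog.Logger is satisfied by Debugf/Infof/Warnf/Errorf methods taking a context (only the first three appear in the hunks above, Warnf is assumed), and that the RunPodSandbox hook still has its (ctx, pod) signature:

package main

import (
	"context"
	"log"

	"github.com/containerd/nri/pkg/api"
	"github.com/containerd/nri/pkg/stub"
)

// stderrLogger is a hypothetical nrilog.Logger implementation that prefixes
// stub-internal messages and hands them to the standard library logger.
type stderrLogger struct{ prefix string }

func (l stderrLogger) Debugf(_ context.Context, format string, args ...interface{}) {
	log.Printf(l.prefix+" DEBUG: "+format, args...)
}

func (l stderrLogger) Infof(_ context.Context, format string, args ...interface{}) {
	log.Printf(l.prefix+" INFO: "+format, args...)
}

func (l stderrLogger) Warnf(_ context.Context, format string, args ...interface{}) {
	log.Printf(l.prefix+" WARN: "+format, args...)
}

func (l stderrLogger) Errorf(_ context.Context, format string, args ...interface{}) {
	log.Printf(l.prefix+" ERROR: "+format, args...)
}

// plugin subscribes to a single event so the stub has something to register.
type plugin struct{}

func (p *plugin) RunPodSandbox(_ context.Context, pod *api.PodSandbox) error {
	log.Printf("pod %s started", pod.GetName())
	return nil
}

func main() {
	s, err := stub.New(&plugin{}, stub.WithLogger(stderrLogger{prefix: "[my-plugin]"}))
	if err != nil {
		log.Fatal(err)
	}
	// The configured logger is exposed back through the Stub interface.
	s.Logger().Infof(context.Background(), "created plugin, waiting for events")

	if err := s.Run(context.Background()); err != nil {
		log.Fatal(err)
	}
}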

View File

@@ -26,6 +26,7 @@ import (
)
type (
// DefaultValidatorConfig is an alias for DefaultValidatorConfig from main package.
DefaultValidatorConfig = validator.DefaultValidatorConfig
)

View File

@@ -24,12 +24,14 @@ import (
"strconv"
"strings"
yaml "gopkg.in/yaml.v3"
"github.com/containerd/nri/pkg/api"
"github.com/containerd/nri/pkg/log"
"github.com/containerd/nri/pkg/plugin"
yaml "gopkg.in/yaml.v3"
)
// DefaultValidatorConfig is the configuration for the default validator plugin.
type DefaultValidatorConfig struct {
// Enable the default validator plugin.
Enable bool `yaml:"enable" toml:"enable"`
@@ -46,6 +48,8 @@ type DefaultValidatorConfig struct {
RejectCustomSeccompAdjustment bool `yaml:"rejectCustomSeccompAdjustment" toml:"reject_custom_seccomp_adjustment"`
// RejectNamespaceAdjustment fails validation if any plugin adjusts Linux namespaces.
RejectNamespaceAdjustment bool `yaml:"rejectNamespaceAdjustment" toml:"reject_namespace_adjustment"`
// RejectSysctlAdjustment fails validation if any plugin adjusts sysctls
RejectSysctlAdjustment bool `yaml:"rejectSysctlAdjustment" toml:"reject_sysctl_adjustment"`
// RequiredPlugins list globally required plugins. These must be present
// or otherwise validation will fail.
// WARNING: This is a global setting and will affect all containers. In
@@ -114,6 +118,11 @@ func (v *DefaultValidator) ValidateContainerAdjustment(ctx context.Context, req
return err
}
if err := v.validateSysctl(req); err != nil {
log.Errorf(ctx, "rejecting adjustment: %v", err)
return err
}
return nil
}
@@ -202,6 +211,31 @@ func (v *DefaultValidator) validateNamespaces(req *api.ValidateContainerAdjustme
ErrValidation, offenders)
}
func (v *DefaultValidator) validateSysctl(req *api.ValidateContainerAdjustmentRequest) error {
if req.Adjust == nil || req.Adjust.Linux == nil {
return nil
}
if !v.cfg.RejectSysctlAdjustment {
return nil
}
var owners []string
for key := range req.Adjust.Linux.Sysctl {
owner, claimed := req.Owners.SysctlOwner(req.Container.Id, key)
if !claimed {
continue
}
owners = append(owners, owner)
}
if len(owners) == 0 {
return nil
}
return fmt.Errorf("%w: attempted restricted sysctl adjustment by plugin(s) %s", ErrValidation, strings.Join(owners, ", "))
}
func (v *DefaultValidator) validateRequiredPlugins(req *api.ValidateContainerAdjustmentRequest) error {
var (
container = req.GetContainer().GetName()
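The new rejectSysctlAdjustment knob is wired through the same YAML/TOML tags as the existing options, so enabling it is just another key in the drop-in configuration. A small sketch that decodes such a fragment; the import path of the re-exporting builtin package is an assumption here:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v3"

	validator "github.com/containerd/nri/plugins/default-validator/builtin"
)

func main() {
	// Hypothetical drop-in fragment; keys follow the yaml tags shown above.
	fragment := []byte("enable: true\nrejectSysctlAdjustment: true\n")

	var cfg validator.DefaultValidatorConfig
	if err := yaml.Unmarshal(fragment, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("default validator enabled=%v, reject sysctl adjustments=%v\n",
		cfg.Enable, cfg.RejectSysctlAdjustment)
}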

4
vendor/modules.txt
View File

@@ -492,8 +492,8 @@ github.com/containerd/go-runc
## explicit; go 1.20
github.com/containerd/log
github.com/containerd/log/logtest
# github.com/containerd/nri v0.10.0
## explicit; go 1.24.3
# github.com/containerd/nri v0.11.0
## explicit; go 1.24.0
github.com/containerd/nri/pkg/adaptation
github.com/containerd/nri/pkg/adaptation/builtin
github.com/containerd/nri/pkg/api