// Device driver type identifiers. The runtime sends one of these
// strings in Device.Type / Storage driver fields; addDevice uses them
// to pick an entry from deviceHandlerList.
const (
// Shared-filesystem / storage driver names. Not referenced in this
// chunk — presumably consumed by the storage handling code elsewhere
// in the file; verify against the storage handlers.
driver9pType = "9p"
driverVirtioFSType = "virtio-fs"
// Block / device driver names; these key deviceHandlerList below.
driverBlkType = "blk"
driverBlkCCWType = "blk-ccw"
driverMmioBlkType = "mmioblk"
driverSCSIType = "scsi"
driverNvdimmType = "nvdimm"
// Ephemeral and local storage drivers; also not referenced in view.
driverEphemeralType = "ephemeral"
driverLocalType = "local"
// Root of the VM guest filesystem.
vmRootfs = "/"
)
Access to devices on the host
If you need to expose some host devices directly to a container, use the devices parameter of the host_config argument to Client.create_container, as shown below:
cli.create_container(
'busybox', 'true', host_config=cli.create_host_config(devices=[
'/dev/sda:/dev/xvda:rwm'
])
)
Each string is a single mapping using the following format: <path_on_host>:<path_in_container>:<cgroup_permissions> The above example allows the container to have read-write access to the host's /dev/sda via a node named /dev/xvda inside the container.
As a more verbose alternative, each host device definition can be specified as a dictionary with the following keys:
{
'PathOnHost': '/dev/sda1',
'PathInContainer': '/dev/xvda',
'CgroupPermissions': 'rwm'
}
// devIndexEntry records where a device appears in an OCI spec:
// idx is the device's position in spec.Linux.Devices, and resourceIdx
// holds the positions of every matching entry (same type/major/minor)
// in spec.Linux.Resources.Devices.
//
// Fix: the declaration was truncated — the resourceIdx field, the
// closing brace, and the devIndex map type were missing, although all
// three are required by makeDevIndex (make(devIndex),
// devIndexEntry{idx: ..., resourceIdx: ...}).
type devIndexEntry struct {
	idx         int
	resourceIdx []int
}

// devIndex maps a device path (in host / outer container naming) to
// its devIndexEntry; built by makeDevIndex, consumed by the device
// handlers so they can update the right spec slots in place.
type devIndex map[string]devIndexEntry
// makeDevIndex builds a lookup table from each device path in the spec
// to the position of that device in spec.Linux.Devices and the
// positions of any matching (same type/major/minor) entries in
// spec.Linux.Resources.Devices. A nil or device-less spec yields an
// empty index.
func makeDevIndex(spec *pb.Spec) devIndex {
	index := make(devIndex)
	if spec == nil || spec.Linux == nil || spec.Linux.Devices == nil {
		return index
	}

	// Resource entries are optional; check once instead of per device.
	haveResources := spec.Linux.Resources != nil && spec.Linux.Resources.Devices != nil

	for devPos, dev := range spec.Linux.Devices {
		resPositions := []int{}
		if haveResources {
			for resPos, res := range spec.Linux.Resources.Devices {
				if res.Type == dev.Type && res.Major == dev.Major && res.Minor == dev.Minor {
					resPositions = append(resPositions, resPos)
				}
			}
		}
		index[dev.Path] = devIndexEntry{
			idx:         devPos,
			resourceIdx: resPositions,
		}
	}
	return index
}
// CreateContainer begins handling a CreateContainer gRPC request:
// it validates the request, rescans the PCI bus so late-plugged
// devices become visible, then rewrites the device entries of the OCI
// spec to match the real devices inside the VM.
//
// NOTE(review): this chunk appears truncated — the function body
// continues past the last visible line and its closing brace is not
// in view; confirm against the full file before editing further.
func (a *agentGRPC) CreateContainer(ctx context.Context, req *pb.CreateContainerRequest) (resp *gpb.Empty, err error) {
if err := a.createContainerChecks(req); err != nil {
return emptyResp, err
}
// re-scan PCI bus
// looking for hidden devices
if err = rescanPciBus(); err != nil {
// Best-effort: a failed rescan is only logged, not fatal.
agentLog.WithError(err).Warn("Could not rescan PCI bus")
}
// Some devices need some extra processing (the ones invoked with
// --device for instance), and that's what this call is doing. It
// updates the devices listed in the OCI spec, so that they actually
// match real devices inside the VM. This step is necessary since we
// cannot predict everything from the caller.
if err = addDevices(ctx, req.Devices, req.OCI, a.sandbox); err != nil {
return emptyResp, err
}
// addDevices runs the driver-specific handler for every non-nil device
// in the list, sharing a single device index built from the spec.
// Processing stops at the first handler error, which is returned as-is.
func addDevices(ctx context.Context, devices []*pb.Device, spec *pb.Spec, s *sandbox) error {
	index := makeDevIndex(spec)

	for _, dev := range devices {
		if dev == nil {
			// Skip holes in the device list rather than failing.
			continue
		}
		if err := addDevice(ctx, dev, spec, s, index); err != nil {
			return err
		}
	}

	return nil
}
// addDevice validates a single device request and dispatches it to the
// handler registered for its driver type in deviceHandlerList. It
// returns an InvalidArgument gRPC status on any validation failure or
// unknown driver type.
func addDevice(ctx context.Context, device *pb.Device, spec *pb.Spec, s *sandbox, devIdx devIndex) error {
	switch {
	case device == nil:
		return grpcStatus.Error(codes.InvalidArgument, "invalid device")
	case spec == nil:
		return grpcStatus.Error(codes.InvalidArgument, "invalid spec")
	}

	// log before validation to help with debugging gRPC protocol
	// version differences.
	agentLog.WithFields(logrus.Fields{
		"device-id":             device.Id,
		"device-type":           device.Type,
		"device-vm-path":        device.VmPath,
		"device-container-path": device.ContainerPath,
		"device-options":        device.Options,
	}).Debug()

	switch {
	case device.Type == "":
		return grpcStatus.Errorf(codes.InvalidArgument,
			"invalid type for device %v", device)
	case device.Id == "" && device.VmPath == "":
		// At least one of ID or VM path is needed to locate the device.
		return grpcStatus.Errorf(codes.InvalidArgument,
			"invalid ID and VM path for device %v", device)
	case device.ContainerPath == "":
		return grpcStatus.Errorf(codes.InvalidArgument,
			"invalid container path for device %v", device)
	}

	handler, known := deviceHandlerList[device.Type]
	if !known {
		return grpcStatus.Errorf(codes.InvalidArgument,
			"Unknown device type %q", device.Type)
	}

	return handler(ctx, *device, spec, s, devIdx)
}
// deviceHandler is the signature of a driver-specific device setup
// function invoked from addDevice; it receives the device by value
// plus the spec, sandbox, and pre-built device index to update.
type deviceHandler func(ctx context.Context, device pb.Device, spec *pb.Spec, s *sandbox, devIdx devIndex) error

// deviceHandlerList maps a Device.Type driver string (see the driver*
// constants at the top of the file) to its handler. Types absent from
// this map are rejected by addDevice with InvalidArgument.
var deviceHandlerList = map[string]deviceHandler{
driverMmioBlkType: virtioMmioBlkDeviceHandler,
driverBlkType: virtioBlkDeviceHandler,
driverBlkCCWType: virtioBlkCCWDeviceHandler,
driverSCSIType: virtioSCSIDeviceHandler,
driverNvdimmType: nvdimmDeviceHandler,
}