Kata Containers mount and umount

 

 

func setupStorages(sandbox *Sandbox) []*grpc.Storage {
        storages := []*grpc.Storage{}
        caps := sandbox.hypervisor.capabilities()

        // append 9p shared volume to storages only if filesystem sharing is supported
        if caps.IsFsSharingSupported() {
                // We mount the shared directory in a predefined location
                // in the guest.
                // This is where at least some of the host config files
                // (resolv.conf, etc...) and potentially all container
                // rootfs will reside.
                if sandbox.config.HypervisorConfig.SharedFS == config.VirtioFS {
                        // If virtio-fs uses either of the two cache options 'auto, always',
                        // the guest directory can be mounted with option 'dax' allowing it to
                        // directly map contents from the host. When set to 'none', the mount
                        // options should not contain 'dax' lest the virtio-fs daemon crash
                        // with an invalid address reference.
                        if sandbox.config.HypervisorConfig.VirtioFSCache != typeVirtioFSNoCache {
                                // If virtio_fs_cache_size = 0, dax should not be used.
                                if sandbox.config.HypervisorConfig.VirtioFSCacheSize != 0 {
                                        sharedDirVirtioFSOptions = append(sharedDirVirtioFSOptions, sharedDirVirtioFSDaxOptions)
                                }
                        }
                        sharedVolume := &grpc.Storage{
                                Driver:     kataVirtioFSDevType,
                                Source:     mountGuestTag,
                                MountPoint: kataGuestSharedDir(),
                                Fstype:     typeVirtioFS,
                                Options:    sharedDirVirtioFSOptions,
                        }

                        storages = append(storages, sharedVolume)
                }
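
The DAX decision above boils down to a two-part predicate. The helper below is hypothetical (it is not part of the kata source) and only spells the conditions out, assuming typeVirtioFSNoCache is the "none" cache mode:

// Hypothetical helper, for illustration only.
func shouldUseDAX(cacheMode string, cacheSizeMB uint32) bool {
        // 'dax' is appended only when the cache mode is not "none" and a
        // non-zero DAX cache window (virtio_fs_cache_size) is configured.
        return cacheMode != "none" && cacheSizeMB != 0
}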

 

 

// handleEphemeralStorage handles ephemeral storages by
// creating a Storage from the corresponding source of the mount point
func (k *kataAgent) handleEphemeralStorage(mounts []specs.Mount) []*grpc.Storage {
        var epheStorages []*grpc.Storage
        for idx, mnt := range mounts {
                if mnt.Type == KataEphemeralDevType {
                        // Set the mount source path to a path that resides inside the VM
                        mounts[idx].Source = filepath.Join(ephemeralPath(), filepath.Base(mnt.Source))
                        // Set the mount type to "bind"
                        mounts[idx].Type = "bind"

                        // Create a storage struct so that kata agent is able to create
                        // tmpfs backed volume inside the VM
                        epheStorage := &grpc.Storage{
                                Driver:     KataEphemeralDevType,
                                Source:     "tmpfs",
                                Fstype:     "tmpfs",
                                MountPoint: mounts[idx].Source,
                        }
                        epheStorages = append(epheStorages, epheStorage)
                }
        }
        return epheStorages
}
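
The effect of the source rewrite is easiest to see on a concrete Kubernetes emptyDir path. A minimal, runnable illustration (assuming ephemeralPath() resolves to /run/kata-containers/sandbox/ephemeral; the kubelet path below is made up):

package main

import (
        "fmt"
        "path/filepath"
)

// Stand-in for ephemeralPath(): assumed guest-side location of ephemeral volumes.
const ephemeralDir = "/run/kata-containers/sandbox/ephemeral"

func main() {
        // Host-side emptyDir source as it appears in the OCI spec (illustrative path).
        hostSource := "/var/lib/kubelet/pods/1234/volumes/kubernetes.io~empty-dir/cache-volume"

        // Same rewrite as handleEphemeralStorage: keep only the volume name and
        // re-root it under the guest ephemeral directory.
        guestSource := filepath.Join(ephemeralDir, filepath.Base(hostSource))

        fmt.Println(guestSource) // /run/kata-containers/sandbox/ephemeral/cache-volume
}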

 

 

                // Add the block device to the list of container devices, to make sure the
                // device is detached with detachDevices() for a container.
                c.devices = append(c.devices, ContainerDevice{ID: id, ContainerPath: m.Destination})

                var vol *grpc.Storage

                device := c.sandbox.devManager.GetDeviceByID(id)
                if device == nil {
                        k.Logger().WithField("device", id).Error("failed to find device by id")
                        return nil, fmt.Errorf("Failed to find device by id (id=%s)", id)
                }

                var err error
                switch device.DeviceType() {
                case config.DeviceBlock:
                        vol, err = k.handleDeviceBlockVolume(c, device)
                case config.VhostUserBlk:
                        vol, err = k.handleVhostUserBlkVolume(c, device)
                default:
                        k.Logger().Error("Unknown device type")
                        continue
                }

                if vol == nil || err != nil {
                        return nil, err
                }

                vol.MountPoint = m.Destination
                if vol.Fstype == "" {
                        vol.Fstype = "bind"
                }
                if len(vol.Options) == 0 {
                        vol.Options = []string{"bind"}
                }

                volumeStorages = append(volumeStorages, vol)
        }

        return volumeStorages, nil
}
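
Putting the loop together: for a device-backed volume that carries no filesystem information, the defaulting at the end yields a Storage that simply bind-mounts the guest device node at the OCI destination. Illustrative values only (Driver and Source are filled in by handleDeviceBlockVolume, which is not shown here):

vol := &grpc.Storage{
        Driver:     "blk",            // typical value for a virtio-blk backed volume
        Source:     "...",            // device address/path set by handleDeviceBlockVolume
        MountPoint: "/data",          // m.Destination from the OCI mount (example)
        Fstype:     "bind",           // defaulted above when the device has no fstype
        Options:    []string{"bind"}, // defaulted above when no options were provided
}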

 

 

func ephemeralStorageHandler(_ context.Context, storage pb.Storage, s *sandbox) (string, error) {
        s.Lock()
        defer s.Unlock()
        newStorage := s.setSandboxStorage(storage.MountPoint)

        if newStorage {
                var err error
                if err = os.MkdirAll(storage.MountPoint, os.ModePerm); err == nil {
                        _, err = commonStorageHandler(storage)
                }
                return "", err
        }
        return "", nil
}
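
The setSandboxStorage call is what makes this handler safe to run once per container sharing the same emptyDir volume: the sandbox keeps a reference count per mount point, and only the first user actually creates and mounts it. A minimal sketch of that bookkeeping, assuming a refCount field on the sandbox storage entry (not the verbatim agent code):

func (s *sandbox) setSandboxStorage(path string) bool {
        if entry, ok := s.storages[path]; ok {
                // Already mounted on behalf of another container: just take a reference.
                entry.refCount++
                return false
        }

        // First user of this mount point: record it with an initial reference.
        s.storages[path] = &sandboxStorage{refCount: 1}
        return true
}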

 


 

 

 

// storageHandler is the type of callback to be defined to handle every
// type of storage driver.
type storageHandler func(ctx context.Context, storage pb.Storage, s *sandbox) (string, error)

// storageHandlerList lists the supported drivers.
var storageHandlerList = map[string]storageHandler{
        driver9pType:        virtio9pStorageHandler,
        driverVirtioFSType:  virtioFSStorageHandler,
        driverBlkType:       virtioBlkStorageHandler,
        driverBlkCCWType:    virtioBlkCCWStorageHandler,
        driverMmioBlkType:   virtioMmioBlkStorageHandler,
        driverSCSIType:      virtioSCSIStorageHandler,
        driverEphemeralType: ephemeralStorageHandler,
        driverLocalType:     localStorageHandler,
        driverNvdimmType:    nvdimmStorageHandler,
}
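
These handlers are driven by the agent's addStorages, which is essentially a dispatch loop over this table. A simplified sketch (nil checks kept, error wrapping trimmed; not the verbatim upstream code):

func addStorages(ctx context.Context, storages []*pb.Storage, s *sandbox) ([]string, error) {
        var mountList []string

        for _, storage := range storages {
                if storage == nil {
                        continue
                }

                handler, ok := storageHandlerList[storage.Driver]
                if !ok {
                        return nil, fmt.Errorf("unknown storage driver %q", storage.Driver)
                }

                // Each handler mounts (or prepares) the storage and returns the
                // mount point it is responsible for, if any.
                mountPoint, err := handler(ctx, *storage, s)
                if err != nil {
                        return nil, err
                }

                if mountPoint != "" {
                        mountList = append(mountList, mountPoint)
                }
        }

        return mountList, nil
}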

 

// virtioBlkStorageHandler handles the storage for blk driver.
func virtioBlkStorageHandler(_ context.Context, storage pb.Storage, s *sandbox) (string, error) {

        // If hot-plugged, get the device node path based on the PCI address else
        // use the virt path provided in Storage Source
        if strings.HasPrefix(storage.Source, "/dev") {
                FileInfo, err := os.Stat(storage.Source)
                if err != nil {
                        return "", err
                }

                // Make sure the virt path is valid
                if FileInfo.Mode()&os.ModeDevice == 0 {
                        return "", fmt.Errorf("invalid device %s", storage.Source)
                }
        } else {
                devPath, err := getPCIDeviceName(s, storage.Source)
                if err != nil {
                        return "", err
                }

                storage.Source = devPath
        }

        return commonStorageHandler(storage)
}
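
The /dev fast path above only verifies that the source really is a device node. Pulled out as a standalone, runnable snippet, that check is just:

package main

import (
        "fmt"
        "os"
)

// isDeviceNode reports whether path refers to a (block or character) device node.
func isDeviceNode(path string) (bool, error) {
        fi, err := os.Stat(path)
        if err != nil {
                return false, err
        }
        return fi.Mode()&os.ModeDevice != 0, nil
}

func main() {
        ok, err := isDeviceNode("/dev/null")
        fmt.Println(ok, err) // true <nil> on a typical Linux host
}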

 

func commonStorageHandler(storage pb.Storage) (string, error) {
        // Mount the storage device.
        if err := mountStorage(storage); err != nil {
                return "", err
        }

        return storage.MountPoint, nil
}

 

// mountStorage performs the mount described by the storage structure.
func mountStorage(storage pb.Storage) error {
        flags, options := parseMountFlagsAndOptions(storage.Options)

        return mount(storage.Source, storage.MountPoint, storage.Fstype, flags, options)
}
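
parseMountFlagsAndOptions (not shown here) splits the option strings into mount(2) flags and the filesystem-specific data string passed to mount. A simplified, runnable sketch of that kind of translation (the agent's real table covers more flags):

package main

import (
        "fmt"
        "strings"
        "syscall"
)

// Simplified version of the flag/option split done before calling mount(2).
func parseFlags(options []string) (uintptr, string) {
        flagTable := map[string]uintptr{
                "ro":     syscall.MS_RDONLY,
                "nodev":  syscall.MS_NODEV,
                "nosuid": syscall.MS_NOSUID,
                "noexec": syscall.MS_NOEXEC,
                "bind":   syscall.MS_BIND,
                "rbind":  syscall.MS_BIND | syscall.MS_REC,
        }

        var flags uintptr
        var data []string

        for _, opt := range options {
                if f, ok := flagTable[opt]; ok {
                        flags |= f
                        continue
                }
                // Anything that is not a recognized flag is passed to the
                // filesystem as mount data (e.g. "size=64M" for tmpfs).
                data = append(data, opt)
        }

        return flags, strings.Join(data, ",")
}

func main() {
        flags, data := parseFlags([]string{"ro", "nosuid", "size=64M"})
        fmt.Printf("flags=%#x data=%q\n", flags, data)
}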

 

        mountList, err := addStorages(ctx, req.Storages, a.sandbox)
        if err != nil {
                return emptyResp, err
        }

        ctr := &container{
                id:              req.ContainerId,
                processes:       make(map[string]*process),
                mounts:          mountList,   // saved in ctr.mounts so they can be removed later
                useSandboxPidNs: req.SandboxPidns,
                agentPidNs:      req.AgentPidns,
                ctx:             ctx,
        }

 

 

The mount points saved in ctr.mounts above are then looked up in the sandbox storage map and released when the container is removed:

func (a *agentGRPC) RemoveContainer(ctx context.Context, req *pb.RemoveContainerRequest) (*gpb.Empty, error) {
        ctr, err := a.sandbox.getContainer(req.ContainerId)
        if err != nil {
                return emptyResp, err
        }

        timeout := int(req.Timeout)

        a.sandbox.Lock()
        defer a.sandbox.Unlock()

        if timeout == 0 {
                if err := ctr.removeContainer(); err != nil {
                        return emptyResp, err
                }

                // Find the sandbox storage used by this container
                for _, path := range ctr.mounts {
                        if _, ok := a.sandbox.storages[path]; ok {
                                if err := a.sandbox.unsetAndRemoveSandboxStorage(path); err != nil {
                                        return emptyResp, err
                                }
                        }
                }
        }
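
unsetAndRemoveSandboxStorage is the teardown counterpart of setSandboxStorage: it drops the container's reference and, only when the count reaches zero, unmounts the storage and deletes its mount point. A simplified sketch (the helper name unsetSandboxStorage and the direct unmount call are assumptions, not the verbatim agent code):

func (s *sandbox) unsetAndRemoveSandboxStorage(path string) error {
        // Drop this container's reference; 'removed' is true only when the
        // reference count has reached zero.
        removed, err := s.unsetSandboxStorage(path)
        if err != nil {
                return err
        }
        if !removed {
                // Other containers in the sandbox still use this storage.
                return nil
        }

        // Last user gone: unmount the storage and delete its mount point.
        if err := syscall.Unmount(path, 0); err != nil {
                return err
        }
        return os.RemoveAll(path)
}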

 

 

func (k *kataAgent) createContainer(sandbox *Sandbox, c *Container) (p *Process, err error) {
        span, _ := k.trace("createContainer")
        defer span.Finish()

        var ctrStorages []*grpc.Storage
        var ctrDevices []*grpc.Device
        var rootfs *grpc.Storage

        // This is the guest absolute root path for that container.
        rootPathParent := filepath.Join(kataGuestSharedDir(), c.id)
        rootPath := filepath.Join(rootPathParent, c.rootfsSuffix)

        // In case the container creation fails, the following defer statement
        // takes care of rolling back actions previously performed.
        defer func() {
                if err != nil {
                        k.Logger().WithError(err).Error("createContainer failed")
                        k.rollbackFailingContainerCreation(c)
                }
        }()

        if rootfs, err = k.buildContainerRootfs(sandbox, c, rootPathParent); err != nil {
                return nil, err
        } else if rootfs != nil {
                // Add rootfs to the list of container storage.
                // We only need to do this for block based rootfs, as we
                // want the agent to mount it into the right location
                // (kataGuestSharedDir/ctrID/).
                ctrStorages = append(ctrStorages, rootfs)
        }

        ociSpec := c.GetPatchedOCISpec()
        if ociSpec == nil {
                return nil, errorMissingOCISpec
        }

        // Handle container mounts
        newMounts, ignoredMounts, err := c.mountSharedDirMounts(getMountPath(sandbox.id), kataGuestSharedDir())
        if err != nil {
                return nil, err
        }

        k.handleShm(ociSpec.Mounts, sandbox)

        epheStorages := k.handleEphemeralStorage(ociSpec.Mounts)
        ctrStorages = append(ctrStorages, epheStorages...)

        localStorages := k.handleLocalStorage(ociSpec.Mounts, sandbox.id, c.rootfsSuffix)
        ctrStorages = append(ctrStorages, localStorages...)

        // We replace all OCI mount sources that match our container mount
        // with the right source path (The guest one).
        if err = k.replaceOCIMountSource(ociSpec, newMounts); err != nil {
                return nil, err
        }

        // Remove all mounts that should be ignored from the spec
        if err = k.removeIgnoredOCIMount(ociSpec, ignoredMounts); err != nil {
                return nil, err
        }

        // Append container devices for block devices passed with --device.
        ctrDevices = k.appendDevices(ctrDevices, c)

        // Handle all the volumes that are block device files.
        // Note this call modifies the list of container devices to make sure
        // all hotplugged devices are unplugged, so this needs be done
        // after devices passed with --device are handled.
        volumeStorages, err := k.handleBlockVolumes(c)
        if err != nil {
                return nil, err
        }
        if err := k.replaceOCIMountsForStorages(ociSpec, volumeStorages); err != nil {
                return nil, err
        }
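
mountSharedDirMounts works by bind-mounting eligible host mounts into the per-sandbox shared directory, so the guest reaches them through the 9p/virtio-fs share and replaceOCIMountSource can point the spec at the guest path. A rough illustration of the host/guest mapping (the directory layout below is an assumption based on the default configuration, and the file name is made up):

package main

import (
        "fmt"
        "path/filepath"
)

func main() {
        // Assumed defaults: host side of the share (per sandbox) and the guest
        // side mount point of the shared directory.
        hostShared := "/run/kata-containers/shared/sandboxes/<sandbox-id>"
        guestShared := "/run/kata-containers/shared/containers"

        // Illustrative per-mount file name generated by the runtime for a
        // container's resolv.conf bind mount.
        name := "<container-id>-resolv.conf"

        fmt.Println("host bind target :", filepath.Join(hostShared, name))
        fmt.Println("guest OCI source :", filepath.Join(guestShared, name))
}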

 
