device: use devicemanager to manage rootfs block
Fixes #635

When the container rootfs is block based, as in the devicemapper use case, we can
reuse the sandbox device manager to manage the rootfs block device plug/unplug. We
no longer need a detailed description of the block device in the container state
file; instead we only need a block device ID referencing the sandbox device.

Remove `HotpluggedDrive` and `RootfsPCIAddr` from the state file because they are
no longer necessary.

Signed-off-by: Wei Zhang <[email protected]>
WeiZhang555 committed Aug 31, 2018
1 parent e599ef4 commit c518b1e
Showing 5 changed files with 98 additions and 90 deletions.
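For orientation before the per-file diffs: a minimal sketch of the flow this commit moves to. The container registers its rootfs block device with the sandbox device manager, attaches it, and records only the returned device ID in its state; removal later reverses that through the same manager. The interface and helper names below (deviceManager, blockDevice, hotplugRootfs, removeRootfs) are simplified stand-ins, not the exact virtcontainers API.

// Sketch with assumed names; not the exact virtcontainers API.
package rootfssketch

import "fmt"

// blockDevice is a simplified stand-in for a managed device.
type blockDevice interface {
    DeviceID() string
}

// deviceManager is a simplified stand-in for the sandbox device manager.
type deviceManager interface {
    NewDevice(hostPath string) (blockDevice, error)
    AttachDevice(id string, sandbox interface{}) error
    DetachDevice(id string, sandbox interface{}) error
    RemoveDevice(id string) error
}

// hotplugRootfs registers the rootfs block device, attaches it, and returns
// the device ID that the container persists (as state.BlockDeviceID here).
func hotplugRootfs(dm deviceManager, sandbox interface{}, devicePath string) (string, error) {
    dev, err := dm.NewDevice(devicePath)
    if err != nil {
        return "", fmt.Errorf("create rootfs device for %q: %v", devicePath, err)
    }
    if err := dm.AttachDevice(dev.DeviceID(), sandbox); err != nil {
        return "", err
    }
    return dev.DeviceID(), nil
}

// removeRootfs detaches and removes the device recorded in container state,
// replacing the old HotpluggedDrive bookkeeping.
func removeRootfs(dm deviceManager, sandbox interface{}, blockDeviceID string) error {
    if err := dm.DetachDevice(blockDeviceID, sandbox); err != nil {
        return err
    }
    return dm.RemoveDevice(blockDeviceID)
}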
96 changes: 44 additions & 52 deletions virtcontainers/container.go
@@ -342,28 +342,6 @@ func (c *Container) setStateFstype(fstype string) error {
return nil
}

func (c *Container) setStateHotpluggedDrive(hotplugged bool) error {
c.state.HotpluggedDrive = hotplugged

err := c.sandbox.storage.storeContainerResource(c.sandbox.id, c.id, stateFileType, c.state)
if err != nil {
return err
}

return nil
}

func (c *Container) setContainerRootfsPCIAddr(addr string) error {
c.state.RootfsPCIAddr = addr

err := c.sandbox.storage.storeContainerResource(c.sandbox.id, c.id, stateFileType, c.state)
if err != nil {
return err
}

return nil
}

// GetAnnotations returns container's annotations
func (c *Container) GetAnnotations() map[string]string {
return c.config.Annotations
@@ -1080,38 +1058,43 @@ func (c *Container) hotplugDrive() error {
return err
}

devicePath, err = filepath.EvalSymlinks(devicePath)
if err != nil {
return err
}

c.Logger().WithFields(logrus.Fields{
"device-path": devicePath,
"fs-type": fsType,
}).Info("Block device detected")

driveIndex, err := c.sandbox.getAndSetSandboxBlockIndex()
if err != nil {
return err
var stat unix.Stat_t
if err := unix.Stat(devicePath, &stat); err != nil {
return fmt.Errorf("stat %q failed: %v", devicePath, err)
}

// TODO: use general device manager instead of BlockDrive directly
// Add drive with id as container id
devID := utils.MakeNameID("drive", c.id, maxDevIDSize)
drive := config.BlockDrive{
File: devicePath,
Format: "raw",
ID: devID,
Index: driveIndex,
}

if _, err := c.sandbox.hypervisor.hotplugAddDevice(&drive, blockDev); err != nil {
return err
}
if c.checkBlockDeviceSupport() && stat.Mode&unix.S_IFBLK == unix.S_IFBLK {
b, err := c.sandbox.devManager.NewDevice(config.DeviceInfo{
HostPath: devicePath,
ContainerPath: filepath.Join(kataGuestSharedDir, c.id),
DevType: "b",
Major: int64(unix.Major(stat.Rdev)),
Minor: int64(unix.Minor(stat.Rdev)),
})
if err != nil {
return fmt.Errorf("device manager failed to create rootfs device for %q: %v", devicePath, err)
}

if drive.PCIAddr != "" {
c.setContainerRootfsPCIAddr(drive.PCIAddr)
}
c.state.BlockDeviceID = b.DeviceID()

c.setStateHotpluggedDrive(true)
// attach rootfs device
if err := c.sandbox.devManager.AttachDevice(b.DeviceID(), c.sandbox); err != nil {
return err
}

if err := c.setStateBlockIndex(driveIndex); err != nil {
return err
if err := c.sandbox.storeSandboxDevices(); err != nil {
return err
}
}

return c.setStateFstype(fsType)
@@ -1126,19 +1109,28 @@ func (c *Container) isDriveUsed() bool {
}

func (c *Container) removeDrive() (err error) {
if c.isDriveUsed() && c.state.HotpluggedDrive {
if c.isDriveUsed() {
c.Logger().Info("unplugging block device")

devID := utils.MakeNameID("drive", c.id, maxDevIDSize)
drive := &config.BlockDrive{
ID: devID,
devID := c.state.BlockDeviceID
err := c.sandbox.devManager.DetachDevice(devID, c.sandbox)
if err != nil && err != manager.ErrDeviceNotAttached {
return err
}

l := c.Logger().WithField("device-id", devID)
l.Info("Unplugging block device")
if err = c.sandbox.devManager.RemoveDevice(devID); err != nil {
c.Logger().WithFields(logrus.Fields{
"container": c.id,
"device-id": devID,
}).WithError(err).Error("remove device failed")

// ignore the device not exist error
if err != manager.ErrDeviceNotExist {
return err
}
}

if _, err := c.sandbox.hypervisor.hotplugRemoveDevice(drive, blockDev); err != nil {
l.WithError(err).Info("Failed to unplug block device")
if err := c.sandbox.storeSandboxDevices(); err != nil {
return err
}
}
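The detection step in hotplugDrive above (stat the resolved device path, check for S_IFBLK, and read the major/minor numbers before handing the device to the manager) can be sketched on its own. This assumes golang.org/x/sys/unix on Linux; blockDeviceNumbers is a hypothetical helper, not a function from the diff.

// Sketch of the stat-based block device check, assuming golang.org/x/sys/unix.
package rootfssketch

import (
    "fmt"

    "golang.org/x/sys/unix"
)

// blockDeviceNumbers returns the major/minor numbers of path if it refers to
// a block device, mirroring the check hotplugDrive performs before creating
// a device through the device manager.
func blockDeviceNumbers(path string) (major, minor int64, err error) {
    var st unix.Stat_t
    if err := unix.Stat(path, &st); err != nil {
        return 0, 0, fmt.Errorf("stat %q failed: %v", path, err)
    }
    if st.Mode&unix.S_IFMT != unix.S_IFBLK {
        return 0, 0, fmt.Errorf("%q is not a block device", path)
    }
    return int64(unix.Major(st.Rdev)), int64(unix.Minor(st.Rdev)), nil
}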
54 changes: 36 additions & 18 deletions virtcontainers/container_test.go
@@ -6,6 +6,7 @@
package virtcontainers

import (
"context"
"io/ioutil"
"os"
"os/exec"
@@ -15,6 +16,10 @@ import (
"syscall"
"testing"

"github.com/kata-containers/runtime/virtcontainers/device/api"
"github.com/kata-containers/runtime/virtcontainers/device/config"
"github.com/kata-containers/runtime/virtcontainers/device/drivers"
"github.com/kata-containers/runtime/virtcontainers/device/manager"
vcAnnotations "github.com/kata-containers/runtime/virtcontainers/pkg/annotations"
"github.com/stretchr/testify/assert"
)
@@ -83,7 +88,11 @@ func TestContainerSandbox(t *testing.T) {
}

func TestContainerRemoveDrive(t *testing.T) {
sandbox := &Sandbox{}
sandbox := &Sandbox{
id: "sandbox",
devManager: manager.NewDeviceManager(manager.VirtioSCSI, nil),
storage: &filesystem{},
}

container := Container{
sandbox: sandbox,
@@ -95,26 +104,35 @@

// hotplugRemoveDevice for hypervisor should not be called.
// test should pass without a hypervisor created for the container's sandbox.
if err != nil {
t.Fatal("")
}
assert.Nil(t, err, "remove drive should succeed")

container.state.Fstype = "xfs"
container.state.HotpluggedDrive = false
err = container.removeDrive()

// hotplugRemoveDevice for hypervisor should not be called.
sandbox.hypervisor = &mockHypervisor{}
path := "/dev/hda"
deviceInfo := config.DeviceInfo{
HostPath: path,
ContainerPath: path,
DevType: "b",
}
devReceiver := &api.MockDeviceReceiver{}

device, err := sandbox.devManager.NewDevice(deviceInfo)
assert.Nil(t, err)
_, ok := device.(*drivers.BlockDevice)
assert.True(t, ok)
err = device.Attach(devReceiver)
assert.Nil(t, err)
err = sandbox.storage.createAllResources(context.Background(), sandbox)
if err != nil {
t.Fatal("")
t.Fatal(err)
}

container.state.HotpluggedDrive = true
sandbox.hypervisor = &mockHypervisor{}
err = container.removeDrive()
err = sandbox.storeSandboxDevices()
assert.Nil(t, err)

if err != nil {
t.Fatal()
}
container.state.Fstype = "xfs"
container.state.BlockDeviceID = device.DeviceID()
err = container.removeDrive()
assert.Nil(t, err, "remove drive should succeed")
}

func testSetupFakeRootfs(t *testing.T) (testRawFile, loopDev, mntDir string, err error) {
Expand Down Expand Up @@ -197,6 +215,7 @@ func TestContainerAddDriveDir(t *testing.T) {
fs := &filesystem{}
sandbox := &Sandbox{
id: testSandboxID,
devManager: manager.NewDeviceManager(manager.VirtioSCSI, nil),
storage: fs,
hypervisor: &mockHypervisor{},
agent: &noopAgent{},
@@ -243,14 +262,13 @@
}()

container.state.Fstype = ""
container.state.HotpluggedDrive = false

err = container.hotplugDrive()
if err != nil {
t.Fatalf("Error with hotplugDrive :%v", err)
}

if container.state.Fstype == "" || !container.state.HotpluggedDrive {
if container.state.Fstype == "" {
t.Fatal()
}
}
27 changes: 15 additions & 12 deletions virtcontainers/kata_agent.go
@@ -766,7 +766,7 @@ func (k *kataAgent) rollbackFailingContainerCreation(c *Container) {
}

func (k *kataAgent) buildContainerRootfs(sandbox *Sandbox, c *Container, rootPathParent string) (*grpc.Storage, error) {
if c.state.Fstype != "" {
if c.state.Fstype != "" && c.state.BlockDeviceID != "" {
// The rootfs storage volume represents the container rootfs
// mount point inside the guest.
// It can be a block based device (when using block based container
@@ -775,23 +775,25 @@ func (k *kataAgent) buildContainerRootfs(sandbox *Sandbox, c *Container, rootPat
rootfs := &grpc.Storage{}

// This is a block based device rootfs.
device := sandbox.devManager.GetDeviceByID(c.state.BlockDeviceID)
if device == nil {
k.Logger().WithField("device", c.state.BlockDeviceID).Error("failed to find device by id")
return nil, fmt.Errorf("failed to find device by id %q", c.state.BlockDeviceID)
}

blockDrive, ok := device.GetDeviceInfo().(*config.BlockDrive)
if !ok || blockDrive == nil {
k.Logger().Error("malformed block drive")
return nil, fmt.Errorf("malformed block drive")
}

// Pass a drive name only in case of virtio-blk driver.
// If virtio-scsi driver, the agent will be able to find the
// device based on the provided address.
if sandbox.config.HypervisorConfig.BlockDeviceDriver == VirtioBlock {
rootfs.Driver = kataBlkDevType
rootfs.Source = c.state.RootfsPCIAddr
rootfs.Source = blockDrive.VirtPath
} else {
scsiAddr, err := utils.GetSCSIAddress(c.state.BlockIndex)
if err != nil {
return nil, err
}

rootfs.Driver = kataSCSIDevType
rootfs.Source = scsiAddr
rootfs.Source = blockDrive.SCSIAddr
}

rootfs.MountPoint = rootPathParent
rootfs.Fstype = c.state.Fstype

Expand All @@ -801,6 +803,7 @@ func (k *kataAgent) buildContainerRootfs(sandbox *Sandbox, c *Container, rootPat

return rootfs, nil
}

// This is not a block based device rootfs.
// We are going to bind mount it into the 9pfs
// shared drive between the host and the guest.
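The virtio-blk versus virtio-scsi branch in buildContainerRootfs above can be condensed into a small, self-contained sketch. The string literal "virtio-blk" and the "blk"/"scsi" return values are assumptions standing in for the VirtioBlock, kataBlkDevType, and kataSCSIDevType constants, and the real code reads VirtPath/SCSIAddr from the device manager's BlockDrive info rather than taking them as parameters.

// Sketch with assumed constant values; not the exact kata_agent.go code.
package rootfssketch

// rootfsSource picks the guest-visible source for a block-based rootfs.
// With virtio-blk the drive is addressed by its virtio path, while with
// virtio-scsi the agent locates it by SCSI address.
func rootfsSource(blockDriver, virtPath, scsiAddr string) (driver, source string) {
    if blockDriver == "virtio-blk" {
        return "blk", virtPath
    }
    return "scsi", scsiAddr
}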
7 changes: 1 addition & 6 deletions virtcontainers/sandbox.go
@@ -54,18 +54,13 @@ const (
type State struct {
State stateString `json:"state"`

BlockDeviceID string
// Index of the block device passed to hypervisor.
BlockIndex int `json:"blockIndex"`

// File system of the rootfs incase it is block device
Fstype string `json:"fstype"`

// Bool to indicate if the drive for a container was hotplugged.
HotpluggedDrive bool `json:"hotpluggedDrive"`

// PCI slot at which the block device backing the container rootfs is attached.
RootfsPCIAddr string `json:"rootfsPCIAddr"`

// Pid is the process id of the sandbox container which is the first
// container to be started.
Pid int `json:"pid"`
4 changes: 2 additions & 2 deletions virtcontainers/vm.go
@@ -200,12 +200,12 @@ func (v *VM) ReseedRNG() error {
data := make([]byte, 512)
f, err := os.OpenFile(urandomDev, os.O_RDONLY, 0)
if err != nil {
v.logger().WithError(err).Warn("fail to open %s", urandomDev)
v.logger().WithError(err).Warnf("fail to open %s", urandomDev)
return err
}
defer f.Close()
if _, err = f.Read(data); err != nil {
v.logger().WithError(err).Warn("fail to read %s", urandomDev)
v.logger().WithError(err).Warnf("fail to read %s", urandomDev)
return err
}

