Skip to content
This repository has been archived by the owner on May 12, 2021. It is now read-only.

Commit

Permalink
agent: append and handle vhost-user-blk device
Browse files Browse the repository at this point in the history
Kata-runtime can append a vhost-user-blk device to the
device list of a container, and handle volumes that are
block devices of the VhostUserBlk type.

The vhost-user-blk device will be identified by its
PCI address by the Kata-agent inside the VM.

Fixes: #2380

Signed-off-by: Liu Xiaodong <[email protected]>
  • Loading branch information
dong-liuliu committed Mar 12, 2020
1 parent cf066b7 commit 126fa15
Show file tree
Hide file tree
Showing 2 changed files with 248 additions and 49 deletions.
169 changes: 122 additions & 47 deletions virtcontainers/kata_agent.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ import (
aTypes "github.com/kata-containers/agent/pkg/types"
kataclient "github.com/kata-containers/agent/protocols/client"
"github.com/kata-containers/agent/protocols/grpc"
"github.com/kata-containers/runtime/virtcontainers/device/api"
"github.com/kata-containers/runtime/virtcontainers/device/config"
persistapi "github.com/kata-containers/runtime/virtcontainers/persist/api"
vcAnnotations "github.com/kata-containers/runtime/virtcontainers/pkg/annotations"
Expand Down Expand Up @@ -1089,47 +1090,80 @@ func (k *kataAgent) handleShm(grpcSpec *grpc.Spec, sandbox *Sandbox) {
}
}

// appendBlockDevice translates a block ContainerDevice into a gRPC Device
// the agent can use to locate the device inside the guest. The identifier
// handed to the agent depends on the hypervisor's block device driver:
// MMIO virtual path, CCW device number, PCI address, SCSI address, or
// NVDIMM guest path. Returns nil when the stored device info is not a
// *config.BlockDrive or the driver is unknown.
func (k *kataAgent) appendBlockDevice(dev ContainerDevice, c *Container) *grpc.Device {
	device := c.sandbox.devManager.GetDeviceByID(dev.ID)

	d, ok := device.GetDeviceInfo().(*config.BlockDrive)
	if !ok || d == nil {
		k.Logger().WithField("device", device).Error("malformed block drive")
		return nil
	}

	kataDevice := &grpc.Device{
		ContainerPath: dev.ContainerPath,
	}

	switch c.sandbox.config.HypervisorConfig.BlockDeviceDriver {
	case config.VirtioMmio:
		kataDevice.Type = kataMmioBlkDevType
		kataDevice.Id = d.VirtPath
		kataDevice.VmPath = d.VirtPath
	case config.VirtioBlockCCW:
		kataDevice.Type = kataBlkCCWDevType
		kataDevice.Id = d.DevNo
	case config.VirtioBlock:
		kataDevice.Type = kataBlkDevType
		kataDevice.Id = d.PCIAddr
	case config.VirtioSCSI:
		kataDevice.Type = kataSCSIDevType
		kataDevice.Id = d.SCSIAddr
	case config.Nvdimm:
		kataDevice.Type = kataNvdimmDevType
		kataDevice.VmPath = fmt.Sprintf("/dev/pmem%s", d.NvdimmID)
	default:
		// Consistent with handleDeviceBlockVolume: an unknown driver
		// must not produce a device with an empty Type/Id that the
		// caller would blindly append to the device list.
		k.Logger().WithField("driver",
			c.sandbox.config.HypervisorConfig.BlockDeviceDriver).
			Error("Unknown block device driver")
		return nil
	}

	return kataDevice
}

// appendVhostUserBlkDevice translates a vhost-user-blk ContainerDevice
// into a gRPC Device. Inside the guest the device is exposed through
// virtio-blk, so the agent identifies it by its PCI address. Returns nil
// when the stored device info is not *config.VhostUserDeviceAttrs.
func (k *kataAgent) appendVhostUserBlkDevice(dev ContainerDevice, c *Container) *grpc.Device {
	device := c.sandbox.devManager.GetDeviceByID(dev.ID)

	attrs, ok := device.GetDeviceInfo().(*config.VhostUserDeviceAttrs)
	if !ok || attrs == nil {
		k.Logger().WithField("device", device).Error("malformed vhost-user-blk drive")
		return nil
	}

	return &grpc.Device{
		ContainerPath: dev.ContainerPath,
		Type:          kataBlkDevType,
		Id:            attrs.PCIAddr,
	}
}

func (k *kataAgent) appendDevices(deviceList []*grpc.Device, c *Container) []*grpc.Device {
var kataDevice *grpc.Device

for _, dev := range c.devices {
device := c.sandbox.devManager.GetDeviceByID(dev.ID)
if device == nil {
k.Logger().WithField("device", dev.ID).Error("failed to find device by id")
return nil
}

if device.DeviceType() != config.DeviceBlock {
continue
switch device.DeviceType() {
case config.DeviceBlock:
kataDevice = k.appendBlockDevice(dev, c)
case config.VhostUserBlk:
kataDevice = k.appendVhostUserBlkDevice(dev, c)
}

d, ok := device.GetDeviceInfo().(*config.BlockDrive)
if !ok || d == nil {
k.Logger().WithField("device", device).Error("malformed block drive")
if kataDevice == nil {
continue
}

kataDevice := &grpc.Device{
ContainerPath: dev.ContainerPath,
}

switch c.sandbox.config.HypervisorConfig.BlockDeviceDriver {
case config.VirtioMmio:
kataDevice.Type = kataMmioBlkDevType
kataDevice.Id = d.VirtPath
kataDevice.VmPath = d.VirtPath
case config.VirtioBlockCCW:
kataDevice.Type = kataBlkCCWDevType
kataDevice.Id = d.DevNo
case config.VirtioBlock:
kataDevice.Type = kataBlkDevType
kataDevice.Id = d.PCIAddr
case config.VirtioSCSI:
kataDevice.Type = kataSCSIDevType
kataDevice.Id = d.SCSIAddr
case config.Nvdimm:
kataDevice.Type = kataNvdimmDevType
kataDevice.VmPath = fmt.Sprintf("/dev/pmem%s", d.NvdimmID)
}

deviceList = append(deviceList, kataDevice)
}

Expand Down Expand Up @@ -1416,6 +1450,53 @@ func (k *kataAgent) handleLocalStorage(mounts []specs.Mount, sandboxID string, r
return localStorages
}

// handleDeviceBlockVolume builds the agent Storage object for a volume
// backed by a DeviceBlock device. The Storage Source identifying the
// device inside the guest depends on the hypervisor's block device
// driver: CCW device number, PCI address, MMIO virtual path, or SCSI
// address. Returns an error for malformed device info or an unknown
// driver.
func (k *kataAgent) handleDeviceBlockVolume(c *Container, device api.Device) (*grpc.Storage, error) {
	blockDrive, ok := device.GetDeviceInfo().(*config.BlockDrive)
	if !ok || blockDrive == nil {
		k.Logger().Error("malformed block drive")
		return nil, fmt.Errorf("malformed block drive")
	}

	vol := &grpc.Storage{}

	// Switch on the driver value directly rather than a tagless switch
	// that repeats the same left-hand side in every case.
	driver := c.sandbox.config.HypervisorConfig.BlockDeviceDriver
	switch driver {
	case config.VirtioBlockCCW:
		vol.Driver = kataBlkCCWDevType
		vol.Source = blockDrive.DevNo
	case config.VirtioBlock:
		vol.Driver = kataBlkDevType
		vol.Source = blockDrive.PCIAddr
	case config.VirtioMmio:
		vol.Driver = kataMmioBlkDevType
		vol.Source = blockDrive.VirtPath
	case config.VirtioSCSI:
		vol.Driver = kataSCSIDevType
		vol.Source = blockDrive.SCSIAddr
	default:
		return nil, fmt.Errorf("Unknown block device driver: %s", driver)
	}

	return vol, nil
}

// handleVhostUserBlkVolume builds the agent Storage object for a volume
// backed by a vhost-user-blk device. The device shows up as virtio-blk
// inside the guest, so it is addressed by its PCI address.
func (k *kataAgent) handleVhostUserBlkVolume(c *Container, device api.Device) (*grpc.Storage, error) {
	attrs, ok := device.GetDeviceInfo().(*config.VhostUserDeviceAttrs)
	if !ok || attrs == nil {
		k.Logger().Error("malformed vhost-user blk drive")
		return nil, fmt.Errorf("malformed vhost-user blk drive")
	}

	return &grpc.Storage{
		Driver: kataBlkDevType,
		Source: attrs.PCIAddr,
	}, nil
}

// handleBlockVolumes handles volumes that are block devices files
// by passing the block devices as Storage to the agent.
func (k *kataAgent) handleBlockVolumes(c *Container) ([]*grpc.Storage, error) {
Expand All @@ -1433,33 +1514,27 @@ func (k *kataAgent) handleBlockVolumes(c *Container) ([]*grpc.Storage, error) {
// device is detached with detachDevices() for a container.
c.devices = append(c.devices, ContainerDevice{ID: id, ContainerPath: m.Destination})

vol := &grpc.Storage{}
var vol *grpc.Storage

device := c.sandbox.devManager.GetDeviceByID(id)
if device == nil {
k.Logger().WithField("device", id).Error("failed to find device by id")
return nil, fmt.Errorf("Failed to find device by id (id=%s)", id)
}
blockDrive, ok := device.GetDeviceInfo().(*config.BlockDrive)
if !ok || blockDrive == nil {
k.Logger().Error("malformed block drive")

var err error
switch device.DeviceType() {
case config.DeviceBlock:
vol, err = k.handleDeviceBlockVolume(c, device)
case config.VhostUserBlk:
vol, err = k.handleVhostUserBlkVolume(c, device)
default:
k.Logger().Error("Unknown device type")
continue
}
switch {
case c.sandbox.config.HypervisorConfig.BlockDeviceDriver == config.VirtioBlockCCW:
vol.Driver = kataBlkCCWDevType
vol.Source = blockDrive.DevNo
case c.sandbox.config.HypervisorConfig.BlockDeviceDriver == config.VirtioBlock:
vol.Driver = kataBlkDevType
vol.Source = blockDrive.PCIAddr
case c.sandbox.config.HypervisorConfig.BlockDeviceDriver == config.VirtioMmio:
vol.Driver = kataMmioBlkDevType
vol.Source = blockDrive.VirtPath
case c.sandbox.config.HypervisorConfig.BlockDeviceDriver == config.VirtioSCSI:
vol.Driver = kataSCSIDevType
vol.Source = blockDrive.SCSIAddr
default:
return nil, fmt.Errorf("Unknown block device driver: %s", c.sandbox.config.HypervisorConfig.BlockDeviceDriver)

if vol == nil || err != nil {
return nil, err
}

vol.MountPoint = m.Destination
Expand Down
128 changes: 126 additions & 2 deletions virtcontainers/kata_agent_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -391,6 +391,82 @@ func TestHandleLocalStorage(t *testing.T) {
assert.Equal(t, localMountPoint, expected)
}

// TestHandleBlockVolume feeds handleBlockVolumes one vhost-user-blk
// backed volume and one virtio-blk backed volume, then verifies the
// Storage object produced for each of them.
func TestHandleBlockVolume(t *testing.T) {
	k := kataAgent{}

	c := &Container{
		id: "100",
	}
	containers := map[string]*Container{c.id: c}

	// One VhostUserBlk device and one DeviceBlock device.
	vDevID := "MockVhostUserBlk"
	bDevID := "MockDeviceBlock"
	vDestination := "/VhostUserBlk/destination"
	bDestination := "/DeviceBlock/destination"
	vPCIAddr := "0001:01"
	bPCIAddr := "0002:01"

	vDev := drivers.NewVhostUserBlkDevice(&config.DeviceInfo{ID: vDevID})
	bDev := drivers.NewBlockDevice(&config.DeviceInfo{ID: bDevID})

	vDev.VhostUserDeviceAttrs = &config.VhostUserDeviceAttrs{PCIAddr: vPCIAddr}
	bDev.BlockDrive = &config.BlockDrive{PCIAddr: bPCIAddr}

	devices := []api.Device{vDev, bDev}

	// One mount per device, in the same order as the device list.
	mounts := []Mount{
		{BlockDeviceID: vDevID, Destination: vDestination},
		{BlockDeviceID: bDevID, Destination: bDestination},
	}

	vhostUserStoreDir := "/vhost/user/dir"
	dm := manager.NewDeviceManager(manager.VirtioBlock, true, vhostUserStoreDir, devices)

	sConfig := SandboxConfig{}
	sConfig.HypervisorConfig.BlockDeviceDriver = manager.VirtioBlock
	sandbox := Sandbox{
		id:         "100",
		containers: containers,
		hypervisor: &mockHypervisor{},
		devManager: dm,
		ctx:        context.Background(),
		config:     &sConfig,
	}
	c.sandbox = &sandbox
	c.mounts = mounts

	volumeStorages, err := k.handleBlockVolumes(c)
	assert.Nil(t, err, "Error while handling block volumes")

	// Expected storages, index 0 for the vhost-user-blk volume and
	// index 1 for the virtio-blk one.
	expected := []*pb.Storage{
		{
			MountPoint: vDestination,
			Fstype:     "bind",
			Options:    []string{"bind"},
			Driver:     kataBlkDevType,
			Source:     vPCIAddr,
		},
		{
			MountPoint: bDestination,
			Fstype:     "bind",
			Options:    []string{"bind"},
			Driver:     kataBlkDevType,
			Source:     bPCIAddr,
		},
	}

	assert.Equal(t, expected[0], volumeStorages[0], "Error while handle VhostUserBlk type block volume")
	assert.Equal(t, expected[1], volumeStorages[1], "Error while handle BlockDevice type block volume")
}

func TestAppendDevicesEmptyContainerDeviceList(t *testing.T) {
k := kataAgent{}

Expand All @@ -400,7 +476,7 @@ func TestAppendDevicesEmptyContainerDeviceList(t *testing.T) {

c := &Container{
sandbox: &Sandbox{
devManager: manager.NewDeviceManager("virtio-scsi", nil),
devManager: manager.NewDeviceManager("virtio-scsi", false, "", nil),
},
devices: ctrDevices,
}
Expand Down Expand Up @@ -433,7 +509,55 @@ func TestAppendDevices(t *testing.T) {

c := &Container{
sandbox: &Sandbox{
devManager: manager.NewDeviceManager("virtio-blk", ctrDevices),
devManager: manager.NewDeviceManager("virtio-blk", false, "", ctrDevices),
config: sandboxConfig,
},
}
c.devices = append(c.devices, ContainerDevice{
ID: id,
ContainerPath: testBlockDeviceCtrPath,
})

devList := []*pb.Device{}
expected := []*pb.Device{
{
Type: kataBlkDevType,
ContainerPath: testBlockDeviceCtrPath,
Id: testPCIAddr,
},
}
updatedDevList := k.appendDevices(devList, c)
assert.True(t, reflect.DeepEqual(updatedDevList, expected),
"Device lists didn't match: got %+v, expecting %+v",
updatedDevList, expected)
}

func TestAppendVhostUserBlkDevices(t *testing.T) {
k := kataAgent{}

id := "test-append-vhost-user-blk"
ctrDevices := []api.Device{
&drivers.VhostUserBlkDevice{
GenericDevice: &drivers.GenericDevice{
ID: id,
},
VhostUserDeviceAttrs: &config.VhostUserDeviceAttrs{
Type: config.VhostUserBlk,
PCIAddr: testPCIAddr,
},
},
}

sandboxConfig := &SandboxConfig{
HypervisorConfig: HypervisorConfig{
BlockDeviceDriver: config.VirtioBlock,
},
}

testVhostUserStorePath := "/test/vhost/user/store/path"
c := &Container{
sandbox: &Sandbox{
devManager: manager.NewDeviceManager("virtio-blk", true, testVhostUserStorePath, ctrDevices),
config: sandboxConfig,
},
}
Expand Down

0 comments on commit 126fa15

Please sign in to comment.