This repository has been archived by the owner on May 12, 2021. It is now read-only.

virtcontainers: hotplug memory with kata-runtime update command
Add support for using the update command to hot plug memory into the VM.
Connect the kata-runtime update interface to the hypervisor memory hotplug
feature.

Fixes #625

Signed-off-by: Clare Chen <[email protected]>
cedriccchen committed Sep 17, 2018
1 parent 0928519 commit 13bf7d1
Showing 10 changed files with 121 additions and 25 deletions. In the hunks below, lines removed by this commit are prefixed with "-"; all other lines are context or additions.
3 changes: 2 additions & 1 deletion virtcontainers/agent.go
@@ -207,7 +207,8 @@ type agent interface {
// onlineCPUMem will online CPUs and Memory inside the Sandbox.
// This function should be called after hot adding vCPUs or Memory.
// cpus specifies the number of CPUs that were added and the agent should online
-onlineCPUMem(cpus uint32) error
// cpuOnly specifies whether only CPUs (true) or both CPUs and memory (false) should be onlined
onlineCPUMem(cpus uint32, cpuOnly bool) error

// statsContainer will tell the agent to get stats from a container related to a Sandbox
statsContainer(sandbox *Sandbox, c Container) (*ContainerStats, error)
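For illustration, the new flag is driven entirely by the call sites touched later in this commit. The condensed sketch below is hypothetical helper code, not code from this tree, and assumes it sits inside the virtcontainers package where the agent interface is defined:

// onlineAfterHotplug is a hypothetical wrapper showing how the callers in this
// commit pick cpuOnly: true when only vCPUs were hot-added, false when freshly
// hot-added memory must be onlined by the agent as well.
func onlineAfterHotplug(a agent, vcpusAdded uint32, memAdded bool) error {
	if memAdded {
		return a.onlineCPUMem(vcpusAdded, false)
	}
	return a.onlineCPUMem(vcpusAdded, true)
}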
6 changes: 6 additions & 0 deletions virtcontainers/api_test.go
@@ -2330,12 +2330,18 @@ func TestUpdateContainer(t *testing.T) {

period := uint64(1000)
quota := int64(2000)
memoryLimit := int64(1073741824)
memorySwap := int64(1073741824)
assert := assert.New(t)
resources := specs.LinuxResources{
CPU: &specs.LinuxCPU{
Period: &period,
Quota: &quota,
},
Memory: &specs.LinuxMemory{
Limit: &memoryLimit,
Swap: &memorySwap,
},
}
err := UpdateContainer(ctx, "", "", resources)
assert.Error(err)
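Beyond the error path exercised by this test, a caller can now raise the CPU and memory limits of a running container in a single call. A minimal sketch, assuming the virtcontainers package is imported as vc, the OCI types come from the runtime-spec specs-go package, and ctx plus the sandbox and container IDs (placeholders here) are already known:

period := uint64(10000)
quota := int64(20000)
memoryLimit := int64(2 << 30) // 2 GiB, in bytes, as carried by the OCI spec

resources := specs.LinuxResources{
	CPU:    &specs.LinuxCPU{Period: &period, Quota: &quota},
	Memory: &specs.LinuxMemory{Limit: &memoryLimit},
}

// UpdateContainer applies the new constraints, hot-plugging vCPUs and memory
// into the sandbox VM when the limits grow.
if err := vc.UpdateContainer(ctx, "sandbox-id", "container-id", resources); err != nil {
	// handle the error
}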
96 changes: 85 additions & 11 deletions virtcontainers/container.go
@@ -11,6 +11,7 @@ import (
"encoding/hex"
"fmt"
"io"
"math"
"os"
"path/filepath"
"syscall"
@@ -177,7 +178,7 @@ type ContainerResources struct {
VCPUs uint32

// Mem is the memory that is being used by the container
-Mem uint32
MemMB uint32
}

// ContainerConfig describes one container runtime configuration.
@@ -991,6 +992,7 @@ func (c *Container) update(resources specs.LinuxResources) error {

newResources := ContainerResources{
VCPUs: uint32(utils.ConstraintsToVCPUs(*resources.CPU.Quota, *resources.CPU.Period)),
MemMB: uint32(*resources.Memory.Limit >> 20),
}

if err := c.updateResources(currentConfig.Resources, newResources); err != nil {
@@ -1216,7 +1218,7 @@ func (c *Container) addResources() error {
}
}

-return c.sandbox.agent.onlineCPUMem(vcpusAdded)
return c.sandbox.agent.onlineCPUMem(vcpusAdded, true)
}

return nil
@@ -1247,16 +1249,14 @@ func (c *Container) removeResources() error {
return nil
}

-func (c *Container) updateResources(oldResources, newResources ContainerResources) error {
-//TODO add support for memory, Issue: https://github.com/containers/virtcontainers/issues/578
func (c *Container) updateVCPUResources(oldResources, newResources ContainerResources) error {
var vCPUs uint32
oldVCPUs := oldResources.VCPUs
newVCPUs := newResources.VCPUs

-// Update vCPUs is not possible if period and/or quota are not set or
-// oldVCPUs and newVCPUs are equal.
// Update vCPUs is not possible if oldVCPUs and newVCPUs are equal.
// Don't fail, the constraint still can be applied in the cgroup.
-if newVCPUs == 0 || oldVCPUs == newVCPUs {
if oldVCPUs == newVCPUs {
c.Logger().WithFields(logrus.Fields{
"old-vcpus": fmt.Sprintf("%d", oldVCPUs),
"new-vcpus": fmt.Sprintf("%d", newVCPUs),
@@ -1278,7 +1278,7 @@ func (c *Container) updateResources(oldResources, newResources ContainerResource
}
// recalculate the actual number of vCPUs if a different number of vCPUs was added
newResources.VCPUs = oldVCPUs + vcpusAdded
-if err := c.sandbox.agent.onlineCPUMem(vcpusAdded); err != nil {
if err := c.sandbox.agent.onlineCPUMem(vcpusAdded, true); err != nil {
return err
}
} else {
@@ -1296,8 +1296,82 @@ func (c *Container) updateResources(oldResources, newResources ContainerResource
// recalculate the actual number of vCPUs if a different number of vCPUs was removed
newResources.VCPUs = oldVCPUs - vcpusRemoved
}
return nil
}

func (c *Container) memHotplugValid(mem *uint32) error {
// TODO: make memory aligned to correct memory boundary according to different architecture
const memorySectionSizeMB = 128
// TODO: make hot add memory to be aligned to memory section in more proper way. See https://github.com/kata-containers/runtime/pull/624#issuecomment-419656853
*mem = uint32(math.Ceil(float64(*mem)/memorySectionSizeMB)) * memorySectionSizeMB

-// Set and save container's config
-c.config.Resources = newResources
-return c.storeContainer()
return nil
}
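memHotplugValid snaps a hot-add request up to the next 128 MB memory section before it reaches the hypervisor. A small self-contained illustration of that arithmetic (the 300 MB request is just an example value):

package main

import (
	"fmt"
	"math"
)

func main() {
	const memorySectionSizeMB = 128

	// Ask for 300 MB; memHotplugValid would round this up to 384 MB (3 sections).
	request := uint32(300)
	aligned := uint32(math.Ceil(float64(request)/memorySectionSizeMB)) * memorySectionSizeMB

	fmt.Printf("requested %d MB, hot adding %d MB\n", request, aligned)
}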

func (c *Container) updateMemoryResources(oldResources, newResources ContainerResources) error {
oldMemMB := oldResources.MemMB
newMemMB := newResources.MemMB

if oldMemMB == newMemMB {
c.Logger().WithFields(logrus.Fields{
"old-mem": fmt.Sprintf("%dMB", oldMemMB),
"new-mem": fmt.Sprintf("%dMB", newMemMB),
}).Debug("the actual number of Mem will not be modified")
return nil
}

if oldMemMB < newMemMB {
// hot add memory
addMemMB := newMemMB - oldMemMB
if err := c.memHotplugValid(&addMemMB); err != nil {
return err
}

virtLog.Debugf("hot adding %dMB mem", addMemMB)
addMemDevice := &memoryDevice{
sizeMB: int(addMemMB),
}
_, err := c.sandbox.hypervisor.hotplugAddDevice(addMemDevice, memoryDev)
if err != nil {
return err
}
newResources.MemMB = newMemMB
if err := c.sandbox.agent.onlineCPUMem(0, false); err != nil {
return err
}
}
// hot remove memory unsupported
return nil
}

func (c *Container) updateResources(oldResources, newResources ContainerResources) error {
// initialize with oldResources
c.config.Resources.VCPUs = oldResources.VCPUs
c.config.Resources.MemMB = oldResources.MemMB

// Cpu is not updated if period and/or quota not set
if newResources.VCPUs != 0 {
if err := c.updateVCPUResources(oldResources, newResources); err != nil {
return err
}

// Set and save container's config VCPUs field only
c.config.Resources.VCPUs = newResources.VCPUs
if err := c.storeContainer(); err != nil {
return err
}
}

// Memory is not updated if memory limit not set
if newResources.MemMB != 0 {
if err := c.updateMemoryResources(oldResources, newResources); err != nil {
return err
}

// Set and save container's config MemMB field only
c.config.Resources.MemMB = newResources.MemMB
return c.storeContainer()
}

return nil
}
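Note the asymmetry in the memory path: only a larger limit triggers hotplug, while a smaller one merely records the new MemMB and leaves enforcement to the cgroup, since hot removing memory is unsupported. A hypothetical fragment inside the virtcontainers package (c is an existing *Container, the surrounding function returns error, and the values are illustrative) would behave like this:

old := ContainerResources{VCPUs: 2, MemMB: 1024}

// Growing the limit hot-adds 1024 MB (rounded up to 128 MB sections) and onlines it.
if err := c.updateResources(old, ContainerResources{VCPUs: 2, MemMB: 2048}); err != nil {
	return err
}

// Shrinking the limit does not unplug memory from the VM; only the stored MemMB
// changes, and the tighter constraint is applied through the container's cgroup.
if err := c.updateResources(old, ContainerResources{VCPUs: 2, MemMB: 512}); err != nil {
	return err
}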
2 changes: 1 addition & 1 deletion virtcontainers/hyperstart_agent.go
@@ -883,7 +883,7 @@ func (h *hyper) sendCmd(proxyCmd hyperstartProxyCmd) (interface{}, error) {
return h.client.HyperWithTokens(proxyCmd.cmd, tokens, proxyCmd.message)
}

-func (h *hyper) onlineCPUMem(cpus uint32) error {
func (h *hyper) onlineCPUMem(cpus uint32, cpuOnly bool) error {
// hyperstart-agent uses udev to online CPUs automatically
return nil
}
7 changes: 4 additions & 3 deletions virtcontainers/kata_agent.go
@@ -1234,10 +1234,11 @@ func (k *kataAgent) resumeContainer(sandbox *Sandbox, c Container) error {
return err
}

-func (k *kataAgent) onlineCPUMem(cpus uint32) error {
func (k *kataAgent) onlineCPUMem(cpus uint32, cpuOnly bool) error {
req := &grpc.OnlineCPUMemRequest{
-Wait: false,
-NbCpus: cpus,
Wait: false,
NbCpus: cpus,
CpuOnly: cpuOnly,
}

_, err := k.sendReq(req)
2 changes: 1 addition & 1 deletion virtcontainers/kata_agent_test.go
@@ -317,7 +317,7 @@ func TestKataAgentSendReq(t *testing.T) {
err = k.resumeContainer(sandbox, Container{})
assert.Nil(err)

-err = k.onlineCPUMem(1)
err = k.onlineCPUMem(1, true)
assert.Nil(err)

_, err = k.statsContainer(sandbox, Container{})
2 changes: 1 addition & 1 deletion virtcontainers/noop_agent.go
@@ -94,7 +94,7 @@ func (n *noopAgent) updateContainer(sandbox *Sandbox, c Container, resources spe
}

// onlineCPUMem is the Noop agent Container online CPU and Memory implementation. It does nothing.
-func (n *noopAgent) onlineCPUMem(cpus uint32) error {
func (n *noopAgent) onlineCPUMem(cpus uint32, cpuOnly bool) error {
return nil
}

5 changes: 5 additions & 0 deletions virtcontainers/pkg/oci/utils.go
@@ -566,6 +566,11 @@ func ContainerConfig(ocispec CompatOCISpec, bundlePath, cid, console string, det
resources.VCPUs = uint32(utils.ConstraintsToVCPUs(*ocispec.Linux.Resources.CPU.Quota, *ocispec.Linux.Resources.CPU.Period))
}
}
if ocispec.Linux.Resources.Memory != nil {
if ocispec.Linux.Resources.Memory.Limit != nil {
resources.MemMB = uint32(*ocispec.Linux.Resources.Memory.Limit >> 20)
}
}

containerConfig := vc.ContainerConfig{
ID: cid,
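Here, as in container.go above, the OCI memory limit arrives in bytes and is reduced to whole mebibytes with a right shift. A quick self-contained check of that conversion (the limit value is illustrative):

package main

import "fmt"

func main() {
	// A 1 GiB limit as it appears in the OCI spec (bytes).
	limit := int64(1073741824)

	// >> 20 divides by 1 MiB (1048576) and truncates any remainder.
	memMB := uint32(limit >> 20)

	fmt.Println(memMB) // 1024
}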
21 changes: 15 additions & 6 deletions virtcontainers/qemu.go
@@ -1044,6 +1044,11 @@ func (q *qemu) hotplugMemory(memDev *memoryDevice, op operation) error {
return errors.New("cannot hot unplug memory device")
}

err := q.qmpSetup()
if err != nil {
return err
}

maxMem, err := q.hostMemMB()
if err != nil {
return err
@@ -1058,16 +1063,20 @@ func (q *qemu) hotplugMemory(memDev *memoryDevice, op operation) error {
memDev.sizeMB, currentMemory, q.config.MemorySize)
}

memoryDevices, err := q.qmpMonitorCh.qmp.ExecQueryMemoryDevices(q.qmpMonitorCh.ctx)
if err != nil {
return fmt.Errorf("failed to query memory devices: %v", err)
}

if len(memoryDevices) != 0 {
memDev.slot = memoryDevices[len(memoryDevices)-1].Data.Slot + 1
}

return q.hotplugAddMemory(memDev)
}

func (q *qemu) hotplugAddMemory(memDev *memoryDevice) error {
-err := q.qmpSetup()
-if err != nil {
-return err
-}
-
-err = q.qmpMonitorCh.qmp.ExecHotplugMemory(q.qmpMonitorCh.ctx, "memory-backend-ram", "mem"+strconv.Itoa(memDev.slot), "", memDev.sizeMB)
err := q.qmpMonitorCh.qmp.ExecHotplugMemory(q.qmpMonitorCh.ctx, "memory-backend-ram", "mem"+strconv.Itoa(memDev.slot), "", memDev.sizeMB)
if err != nil {
q.Logger().WithError(err).Error("hotplug memory")
return err
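Each hot-plugged DIMM needs its own slot and a matching backend object id, so the code above asks QMP which slots are already taken and uses the next one. A condensed, self-contained sketch of that bookkeeping with made-up query results:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Slots already occupied by earlier hot-plugged DIMMs, as a
	// query-memory-devices round trip might report them.
	occupiedSlots := []int{0, 1}

	slot := 0
	if len(occupiedSlots) != 0 {
		slot = occupiedSlots[len(occupiedSlots)-1] + 1
	}

	// Id handed to ExecHotplugMemory for the new backend/DIMM pair.
	fmt.Println("mem" + strconv.Itoa(slot)) // mem2
}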
2 changes: 1 addition & 1 deletion virtcontainers/vm.go
@@ -268,7 +268,7 @@ func (v *VM) AddMemory(numMB uint32) error {
// OnlineCPUMemory puts the hotplugged CPU and memory online.
func (v *VM) OnlineCPUMemory() error {
v.logger().Infof("online CPU %d and memory", v.cpuDelta)
-err := v.agent.onlineCPUMem(v.cpuDelta)
err := v.agent.onlineCPUMem(v.cpuDelta, false)
if err == nil {
v.cpuDelta = 0
}
