This repository has been archived by the owner on May 12, 2021. It is now read-only.

virtcontainers/qemu: honour CPU constraints
Don't fail if a new container with a CPU constraint is added to
a pod and no more vCPUs are available; instead, apply the constraint
and let the kernel balance the resources.

Signed-off-by: Julio Montes <[email protected]>
Julio Montes committed May 14, 2018
1 parent 07db945 commit 4527a80
Showing 8 changed files with 151 additions and 87 deletions.
87 changes: 60 additions & 27 deletions virtcontainers/container.go
@@ -57,15 +57,11 @@ type ContainerStatus struct {
 
 // ContainerResources describes container resources
 type ContainerResources struct {
-	// CPUQuota specifies the total amount of time in microseconds
-	// The number of microseconds per CPUPeriod that the container is guaranteed CPU access
-	CPUQuota int64
+	// VCPUs are the number of vCPUs that are being used by the container
+	VCPUs uint32
 
-	// CPUPeriod specifies the CPU CFS scheduler period of time in microseconds
-	CPUPeriod uint64
-
-	// CPUShares specifies container's weight vs. other containers
-	CPUShares uint64
+	// Mem is the memory that is being used by the container
+	Mem uint32
 }
 
 // ContainerConfig describes one container runtime configuration.
@@ -804,8 +800,7 @@ func (c *Container) update(resources specs.LinuxResources) error {
 	}
 
 	newResources := ContainerResources{
-		CPUPeriod: *resources.CPU.Period,
-		CPUQuota:  *resources.CPU.Quota,
+		VCPUs: uint32(utils.ConstraintsToVCPUs(*resources.CPU.Quota, *resources.CPU.Period)),
 	}
 
 	if err := c.updateResources(currentConfig.Resources, newResources); err != nil {
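
For context, utils.ConstraintsToVCPUs (whose definition is not part of this diff) converts a CFS quota/period pair into a whole number of vCPUs. A minimal sketch of that conversion, assuming the usual round-up semantics; the real helper in virtcontainers/utils may differ in detail:

package utils

// ConstraintsToVCPUs converts a CFS quota (microseconds of CPU time granted
// per period) and a period (microseconds) into a whole number of vCPUs,
// rounding up so that a fractional quota still receives a full vCPU.
func ConstraintsToVCPUs(quota int64, period uint64) uint {
	if quota != 0 && period != 0 {
		// e.g. quota=5000, period=1000 -> 5 vCPUs
		//      quota=1500, period=1000 -> 2 vCPUs (1.5 rounded up)
		return uint((uint64(quota) + period - 1) / period)
	}
	return 0
}

Under these semantics, the quota=5000/period=1000 pair the old tests used maps to the 5 vCPUs the updated tests below set directly.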
@@ -866,7 +861,7 @@ func (c *Container) hotplugDrive() error {
 		Index: driveIndex,
 	}
 
-	if err := c.sandbox.hypervisor.hotplugAddDevice(&drive, blockDev); err != nil {
+	if _, err := c.sandbox.hypervisor.hotplugAddDevice(&drive, blockDev); err != nil {
 		return err
 	}
 
@@ -903,7 +898,7 @@ func (c *Container) removeDrive() (err error) {
 	l := c.Logger().WithField("device-id", devID)
 	l.Info("Unplugging block device")
 
-	if err := c.sandbox.hypervisor.hotplugRemoveDevice(drive, blockDev); err != nil {
+	if _, err := c.sandbox.hypervisor.hotplugRemoveDevice(drive, blockDev); err != nil {
 		l.WithError(err).Info("Failed to unplug block device")
 		return err
 	}
@@ -938,14 +933,31 @@ func (c *Container) addResources() error {
 		return nil
 	}
 
-	vCPUs := utils.ConstraintsToVCPUs(c.config.Resources.CPUQuota, c.config.Resources.CPUPeriod)
+	// Container is being created, try to add the number of vCPUs specified
+	vCPUs := c.config.Resources.VCPUs
 	if vCPUs != 0 {
 		virtLog.Debugf("hot adding %d vCPUs", vCPUs)
-		if err := c.sandbox.hypervisor.hotplugAddDevice(uint32(vCPUs), cpuDev); err != nil {
+		data, err := c.sandbox.hypervisor.hotplugAddDevice(vCPUs, cpuDev)
+		if err != nil {
 			return err
 		}
 
-		return c.sandbox.agent.onlineCPUMem(uint32(vCPUs))
+		vcpusAdded, ok := data.(uint32)
+		if !ok {
+			return fmt.Errorf("Could not get the number of vCPUs added, got %+v", data)
+		}
+
+		// A different number of vCPUs may have been added; update the
+		// resources so that vCPUs used by other containers are not removed.
+		if vcpusAdded != vCPUs {
+			// Set and save container's config
+			c.config.Resources.VCPUs = vcpusAdded
+			if err := c.storeContainer(); err != nil {
+				return err
+			}
+		}
+
+		return c.sandbox.agent.onlineCPUMem(vcpusAdded)
 	}
 
 	return nil
@@ -957,10 +969,18 @@ func (c *Container) removeResources() error {
 		return nil
 	}
 
-	vCPUs := utils.ConstraintsToVCPUs(c.config.Resources.CPUQuota, c.config.Resources.CPUPeriod)
+	// In order not to remove vCPUs used by other containers, remove
+	// only the vCPUs assigned to this container
+	config, err := c.sandbox.storage.fetchContainerConfig(c.sandbox.id, c.id)
+	if err != nil {
+		// don't fail, let's use the default configuration
+		config = *c.config
+	}
+
+	vCPUs := config.Resources.VCPUs
 	if vCPUs != 0 {
 		virtLog.Debugf("hot removing %d vCPUs", vCPUs)
-		if err := c.sandbox.hypervisor.hotplugRemoveDevice(uint32(vCPUs), cpuDev); err != nil {
+		if _, err := c.sandbox.hypervisor.hotplugRemoveDevice(vCPUs, cpuDev); err != nil {
 			return err
 		}
 	}
@@ -970,9 +990,9 @@ func (c *Container) updateResources(oldResources, newResources ContainerResources) error {
 
 func (c *Container) updateResources(oldResources, newResources ContainerResources) error {
 	//TODO add support for memory, Issue: https://github.com/containers/virtcontainers/issues/578
-	var vCPUs uint
-	oldVCPUs := utils.ConstraintsToVCPUs(oldResources.CPUQuota, oldResources.CPUPeriod)
-	newVCPUs := utils.ConstraintsToVCPUs(newResources.CPUQuota, newResources.CPUPeriod)
+	var vCPUs uint32
+	oldVCPUs := oldResources.VCPUs
+	newVCPUs := newResources.VCPUs
 
 	// Update vCPUs is not possible if period and/or quota are not set or
 	// oldVCPUs and newVCPUs are equal.
@@ -989,23 +1009,36 @@ func (c *Container) updateResources(oldResources, newResources ContainerResources) error {
 		// hot add vCPUs
 		vCPUs = newVCPUs - oldVCPUs
 		virtLog.Debugf("hot adding %d vCPUs", vCPUs)
-		if err := c.sandbox.hypervisor.hotplugAddDevice(uint32(vCPUs), cpuDev); err != nil {
+		data, err := c.sandbox.hypervisor.hotplugAddDevice(vCPUs, cpuDev)
+		if err != nil {
 			return err
 		}
+		vcpusAdded, ok := data.(uint32)
+		if !ok {
+			return fmt.Errorf("Could not get the number of vCPUs added, got %+v", data)
+		}
+		// recalculate the actual number of vCPUs if a different number of vCPUs was added
+		newResources.VCPUs = oldVCPUs + vcpusAdded
+		if err := c.sandbox.agent.onlineCPUMem(vcpusAdded); err != nil {
+			return err
+		}
 	} else {
 		// hot remove vCPUs
 		vCPUs = oldVCPUs - newVCPUs
 		virtLog.Debugf("hot removing %d vCPUs", vCPUs)
-		if err := c.sandbox.hypervisor.hotplugRemoveDevice(uint32(vCPUs), cpuDev); err != nil {
+		data, err := c.sandbox.hypervisor.hotplugRemoveDevice(vCPUs, cpuDev)
+		if err != nil {
 			return err
 		}
+		vcpusRemoved, ok := data.(uint32)
+		if !ok {
+			return fmt.Errorf("Could not get the number of vCPUs removed, got %+v", data)
+		}
+		// recalculate the actual number of vCPUs if a different number of vCPUs was removed
+		newResources.VCPUs = oldVCPUs - vcpusRemoved
 	}
 
 	// Set and save container's config
 	c.config.Resources = newResources
-	if err := c.storeContainer(); err != nil {
-		return err
-	}
-
-	return c.sandbox.agent.onlineCPUMem(uint32(vCPUs))
+	return c.storeContainer()
 }
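
To see why updateResources recomputes newResources.VCPUs from the hotplug result rather than trusting the request, consider a hypothetical run (all numbers invented for illustration):

package main

import "fmt"

func main() {
	// A container holding 2 vCPUs is updated to ask for 8, but the
	// hypervisor can only hot add 3 more before running out.
	oldVCPUs, newVCPUs := uint32(2), uint32(8)
	requested := newVCPUs - oldVCPUs // 6 vCPUs requested via hotplugAddDevice
	vcpusAdded := uint32(3)          // what the hypervisor reports it added

	// updateResources persists oldVCPUs + vcpusAdded (5), not the requested
	// total (8), so a later removeResources only unplugs vCPUs the container
	// actually owns.
	fmt.Println(requested, oldVCPUs+vcpusAdded)
}

Persisting the actual count is what keeps one container's shortfall from ever turning into unplugging vCPUs that belong to its neighbours.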
37 changes: 28 additions & 9 deletions virtcontainers/container_test.go
@@ -284,7 +284,11 @@ func TestCheckSandboxRunningSuccessful(t *testing.T) {
 func TestContainerAddResources(t *testing.T) {
 	assert := assert.New(t)
 
-	c := &Container{}
+	c := &Container{
+		sandbox: &Sandbox{
+			storage: &filesystem{},
+		},
+	}
 	err := c.addResources()
 	assert.Nil(err)
 
@@ -297,13 +301,16 @@ func TestContainerAddResources(t *testing.T) {
 	err = c.addResources()
 	assert.Nil(err)
 
+	vCPUs := uint32(5)
 	c.config.Resources = ContainerResources{
-		CPUQuota:  5000,
-		CPUPeriod: 1000,
+		VCPUs: vCPUs,
 	}
 	c.sandbox = &Sandbox{
-		hypervisor: &mockHypervisor{},
-		agent:      &noopAgent{},
+		hypervisor: &mockHypervisor{
+			vCPUs: vCPUs,
+		},
+		agent:   &noopAgent{},
+		storage: &filesystem{},
 	}
 	err = c.addResources()
 	assert.Nil(err)
@@ -312,7 +319,12 @@ func TestContainerRemoveResources(t *testing.T) {
 func TestContainerRemoveResources(t *testing.T) {
 	assert := assert.New(t)
 
-	c := &Container{}
+	c := &Container{
+		sandbox: &Sandbox{
+			storage: &filesystem{},
+		},
+	}
+
 	err := c.addResources()
 	assert.Nil(err)
 
@@ -325,11 +337,18 @@ func TestContainerRemoveResources(t *testing.T) {
 	err = c.removeResources()
 	assert.Nil(err)
 
+	vCPUs := uint32(5)
 	c.config.Resources = ContainerResources{
-		CPUQuota:  5000,
-		CPUPeriod: 1000,
+		VCPUs: vCPUs,
 	}
-	c.sandbox = &Sandbox{hypervisor: &mockHypervisor{}}
+
+	c.sandbox = &Sandbox{
+		hypervisor: &mockHypervisor{
+			vCPUs: vCPUs,
+		},
+		storage: &filesystem{},
+	}
+
 	err = c.removeResources()
 	assert.Nil(err)
 }
11 changes: 0 additions & 11 deletions virtcontainers/hyperstart_agent.go
@@ -430,17 +430,6 @@ func (h *hyper) startOneContainer(sandbox *Sandbox, c *Container) error {
 		Process: process,
 	}
 
-	if c.config.Resources.CPUQuota != 0 && c.config.Resources.CPUPeriod != 0 {
-		container.Constraints = hyperstart.Constraints{
-			CPUQuota:  c.config.Resources.CPUQuota,
-			CPUPeriod: c.config.Resources.CPUPeriod,
-		}
-	}
-
-	if c.config.Resources.CPUShares != 0 {
-		container.Constraints.CPUShares = c.config.Resources.CPUShares
-	}
-
 	container.SystemMountsInfo.BindMountDev = c.systemMountsInfo.BindMountDev
 
 	if c.state.Fstype != "" {
4 changes: 2 additions & 2 deletions virtcontainers/hypervisor.go
@@ -499,8 +499,8 @@ type hypervisor interface {
 	pauseSandbox() error
 	resumeSandbox() error
 	addDevice(devInfo interface{}, devType deviceType) error
-	hotplugAddDevice(devInfo interface{}, devType deviceType) error
-	hotplugRemoveDevice(devInfo interface{}, devType deviceType) error
+	hotplugAddDevice(devInfo interface{}, devType deviceType) (interface{}, error)
+	hotplugRemoveDevice(devInfo interface{}, devType deviceType) (interface{}, error)
 	getSandboxConsole(sandboxID string) (string, error)
 	capabilities() capabilities
 }
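
The qemu.go change that gives the commit its title is among the 8 changed files but is not shown on this page. Under the new signatures, a hypervisor may satisfy only part of a CPU hot add request and report what it actually did. A hedged sketch of that behaviour; the type, fields, and method below are illustrative, not the actual qemu.go code:

package main

import "fmt"

// qemuSketch stands in for the hypervisor's vCPU bookkeeping (hypothetical).
type qemuSketch struct {
	currentVCPUs, maxVCPUs uint32
}

// hotplugAddCPUs clamps the request to the remaining hotpluggable slots and
// reports how many vCPUs were actually added, rather than failing when the
// sandbox has fewer free slots than requested.
func (q *qemuSketch) hotplugAddCPUs(requested uint32) (uint32, error) {
	free := q.maxVCPUs - q.currentVCPUs
	if requested > free {
		requested = free // add what fits; the guest kernel balances the rest
	}
	// The real implementation would issue QMP hotplug commands here.
	q.currentVCPUs += requested
	return requested, nil
}

func main() {
	q := &qemuSketch{currentVCPUs: 6, maxVCPUs: 8}
	added, _ := q.hotplugAddCPUs(5)
	fmt.Println(added) // 2: only 2 of the 5 requested vCPUs fit
}

The callers shown above (addResources and updateResources) then persist and online only the returned count.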
17 changes: 13 additions & 4 deletions virtcontainers/mock_hypervisor.go
@@ -6,6 +6,7 @@
 package virtcontainers
 
 type mockHypervisor struct {
+	vCPUs uint32
 }
 
 func (m *mockHypervisor) init(sandbox *Sandbox) error {
@@ -49,12 +50,20 @@ func (m *mockHypervisor) addDevice(devInfo interface{}, devType deviceType) error {
 	return nil
 }
 
-func (m *mockHypervisor) hotplugAddDevice(devInfo interface{}, devType deviceType) error {
-	return nil
+func (m *mockHypervisor) hotplugAddDevice(devInfo interface{}, devType deviceType) (interface{}, error) {
+	switch devType {
+	case cpuDev:
+		return m.vCPUs, nil
+	}
+	return nil, nil
 }
 
-func (m *mockHypervisor) hotplugRemoveDevice(devInfo interface{}, devType deviceType) error {
-	return nil
+func (m *mockHypervisor) hotplugRemoveDevice(devInfo interface{}, devType deviceType) (interface{}, error) {
+	switch devType {
+	case cpuDev:
+		return m.vCPUs, nil
+	}
+	return nil, nil
 }
 
 func (m *mockHypervisor) getSandboxConsole(sandboxID string) (string, error) {
7 changes: 2 additions & 5 deletions virtcontainers/pkg/oci/utils.go
@@ -23,6 +23,7 @@ import (
 	"github.com/kata-containers/runtime/virtcontainers/device/config"
 	vcAnnotations "github.com/kata-containers/runtime/virtcontainers/pkg/annotations"
 	dockershimAnnotations "github.com/kata-containers/runtime/virtcontainers/pkg/annotations/dockershim"
+	"github.com/kata-containers/runtime/virtcontainers/utils"
 )
 
 type annotationContainerType struct {
@@ -562,11 +563,7 @@ func ContainerConfig(ocispec CompatOCISpec, bundlePath, cid, console string, detach bool) (vc.ContainerConfig, error) {
 	if ocispec.Linux.Resources.CPU != nil {
 		if ocispec.Linux.Resources.CPU.Quota != nil &&
 			ocispec.Linux.Resources.CPU.Period != nil {
-			resources.CPUQuota = *ocispec.Linux.Resources.CPU.Quota
-			resources.CPUPeriod = *ocispec.Linux.Resources.CPU.Period
-		}
-		if ocispec.Linux.Resources.CPU.Shares != nil {
-			resources.CPUShares = *ocispec.Linux.Resources.CPU.Shares
+			resources.VCPUs = uint32(utils.ConstraintsToVCPUs(*ocispec.Linux.Resources.CPU.Quota, *ocispec.Linux.Resources.CPU.Period))
 		}
 	}
 