sandbox/virtcontainers: memory resource hotplug when creating containers.
When creating a sandbox, we set up a sandbox with 128 MiB of base memory and
then hotplug the memory that each new container needs. We also change the
unit of c.config.Resources.Mem from MiB to bytes, so that sizes in the range
4095 B < memory < 1 MiB are no longer lost to rounding.

Fixes #400

Signed-off-by: Clare Chen <[email protected]>
Signed-off-by: Zichang Lin <[email protected]>
linzichang committed Sep 26, 2018
1 parent e509150 commit bd843a9
Showing 6 changed files with 57 additions and 111 deletions.
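For context on the unit change described in the commit message: dividing a byte limit down to MiB (a right shift by 20) truncates anything under 1 MiB to zero, while page alignment (shifting by 12) only drops the remainder below one 4 KiB page. A standalone Go sketch of the two conversions, with illustrative helper names that are not part of the diff:

package main

import "fmt"

// toMiB mimics the old conversion: >>20 truncates to whole MiB,
// so any limit under 1 MiB collapses to 0 and is lost.
func toMiB(limitBytes int64) uint32 {
	return uint32(limitBytes >> 20)
}

// pageAlign mimics the new conversion: round down to a 4 KiB page,
// so limits in the range 4095 B < memory < 1 MiB survive.
func pageAlign(limitBytes int64) int64 {
	return (limitBytes >> 12) << 12
}

func main() {
	limit := int64(512*1024 + 100) // a cgroup limit of 512 KiB and change
	fmt.Println(toMiB(limit))      // 0: the old MiB unit loses the whole limit
	fmt.Println(pageAlign(limit))  // 524288: kept, rounded down to a page
}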
2 changes: 1 addition & 1 deletion Makefile
@@ -124,7 +124,7 @@ DEFVCPUS := 1
 # Default maximum number of vCPUs
 DEFMAXVCPUS := 0
 # Default memory size in MiB
-DEFMEMSZ := 2048
+DEFMEMSZ := 128
 # Default memory slots
 # Cases to consider :
 # - nvdimm rootfs image
10 changes: 5 additions & 5 deletions virtcontainers/api.go
@@ -112,6 +112,11 @@ func createSandboxFromConfig(ctx context.Context, sandboxConfig SandboxConfig, f
 		}
 	}()
 
+	// get and store guest details
+	if err := s.getAndStoreGuestDetails(); err != nil {
+		return nil, err
+	}
+
 	// Create Containers
 	if err = s.createContainers(); err != nil {
 		return nil, err
@@ -122,11 +127,6 @@ func createSandboxFromConfig(ctx context.Context, sandboxConfig SandboxConfig, f
 		return nil, err
 	}
 
-	// get and store guest details
-	if err := s.getAndStoreGuestDetails(); err != nil {
-		return nil, err
-	}
-
 	return s, nil
 }
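A note on why the two hunks above move getAndStoreGuestDetails earlier: container creation can now hotplug memory, and the hotplug size is aligned to the guest memory block size that getAndStoreGuestDetails records, so the guest must be queried before createContainers runs instead of after. A simplified, self-contained sketch of the resulting order (the types and bodies below are hypothetical stand-ins, not the real virtcontainers code):

package main

// sandbox is a stand-in for vc.Sandbox, just to make the ordering compile.
type sandbox struct{ guestMemoryBlockSizeMB uint32 }

func (s *sandbox) getAndStoreGuestDetails() error {
	// the real call asks the agent and records the guest memory block size
	s.guestMemoryBlockSizeMB = 128
	return nil
}

func (s *sandbox) createContainers() error {
	// the real call may hotplug memory, aligned using the block size above
	return nil
}

func createSandboxFromConfig(s *sandbox) error {
	// guest details must now be stored before containers are created
	if err := s.getAndStoreGuestDetails(); err != nil {
		return err
	}
	return s.createContainers()
}

func main() { _ = createSandboxFromConfig(&sandbox{}) }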
77 changes: 47 additions & 30 deletions virtcontainers/container.go
@@ -178,7 +178,7 @@ type ContainerResources struct {
 	VCPUs uint32
 
 	// Mem is the memory that is being used by the container
-	MemMB uint32
+	Mem int64
 }
 
 // ContainerConfig describes one container runtime configuration.
@@ -984,7 +984,8 @@ func (c *Container) update(resources specs.LinuxResources) error {

 	newResources := ContainerResources{
 		VCPUs: uint32(utils.ConstraintsToVCPUs(*resources.CPU.Quota, *resources.CPU.Period)),
-		MemMB: uint32(*resources.Memory.Limit >> 20),
+		// page-align the memory, since cgroup memory.limit_in_bytes is aligned to the page size when applied
+		Mem: (*resources.Memory.Limit >> 12) << 12,
 	}
 
 	if err := c.updateResources(currentConfig.Resources, newResources); err != nil {
@@ -1181,15 +1182,14 @@ func (c *Container) detachDevices() error {
 }
 
 func (c *Container) addResources() error {
-	//TODO add support for memory, Issue: https://github.com/containers/virtcontainers/issues/578
 	if c.config == nil {
 		return nil
 	}
 
 	// Container is being created, try to add the number of vCPUs specified
 	vCPUs := c.config.Resources.VCPUs
 	if vCPUs != 0 {
-		virtLog.Debugf("hot adding %d vCPUs", vCPUs)
+		virtLog.Debugf("create container: hot adding %d vCPUs", vCPUs)
 		data, err := c.sandbox.hypervisor.hotplugAddDevice(vCPUs, cpuDev)
 		if err != nil {
 			return err
@@ -1210,14 +1210,32 @@
 			}
 		}
 
-		return c.sandbox.agent.onlineCPUMem(vcpusAdded, true)
+		if err := c.sandbox.agent.onlineCPUMem(vcpusAdded, true); err != nil {
+			return err
+		}
+	}
+
+	// try to add the amount of memory specified
+	addMem := c.config.Resources.Mem
+	if addMem != 0 {
+		memHotplugMB, err := c.calcHotplugMemSize(addMem)
+		if err != nil {
+			return err
+		}
+		virtLog.Debugf("create container: hotplug %dMB mem", memHotplugMB)
+		_, err = c.sandbox.hypervisor.hotplugAddDevice(&memoryDevice{sizeMB: int(memHotplugMB)}, memoryDev)
+		if err != nil {
+			return err
+		}
+		if err := c.sandbox.agent.onlineCPUMem(0, false); err != nil {
+			return err
+		}
+	}
 
 	return nil
 }
 
 func (c *Container) removeResources() error {
-	//TODO add support for memory, Issue: https://github.com/containers/virtcontainers/issues/578
 	if c.config == nil {
 		return nil
 	}
@@ -1237,6 +1255,7 @@
 			return err
 		}
 	}
+	// hot removing memory is not supported
 
 	return nil
 }
@@ -1259,7 +1278,7 @@ func (c *Container) updateVCPUResources(oldResources, newResources ContainerReso
 	if oldVCPUs < newVCPUs {
 		// hot add vCPUs
 		vCPUs = newVCPUs - oldVCPUs
-		virtLog.Debugf("hot adding %d vCPUs", vCPUs)
+		virtLog.Debugf("update container: hot adding %d vCPUs", vCPUs)
 		data, err := c.sandbox.hypervisor.hotplugAddDevice(vCPUs, cpuDev)
 		if err != nil {
 			return err
@@ -1291,45 +1310,43 @@
 	return nil
 }
 
-func (c *Container) memHotplugValid(mem uint32) (uint32, error) {
-	memorySectionSizeMB := c.sandbox.state.GuestMemoryBlockSizeMB
-	if memorySectionSizeMB == 0 {
-		return mem, nil
+// calculate the hotplug memory size from the guest OS memory block size
+func (c *Container) calcHotplugMemSize(mem int64) (uint32, error) {
+	memoryBlockSize := int64(c.sandbox.state.GuestMemoryBlockSizeMB << 20)
+	if memoryBlockSize == 0 {
+		return uint32(mem >> 20), nil
 	}
 
 	// TODO: hot-added memory should be aligned to the memory section size. See https://github.com/kata-containers/runtime/pull/624#issuecomment-419656853
-	return uint32(math.Ceil(float64(mem)/float64(memorySectionSizeMB))) * memorySectionSizeMB, nil
+	return uint32((int64(math.Ceil(float64(mem)/float64(memoryBlockSize))) * memoryBlockSize) >> 20), nil
 }
 
 func (c *Container) updateMemoryResources(oldResources, newResources ContainerResources) error {
-	oldMemMB := oldResources.MemMB
-	newMemMB := newResources.MemMB
+	oldMem := oldResources.Mem
+	newMem := newResources.Mem
 
-	if oldMemMB == newMemMB {
+	if oldMem == newMem {
 		c.Logger().WithFields(logrus.Fields{
-			"old-mem": fmt.Sprintf("%dMB", oldMemMB),
-			"new-mem": fmt.Sprintf("%dMB", newMemMB),
+			"old-mem": fmt.Sprintf("%d", oldMem),
+			"new-mem": fmt.Sprintf("%d", newMem),
 		}).Debug("the actual number of Mem will not be modified")
 		return nil
 	}
 
-	if oldMemMB < newMemMB {
+	if oldMem < newMem {
 		// hot add memory
-		addMemMB := newMemMB - oldMemMB
-		memHotplugMB, err := c.memHotplugValid(addMemMB)
+		addMem := newMem - oldMem
+		memHotplugMB, err := c.calcHotplugMemSize(addMem)
 		if err != nil {
 			return err
 		}
 
-		virtLog.Debugf("hotplug %dMB mem", memHotplugMB)
-		addMemDevice := &memoryDevice{
-			sizeMB: int(memHotplugMB),
-		}
-		_, err = c.sandbox.hypervisor.hotplugAddDevice(addMemDevice, memoryDev)
+		virtLog.Debugf("update container: hotplug %dMB mem", memHotplugMB)
+		_, err = c.sandbox.hypervisor.hotplugAddDevice(&memoryDevice{sizeMB: int(memHotplugMB)}, memoryDev)
 		if err != nil {
 			return err
 		}
-		newResources.MemMB = newMemMB
+		newResources.Mem = newMem
 		if err := c.sandbox.agent.onlineCPUMem(0, false); err != nil {
 			return err
 		}
@@ -1341,7 +1358,7 @@ func (c *Container) updateMemoryResources(oldResources, newResources ContainerRe
 func (c *Container) updateResources(oldResources, newResources ContainerResources) error {
 	// initialize with oldResources
 	c.config.Resources.VCPUs = oldResources.VCPUs
-	c.config.Resources.MemMB = oldResources.MemMB
+	c.config.Resources.Mem = oldResources.Mem
 
 	// Cpu is not updated if period and/or quota not set
 	if newResources.VCPUs != 0 {
@@ -1357,13 +1374,13 @@ func (c *Container) updateResources(oldResources, newResources ContainerResource
 	}
 
 	// Memory is not updated if memory limit not set
-	if newResources.MemMB != 0 {
+	if newResources.Mem != 0 {
 		if err := c.updateMemoryResources(oldResources, newResources); err != nil {
 			return err
 		}
 
-		// Set and save container's config MemMB field only
-		c.config.Resources.MemMB = newResources.MemMB
+		// Set and save container's config Mem field only
+		c.config.Resources.Mem = newResources.Mem
 		return c.storeContainer()
 	}
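A worked example of the calcHotplugMemSize rounding above: the guest can only online memory in whole blocks, so the requested byte count is rounded up to a multiple of the guest memory block size and returned in MiB. A standalone sketch with illustrative numbers (the block size is passed in as a parameter here rather than read from sandbox state):

package main

import (
	"fmt"
	"math"
)

// calcHotplugMemSize mirrors the helper added in this commit: mem is in
// bytes, the result is in MiB, rounded up to the guest memory block size.
func calcHotplugMemSize(mem int64, guestBlockSizeMB uint32) uint32 {
	memoryBlockSize := int64(guestBlockSizeMB) << 20
	if memoryBlockSize == 0 {
		// block size unknown: plain bytes-to-MiB conversion
		return uint32(mem >> 20)
	}
	blocks := int64(math.Ceil(float64(mem) / float64(memoryBlockSize)))
	return uint32((blocks * memoryBlockSize) >> 20)
}

func main() {
	// a 100 MiB request with a 128 MiB guest block size hotplugs
	// one whole block, i.e. 128 MiB
	fmt.Println(calcHotplugMemSize(100<<20, 128))
}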
4 changes: 2 additions & 2 deletions virtcontainers/hypervisor.go
@@ -33,8 +33,8 @@ const (
 
 const (
 	defaultVCPUs = 1
-	// 2 GiB
-	defaultMemSzMiB = 2048
+	// 128 MiB
+	defaultMemSzMiB = 128
 
 	defaultBridges = 1
 
27 changes: 2 additions & 25 deletions virtcontainers/pkg/oci/utils.go
@@ -412,25 +412,6 @@ func (spec *CompatOCISpec) SandboxID() (string, error) {
 	return "", fmt.Errorf("Could not find sandbox ID")
 }
 
-func updateVMConfig(ocispec CompatOCISpec, config *RuntimeConfig) error {
-	if ocispec.Linux == nil || ocispec.Linux.Resources == nil {
-		return nil
-	}
-
-	if ocispec.Linux.Resources.Memory != nil &&
-		ocispec.Linux.Resources.Memory.Limit != nil {
-		memBytes := *ocispec.Linux.Resources.Memory.Limit
-		if memBytes <= 0 {
-			return fmt.Errorf("Invalid OCI memory limit %d", memBytes)
-		}
-		// Use some math magic to round up to the nearest Mb.
-		// This has the side effect that we can never have <1Mb assigned.
-		config.HypervisorConfig.MemorySize = uint32((memBytes + (1024*1024 - 1)) / (1024 * 1024))
-	}
-
-	return nil
-}
-
 func addAssetAnnotations(ocispec CompatOCISpec, config *vc.SandboxConfig) {
 	assetAnnotations := []string{
 		vcAnnotations.KernelPath,
@@ -469,11 +450,6 @@ func SandboxConfig(ocispec CompatOCISpec, runtime RuntimeConfig, bundlePath, cid
 		return vc.SandboxConfig{}, err
 	}
 
-	err = updateVMConfig(ocispec, &runtime)
-	if err != nil {
-		return vc.SandboxConfig{}, err
-	}
-
 	ociSpecJSON, err := json.Marshal(ocispec)
 	if err != nil {
 		return vc.SandboxConfig{}, err
@@ -570,7 +546,8 @@ func ContainerConfig(ocispec CompatOCISpec, bundlePath, cid, console string, det
 	}
 	if ocispec.Linux.Resources.Memory != nil {
 		if ocispec.Linux.Resources.Memory.Limit != nil {
-			resources.MemMB = uint32(*ocispec.Linux.Resources.Memory.Limit >> 20)
+			// page-align the memory, since cgroup memory.limit_in_bytes is aligned to the page size when applied
+			resources.Mem = (*ocispec.Linux.Resources.Memory.Limit >> 12) << 12
 		}
 	}
 
48 changes: 0 additions & 48 deletions virtcontainers/pkg/oci/utils_test.go
@@ -254,54 +254,6 @@ func TestMinimalSandboxConfig(t *testing.T) {
 	}
 }
 
-func TestUpdateVmConfig(t *testing.T) {
-	var limitBytes int64 = 128 * 1024 * 1024
-	assert := assert.New(t)
-
-	config := RuntimeConfig{
-		HypervisorConfig: vc.HypervisorConfig{
-			MemorySize: 2048,
-		},
-	}
-
-	expectedMem := uint32(128)
-
-	ocispec := CompatOCISpec{
-		Spec: specs.Spec{
-			Linux: &specs.Linux{
-				Resources: &specs.LinuxResources{
-					Memory: &specs.LinuxMemory{
-						Limit: &limitBytes,
-					},
-				},
-			},
-		},
-	}
-
-	err := updateVMConfig(ocispec, &config)
-	assert.Nil(err)
-	assert.Equal(config.HypervisorConfig.MemorySize, expectedMem)
-
-	limitBytes = -128 * 1024 * 1024
-	ocispec.Linux.Resources.Memory.Limit = &limitBytes
-
-	err = updateVMConfig(ocispec, &config)
-	assert.NotNil(err)
-
-	// Test case when Memory is nil
-	ocispec.Spec.Linux.Resources.Memory = nil
-	err = updateVMConfig(ocispec, &config)
-	assert.Nil(err)
-
-	// Test case when CPU is nil
-	ocispec.Spec.Linux.Resources.CPU = nil
-	limitBytes = 20
-	ocispec.Linux.Resources.Memory = &specs.LinuxMemory{Limit: &limitBytes}
-	err = updateVMConfig(ocispec, &config)
-	assert.Nil(err)
-	assert.NotEqual(config.HypervisorConfig.MemorySize, expectedMem)
-}
-
 func testStatusToOCIStateSuccessful(t *testing.T, cStatus vc.ContainerStatus, expected specs.State) {
 	ociState := StatusToOCIState(cStatus)
 
