This repository has been archived by the owner on Apr 3, 2018. It is now read-only.

pod: Improve pod create performance
To optimize pod creation, this patch no longer waits for the VM right
after it has been launched. Instead, the wait is deferred to the "start"
stage. This lets the VM boot while the caller potentially does other
things between create and start, which globally improves the boot time
of our containers.

Signed-off-by: Sebastien Boeuf <[email protected]>
Sebastien Boeuf committed Nov 14, 2017
1 parent e62b2ae commit b20ba87
Showing 5 changed files with 88 additions and 90 deletions.
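Before reading the diff, here is a minimal, self-contained sketch of the contract this commit moves to: startPod() launches the VM and returns immediately, while waitPod(timeout) blocks until the VM is reachable or the timeout (in seconds) expires. The two method signatures come from the hypervisor interface below; the fakeHypervisor type, its timings, and main() are illustrative assumptions, not virtcontainers code.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// hypervisor mirrors the interface this commit introduces (see hypervisor.go):
// startPod launches the VM without blocking; waitPod blocks until the VM is
// reachable or the timeout (in seconds) expires.
type hypervisor interface {
	startPod() error
	waitPod(timeout int) error
}

// fakeHypervisor is a toy stand-in (not part of virtcontainers) that
// simulates a VM becoming reachable 100ms after launch.
type fakeHypervisor struct {
	bootedAt time.Time
}

func (f *fakeHypervisor) startPod() error {
	f.bootedAt = time.Now().Add(100 * time.Millisecond)
	return nil // returns immediately; the "VM" boots in the background
}

func (f *fakeHypervisor) waitPod(timeout int) error {
	deadline := time.Now().Add(time.Duration(timeout) * time.Second)
	for time.Now().Before(f.bootedAt) {
		if time.Now().After(deadline) {
			return errors.New("timeout waiting for VM")
		}
		time.Sleep(10 * time.Millisecond)
	}
	return nil
}

func main() {
	var h hypervisor = &fakeHypervisor{}

	h.startPod() // "create" stage: launch and return
	fmt.Println("doing other work while the VM boots...")
	if err := h.waitPod(10); err != nil { // "start" stage: block until up
		fmt.Println(err)
		return
	}
	fmt.Println("VM is up")
}
```

The work done between startPod and waitPod is exactly what the commit message means by "other things between create and start": it now overlaps with the VM boot instead of happening after it.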
hypervisor.go: 3 changes (2 additions & 1 deletion)
@@ -446,7 +446,8 @@ func RunningOnVMM(cpuInfoPath string) (bool, error) {
 type hypervisor interface {
     init(pod *Pod) error
     createPod(podConfig PodConfig) error
-    startPod(startCh, stopCh chan struct{}) error
+    startPod() error
+    waitPod(timeout int) error
     stopPod() error
     pausePod() error
     resumePod() error
mock_hypervisor.go: 8 changes (5 additions & 3 deletions)
@@ -36,9 +36,11 @@ func (m *mockHypervisor) createPod(podConfig PodConfig) error {
     return nil
 }
 
-func (m *mockHypervisor) startPod(startCh, stopCh chan struct{}) error {
-    var msg struct{}
-    startCh <- msg
+func (m *mockHypervisor) startPod() error {
+    return nil
+}
+
+func (m *mockHypervisor) waitPod(timeout int) error {
     return nil
 }
 
mock_hypervisor_test.go: 32 changes (13 additions & 19 deletions)
Expand Up @@ -19,7 +19,6 @@ package virtcontainers
 import (
     "fmt"
     "testing"
-    "time"
 )

@@ -36,8 +35,7 @@ func TestMockHypervisorInit(t *testing.T) {
     }
 
     // wrong config
-    err := m.init(pod)
-    if err == nil {
+    if err := m.init(pod); err == nil {
         t.Fatal()
     }
 
@@ -48,8 +46,7 @@ func TestMockHypervisorInit(t *testing.T) {
     }
 
     // right config
-    err = m.init(pod)
-    if err != nil {
+    if err := m.init(pod); err != nil {
         t.Fatal(err)
     }
 }
@@ -59,42 +56,39 @@ func TestMockHypervisorCreatePod(t *testing.T) {

     config := PodConfig{}
 
-    err := m.createPod(config)
-    if err != nil {
+    if err := m.createPod(config); err != nil {
         t.Fatal(err)
     }
 }
 
 func TestMockHypervisorStartPod(t *testing.T) {
     var m *mockHypervisor
 
-    startCh := make(chan struct{})
-    stopCh := make(chan struct{})
+    if err := m.startPod(); err != nil {
+        t.Fatal(err)
+    }
+}
 
-    go m.startPod(startCh, stopCh)
+func TestMockHypervisorWaitPod(t *testing.T) {
+    var m *mockHypervisor
 
-    select {
-    case <-startCh:
-        break
-    case <-time.After(time.Second):
-        t.Fatal("Timeout waiting for start notification")
+    if err := m.waitPod(0); err != nil {
+        t.Fatal(err)
     }
 }
 
 func TestMockHypervisorStopPod(t *testing.T) {
     var m *mockHypervisor
 
-    err := m.stopPod()
-    if err != nil {
+    if err := m.stopPod(); err != nil {
         t.Fatal(err)
     }
 }
 
 func TestMockHypervisorAddDevice(t *testing.T) {
     var m *mockHypervisor
 
-    err := m.addDevice(nil, imgDev)
-    if err != nil {
+    if err := m.addDevice(nil, imgDev); err != nil {
         t.Fatal(err)
     }
 }
pod.go: 46 changes (18 additions & 28 deletions)
@@ -23,7 +23,6 @@ import (
"strings"
"sync"
"syscall"
"time"

"github.com/sirupsen/logrus"
)
@@ -39,6 +38,10 @@ const controlSocket = "ctrl.sock"
 // to understand if the VM is still alive or not.
 const monitorSocket = "monitor.sock"
 
+// vmStartTimeout represents the time in seconds a pod can wait before
+// considering the VM start operation failed.
+const vmStartTimeout = 10
+
 // stateString is a string representing a pod state.
 type stateString string

@@ -748,34 +751,13 @@ func (p *Pod) startSetState() error {
     return nil
 }
 
-// startVM starts the VM, ensuring it is started before it returns or issuing
-// an error in case of timeout. Then it connects to the agent inside the VM.
+// startVM starts the VM.
 func (p *Pod) startVM(netNsPath string) error {
-    vmStartedCh := make(chan struct{})
-    vmStoppedCh := make(chan struct{})
-    const timeout = time.Duration(10) * time.Second
-
-    l := p.Logger()
-    l.Info("Starting VM")
-
-    go func() {
-        p.network.run(netNsPath, func() error {
-            err := p.hypervisor.startPod(vmStartedCh, vmStoppedCh)
-            return err
-        })
-    }()
-
-    // Wait for the pod started notification
-    select {
-    case <-vmStartedCh:
-        break
-    case <-time.After(timeout):
-        return fmt.Errorf("Did not receive the pod started notification (timeout %ds)", timeout)
-    }
-
-    l.Info("VM started")
+    p.Logger().Info("Starting VM")
 
-    return nil
+    return p.network.run(netNsPath, func() error {
+        return p.hypervisor.startPod()
+    })
 }
 
 // startShims registers all containers to the proxy and starts one
@@ -835,6 +817,14 @@ func (p *Pod) start() error {
         return err
     }
 
+    l := p.Logger()
+
+    if err := p.hypervisor.waitPod(vmStartTimeout); err != nil {
+        return err
+    }
+
+    l.Info("VM started")
+
     if _, _, err := p.proxy.connect(*p, false); err != nil {
         return err
     }
@@ -855,7 +845,7 @@
         }
     }
 
-    p.Logger().Info("started")
+    l.Info("started")
 
     return nil
 }
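The new startVM above delegates to p.network.run(netNsPath, ...), which runs the hypervisor launch inside the pod's network namespace. A rough sketch of that pattern follows, assuming the github.com/vishvananda/netns package; the helper name runInNetNs and its details are illustrative, not virtcontainers' actual implementation, and it is Linux-only.

```go
package main

import (
	"fmt"
	"runtime"

	"github.com/vishvananda/netns"
)

// runInNetNs sketches the pattern p.network.run(netNsPath, cb) relies on:
// pin the goroutine to its OS thread, switch that thread into the target
// network namespace, run the callback, then switch back.
func runInNetNs(netNsPath string, cb func() error) error {
	// Namespace membership is per OS thread, so the goroutine must not
	// migrate between threads while we are switched over.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	origin, err := netns.Get() // handle to the current namespace
	if err != nil {
		return err
	}
	defer origin.Close()
	defer func() { _ = netns.Set(origin) }() // always switch back

	target, err := netns.GetFromPath(netNsPath)
	if err != nil {
		return err
	}
	defer target.Close()

	if err := netns.Set(target); err != nil {
		return err
	}

	// e.g. func() error { return p.hypervisor.startPod() }
	return cb()
}

func main() {
	err := runInNetNs("/proc/self/ns/net", func() error {
		fmt.Println("running inside the target network namespace")
		return nil
	})
	if err != nil {
		fmt.Println("error:", err)
	}
}
```

Launching QEMU from inside the callback means its tap and veth devices are created in the pod's namespace without any extra plumbing.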
qemu.go: 89 changes (50 additions & 39 deletions)
@@ -573,40 +573,6 @@ func (q *qemu) init(pod *Pod) error {
     return nil
 }
 
-func (q *qemu) qmpMonitor(connectedCh chan struct{}) {
-    defer func(qemu *qemu) {
-        if q.qmpMonitorCh.qmp != nil {
-            q.qmpMonitorCh.qmp.Shutdown()
-        }
-
-        q.qmpMonitorCh.wg.Done()
-    }(q)
-
-    cfg := ciaoQemu.QMPConfig{Logger: newQMPLogger()}
-    qmp, ver, err := ciaoQemu.QMPStart(q.qmpMonitorCh.ctx, q.qmpMonitorCh.path, cfg, q.qmpMonitorCh.disconnectCh)
-    if err != nil {
-        q.Logger().WithError(err).Error("Failed to connect to QEMU instance")
-        return
-    }
-
-    q.qmpMonitorCh.qmp = qmp
-
-    q.Logger().WithFields(logrus.Fields{
-        "qmp-major-version": ver.Major,
-        "qmp-minor-version": ver.Minor,
-        "qmp-micro-version": ver.Micro,
-        "qmp-capabilities":  strings.Join(ver.Capabilities, ","),
-    }).Infof("QMP details")
-
-    err = q.qmpMonitorCh.qmp.ExecuteQMPCapabilities(q.qmpMonitorCh.ctx)
-    if err != nil {
-        q.Logger().WithError(err).Error(qmpCapErrMsg)
-        return
-    }
-
-    close(connectedCh)
-}
-
 func (q *qemu) setCPUResources(podConfig PodConfig) ciaoQemu.SMP {
     vcpus := q.config.DefaultVCPUs
     if podConfig.VMConfig.VCPUs > 0 {
@@ -775,16 +741,61 @@ func (q *qemu) createPod(podConfig PodConfig) error {
 }
 
 // startPod will start the Pod's VM.
-func (q *qemu) startPod(startCh, stopCh chan struct{}) error {
+func (q *qemu) startPod() error {
     strErr, err := ciaoQemu.LaunchQemu(q.qemuConfig, newQMPLogger())
     if err != nil {
         return fmt.Errorf("%s", strErr)
     }
 
-    // Start the QMP monitoring thread
-    q.qmpMonitorCh.disconnectCh = stopCh
-    q.qmpMonitorCh.wg.Add(1)
-    q.qmpMonitor(startCh)
+    return nil
+}
+
+// waitPod will wait for the Pod's VM to be up and running.
+func (q *qemu) waitPod(timeout int) error {
+    defer func(qemu *qemu) {
+        if q.qmpMonitorCh.qmp != nil {
+            q.qmpMonitorCh.qmp.Shutdown()
+        }
+    }(q)
+
+    if timeout < 0 {
+        return fmt.Errorf("Invalid timeout %ds", timeout)
+    }
+
+    disconnectCh := make(chan struct{})
+    cfg := ciaoQemu.QMPConfig{Logger: newQMPLogger()}
+
+    var qmp *ciaoQemu.QMP
+    var ver *ciaoQemu.QMPVersion
+    var err error
+
+    timeStart := time.Now()
+    for {
+        qmp, ver, err = ciaoQemu.QMPStart(q.qmpMonitorCh.ctx, q.qmpMonitorCh.path, cfg, disconnectCh)
+        if err == nil {
+            break
+        }
+
+        if int(time.Now().Sub(timeStart).Seconds()) > timeout {
+            return fmt.Errorf("Failed to connect to QEMU instance (timeout %ds): %v", timeout, err)
+        }
+
+        time.Sleep(time.Duration(50) * time.Millisecond)
+    }
+
+    q.qmpMonitorCh.qmp = qmp
+
+    q.Logger().WithFields(logrus.Fields{
+        "qmp-major-version": ver.Major,
+        "qmp-minor-version": ver.Minor,
+        "qmp-micro-version": ver.Micro,
+        "qmp-capabilities":  strings.Join(ver.Capabilities, ","),
+    }).Infof("QMP details")
+
+    if err = q.qmpMonitorCh.qmp.ExecuteQMPCapabilities(q.qmpMonitorCh.ctx); err != nil {
+        q.Logger().WithError(err).Error(qmpCapErrMsg)
+        return err
+    }
 
     return nil
 }
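The connect loop in the new waitPod (retry QMPStart every 50ms until the deadline passes) is a general "poll until ready" pattern. A standalone sketch of the same idea follows; pollUntil and the probe are illustrative names, not part of this codebase.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// pollUntil retries probe every interval until it succeeds or timeout
// elapses, returning the last probe error on timeout. This mirrors the
// QMPStart retry loop in waitPod above.
func pollUntil(timeout, interval time.Duration, probe func() error) error {
	deadline := time.Now().Add(timeout)
	for {
		err := probe()
		if err == nil {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out after %v: %v", timeout, err)
		}
		time.Sleep(interval)
	}
}

func main() {
	attempts := 0
	err := pollUntil(time.Second, 50*time.Millisecond, func() error {
		attempts++
		if attempts < 5 {
			return errors.New("QMP socket not ready yet")
		}
		return nil
	})
	fmt.Printf("err=%v after %d attempts\n", err, attempts)
}
```

Polling trades a little CPU for simplicity; since QEMU's QMP socket normally appears within milliseconds of launch, a 50ms interval keeps the latency added at the "start" stage negligible.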
