Windows: Updates Windows Vendoring

Updates Windows-dependent libraries for vendoring.
This commit is contained in:
Nathan Gieseker
2019-01-23 18:43:18 -08:00
parent a686cc4bd8
commit 9a429d8d25
839 changed files with 282895 additions and 774 deletions

View File

@@ -0,0 +1,257 @@
{
"ociVersion":"1.0.1",
"process":{
"user":{
"uid":0,
"gid":0
},
"args":null,
"env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"TERM=xterm"
],
"cwd":"/",
"capabilities":{
"bounding":[
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
],
"effective":[
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
],
"inheritable":[
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
],
"permitted":[
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
]
}
},
"mounts":[
{
"destination":"/proc",
"type":"proc",
"source":"proc",
"options":[
"nosuid",
"noexec",
"nodev"
]
},
{
"destination":"/dev",
"type":"tmpfs",
"source":"tmpfs",
"options":[
"nosuid",
"strictatime",
"mode=755",
"size=65536k"
]
},
{
"destination":"/dev/pts",
"type":"devpts",
"source":"devpts",
"options":[
"nosuid",
"noexec",
"newinstance",
"ptmxmode=0666",
"mode=0620",
"gid=5"
]
},
{
"destination":"/sys",
"type":"sysfs",
"source":"sysfs",
"options":[
"nosuid",
"noexec",
"nodev",
"ro"
]
},
{
"destination":"/sys/fs/cgroup",
"type":"cgroup",
"source":"cgroup",
"options":[
"ro",
"nosuid",
"noexec",
"nodev"
]
},
{
"destination":"/dev/mqueue",
"type":"mqueue",
"source":"mqueue",
"options":[
"nosuid",
"noexec",
"nodev"
]
},
{
"destination":"/dev/shm",
"type":"tmpfs",
"source":"shm",
"options":[
"nosuid",
"noexec",
"nodev",
"mode=1777"
]
}
],
"linux":{
"resources":{
"devices":[
{
"allow":false,
"access":"rwm"
},
{
"allow":true,
"type":"c",
"major":1,
"minor":5,
"access":"rwm"
},
{
"allow":true,
"type":"c",
"major":1,
"minor":3,
"access":"rwm"
},
{
"allow":true,
"type":"c",
"major":1,
"minor":9,
"access":"rwm"
},
{
"allow":true,
"type":"c",
"major":1,
"minor":8,
"access":"rwm"
},
{
"allow":true,
"type":"c",
"major":5,
"minor":0,
"access":"rwm"
},
{
"allow":true,
"type":"c",
"major":5,
"minor":1,
"access":"rwm"
},
{
"allow":false,
"type":"c",
"major":10,
"minor":229,
"access":"rwm"
}
]
},
"namespaces":[
{
"type":"mount"
},
{
"type":"network"
},
{
"type":"uts"
},
{
"type":"pid"
},
{
"type":"ipc"
}
],
"maskedPaths":[
"/proc/kcore",
"/proc/keys",
"/proc/latency_stats",
"/proc/timer_list",
"/proc/timer_stats",
"/proc/sched_debug",
"/proc/scsi",
"/sys/firmware"
],
"readonlyPaths":[
"/proc/asound",
"/proc/bus",
"/proc/fs",
"/proc/irq",
"/proc/sys",
"/proc/sysrq-trigger"
]
},
"windows":{
"layerFolders":null
}
}

View File

@@ -0,0 +1,11 @@
{
"ociVersion": "1.0.1",
"process": {
"args": null,
"env": [],
"cwd": "c:\\"
},
"windows": {
"layerFolders": null
}
}

View File

@@ -0,0 +1,254 @@
{
"ociVersion":"1.0.1",
"process":{
"user":{
"uid":0,
"gid":0
},
"args":["/bin/sh", "-c", "echo hello world"],
"cwd":"/",
"capabilities":{
"bounding":[
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
],
"effective":[
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
],
"inheritable":[
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
],
"permitted":[
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE"
]
}
},
"mounts":[
{
"destination":"/proc",
"type":"proc",
"source":"proc",
"options":[
"nosuid",
"noexec",
"nodev"
]
},
{
"destination":"/dev",
"type":"tmpfs",
"source":"tmpfs",
"options":[
"nosuid",
"strictatime",
"mode=755",
"size=65536k"
]
},
{
"destination":"/dev/pts",
"type":"devpts",
"source":"devpts",
"options":[
"nosuid",
"noexec",
"newinstance",
"ptmxmode=0666",
"mode=0620",
"gid=5"
]
},
{
"destination":"/sys",
"type":"sysfs",
"source":"sysfs",
"options":[
"nosuid",
"noexec",
"nodev",
"ro"
]
},
{
"destination":"/sys/fs/cgroup",
"type":"cgroup",
"source":"cgroup",
"options":[
"ro",
"nosuid",
"noexec",
"nodev"
]
},
{
"destination":"/dev/mqueue",
"type":"mqueue",
"source":"mqueue",
"options":[
"nosuid",
"noexec",
"nodev"
]
},
{
"destination":"/dev/shm",
"type":"tmpfs",
"source":"shm",
"options":[
"nosuid",
"noexec",
"nodev",
"mode=1777"
]
}
],
"linux":{
"resources":{
"devices":[
{
"allow":false,
"access":"rwm"
},
{
"allow":true,
"type":"c",
"major":1,
"minor":5,
"access":"rwm"
},
{
"allow":true,
"type":"c",
"major":1,
"minor":3,
"access":"rwm"
},
{
"allow":true,
"type":"c",
"major":1,
"minor":9,
"access":"rwm"
},
{
"allow":true,
"type":"c",
"major":1,
"minor":8,
"access":"rwm"
},
{
"allow":true,
"type":"c",
"major":5,
"minor":0,
"access":"rwm"
},
{
"allow":true,
"type":"c",
"major":5,
"minor":1,
"access":"rwm"
},
{
"allow":false,
"type":"c",
"major":10,
"minor":229,
"access":"rwm"
}
]
},
"namespaces":[
{
"type":"mount"
},
{
"type":"network"
},
{
"type":"uts"
},
{
"type":"pid"
},
{
"type":"ipc"
}
],
"maskedPaths":[
"/proc/kcore",
"/proc/keys",
"/proc/latency_stats",
"/proc/timer_list",
"/proc/timer_stats",
"/proc/sched_debug",
"/proc/scsi",
"/sys/firmware"
],
"readonlyPaths":[
"/proc/asound",
"/proc/bus",
"/proc/fs",
"/proc/irq",
"/proc/sys",
"/proc/sysrq-trigger"
]
},
"windows":{
"HyperV": {},
"layerFolders":["C:\\docker_images\\lcow\\bcc7d1fc0b8294c100274bb07984400ac1af6d375cb583672d5071c855c73cc2", "C:\\docker_images\\lcow\\c718cb96ac6354b411660c24a4a54e8c8cb3052422b43589ea6af5a745ded451"]
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,224 @@
// +build functional lcow

package functional
import (
"bytes"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"time"
"github.com/Microsoft/hcsshim/internal/hcs"
"github.com/Microsoft/hcsshim/internal/hcsoci"
"github.com/Microsoft/hcsshim/internal/lcow"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/Microsoft/hcsshim/osversion"
"github.com/Microsoft/hcsshim/test/functional/utilities"
)
// TestLCOWUVMNoSCSINoVPMemInitrd starts an LCOW utility VM without a SCSI controller and
// no VPMem device. Uses initrd.
func TestLCOWUVMNoSCSINoVPMemInitrd(t *testing.T) {
opts := uvm.NewDefaultOptionsLCOW(t.Name(), "")
opts.SCSIControllerCount = 0
opts.VPMemDeviceCount = 0
opts.PreferredRootFSType = uvm.PreferredRootFSTypeInitRd
opts.RootFSFile = uvm.InitrdFile
testLCOWUVMNoSCSISingleVPMem(t, opts, fmt.Sprintf("Command line: initrd=/%s", opts.RootFSFile))
}
// TestLCOWUVMNoSCSISingleVPMemVHD starts an LCOW utility VM without a SCSI controller and
// only a single VPMem device. Uses a VPMem VHD.
func TestLCOWUVMNoSCSISingleVPMemVHD(t *testing.T) {
opts := uvm.NewDefaultOptionsLCOW(t.Name(), "")
opts.SCSIControllerCount = 0
opts.VPMemDeviceCount = 1
opts.PreferredRootFSType = uvm.PreferredRootFSTypeVHD
opts.RootFSFile = uvm.VhdFile
testLCOWUVMNoSCSISingleVPMem(t, opts, `Command line: root=/dev/pmem0 init=/init`)
}
func testLCOWUVMNoSCSISingleVPMem(t *testing.T, opts *uvm.OptionsLCOW, expected string) {
testutilities.RequiresBuild(t, osversion.RS5)
lcowUVM := testutilities.CreateLCOWUVMFromOpts(t, opts)
defer lcowUVM.Close()
out, err := exec.Command(`hcsdiag`, `exec`, `-uvm`, lcowUVM.ID(), `dmesg`).Output() // TODO: Move the CreateProcess.
if err != nil {
t.Fatal(string(err.(*exec.ExitError).Stderr))
}
if !strings.Contains(string(out), expected) {
t.Fatalf("Expected dmesg output to have %q: %s", expected, string(out))
}
}
// TestLCOWTimeUVMStartVHD starts/terminates a utility VM booting from a
// VPMem-attached root filesystem a number of times.
func TestLCOWTimeUVMStartVHD(t *testing.T) {
testutilities.RequiresBuild(t, osversion.RS5)
testLCOWTimeUVMStart(t, false, uvm.PreferredRootFSTypeVHD)
}
// TestLCOWUVMStart_KernelDirect_VHD starts/terminates a utility VM booting from a
// VPMem-attached root filesystem a number of times, starting from the Linux
// kernel directly and skipping EFI.
func TestLCOWUVMStart_KernelDirect_VHD(t *testing.T) {
testutilities.RequiresBuild(t, 18286)
testLCOWTimeUVMStart(t, true, uvm.PreferredRootFSTypeVHD)
}
// TestLCOWTimeUVMStartInitRD starts/terminates a utility VM booting from an
// initrd-attached root file system a number of times.
func TestLCOWTimeUVMStartInitRD(t *testing.T) {
testutilities.RequiresBuild(t, osversion.RS5)
testLCOWTimeUVMStart(t, false, uvm.PreferredRootFSTypeInitRd)
}
// TestLCOWUVMStart_KernelDirect_InitRd starts/terminates a utility VM booting
// from an initrd-attached root file system a number of times, starting from the
// Linux kernel directly and skipping EFI.
func TestLCOWUVMStart_KernelDirect_InitRd(t *testing.T) {
testutilities.RequiresBuild(t, 18286)
testLCOWTimeUVMStart(t, true, uvm.PreferredRootFSTypeInitRd)
}
func testLCOWTimeUVMStart(t *testing.T, kernelDirect bool, rfsType uvm.PreferredRootFSType) {
for i := 0; i < 3; i++ {
opts := uvm.NewDefaultOptionsLCOW(t.Name(), "")
opts.KernelDirect = kernelDirect
opts.VPMemDeviceCount = 32
opts.PreferredRootFSType = rfsType
switch opts.PreferredRootFSType {
case uvm.PreferredRootFSTypeInitRd:
opts.RootFSFile = uvm.InitrdFile
case uvm.PreferredRootFSTypeVHD:
opts.RootFSFile = uvm.VhdFile
}
lcowUVM := testutilities.CreateLCOWUVMFromOpts(t, opts)
lcowUVM.Close()
}
}
func TestLCOWSimplePodScenario(t *testing.T) {
t.Skip("Doesn't work quite yet")
testutilities.RequiresBuild(t, osversion.RS5)
alpineLayers := testutilities.LayerFolders(t, "alpine")
cacheDir := testutilities.CreateTempDir(t)
defer os.RemoveAll(cacheDir)
cacheFile := filepath.Join(cacheDir, "cache.vhdx")
// This is what gets mounted into /tmp/scratch
uvmScratchDir := testutilities.CreateTempDir(t)
defer os.RemoveAll(uvmScratchDir)
uvmScratchFile := filepath.Join(uvmScratchDir, "uvmscratch.vhdx")
// Scratch for the first container
c1ScratchDir := testutilities.CreateTempDir(t)
defer os.RemoveAll(c1ScratchDir)
c1ScratchFile := filepath.Join(c1ScratchDir, "sandbox.vhdx")
// Scratch for the second container
c2ScratchDir := testutilities.CreateTempDir(t)
defer os.RemoveAll(c2ScratchDir)
c2ScratchFile := filepath.Join(c2ScratchDir, "sandbox.vhdx")
lcowUVM := testutilities.CreateLCOWUVM(t, "uvm")
defer lcowUVM.Close()
// Populate the cache and generate the scratch file for /tmp/scratch
if err := lcow.CreateScratch(lcowUVM, uvmScratchFile, lcow.DefaultScratchSizeGB, cacheFile, ""); err != nil {
t.Fatal(err)
}
if _, _, err := lcowUVM.AddSCSI(uvmScratchFile, `/tmp/scratch`, false); err != nil {
t.Fatal(err)
}
// Now create the first container's sandbox, populate a spec
if err := lcow.CreateScratch(lcowUVM, c1ScratchFile, lcow.DefaultScratchSizeGB, cacheFile, ""); err != nil {
t.Fatal(err)
}
c1Spec := testutilities.GetDefaultLinuxSpec(t)
c1Folders := append(alpineLayers, c1ScratchDir)
c1Spec.Windows.LayerFolders = c1Folders
c1Spec.Process.Args = []string{"echo", "hello", "lcow", "container", "one"}
c1Opts := &hcsoci.CreateOptions{
Spec: c1Spec,
HostingSystem: lcowUVM,
}
// Now create the second container's sandbox, populate a spec
if err := lcow.CreateScratch(lcowUVM, c2ScratchFile, lcow.DefaultScratchSizeGB, cacheFile, ""); err != nil {
t.Fatal(err)
}
c2Spec := testutilities.GetDefaultLinuxSpec(t)
c2Folders := append(alpineLayers, c2ScratchDir)
c2Spec.Windows.LayerFolders = c2Folders
c2Spec.Process.Args = []string{"echo", "hello", "lcow", "container", "two"}
c2Opts := &hcsoci.CreateOptions{
Spec: c2Spec,
HostingSystem: lcowUVM,
}
// Create the two containers
c1hcsSystem, c1Resources, err := CreateContainerTestWrapper(c1Opts)
if err != nil {
t.Fatal(err)
}
c2hcsSystem, c2Resources, err := CreateContainerTestWrapper(c2Opts)
if err != nil {
t.Fatal(err)
}
// Start them. In the UVM, they'll be in the created state from runc's perspective after this. e.g.
// # runc list
// ID                                     PID   STATUS    BUNDLE         CREATED                        OWNER
// 3a724c2b-f389-5c71-0555-ebc6f5379b30   138   running   /run/gcs/c/1   2018-06-04T21:23:39.1253911Z   root
// 7a8229a0-eb60-b515-55e7-d2dd63ffae75   158   created   /run/gcs/c/2   2018-06-04T21:23:39.4249048Z   root
if err := c1hcsSystem.Start(); err != nil {
t.Fatal(err)
}
defer hcsoci.ReleaseResources(c1Resources, lcowUVM, true)
if err := c2hcsSystem.Start(); err != nil {
t.Fatal(err)
}
defer hcsoci.ReleaseResources(c2Resources, lcowUVM, true)
// Start the init process in each container and grab its stdout, comparing to expected
runInitProcess(t, c1hcsSystem, "hello lcow container one")
runInitProcess(t, c2hcsSystem, "hello lcow container two")
}
// Helper to run the init process in an LCOW container; verify it exits with exit
// code 0; verify stderr is empty; check output is as expected.
func runInitProcess(t *testing.T, s *hcs.System, expected string) {
var outB, errB bytes.Buffer
p, bc, err := lcow.CreateProcess(&lcow.ProcessOptions{
HCSSystem: s,
Stdout: &outB,
Stderr: &errB,
CopyTimeout: 30 * time.Second,
})
if err != nil {
t.Fatal(err)
}
defer p.Close()
if bc.Err != 0 {
t.Fatalf("got %d bytes on stderr: %s", bc.Err, errB.String())
}
if strings.TrimSpace(outB.String()) != expected {
t.Fatalf("got %q (%d) expecting %q", outB.String(), bc.Out, expected)
}
}

View File

@@ -0,0 +1,4 @@
package manifest
// This package exists so that tests can include the .syso and be manifested to pick up the right Windows build
// TODO: Auto-generation of the .syso through rsrc or similar.
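//
// A hedged sketch of that TODO (not part of this commit): with the
// github.com/akavel/rsrc tool installed and a manifest XML next to this file
// (the manifest file name below is hypothetical), the .syso could be
// regenerated via go generate:
//
//go:generate rsrc -manifest test.manifest -o manifest.syso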

Binary file not shown.

View File

@@ -0,0 +1,3 @@
package functional
import _ "github.com/Microsoft/hcsshim/test/functional/manifest"

View File

@@ -0,0 +1,47 @@
package functional
import (
"os"
"os/exec"
"strconv"
"time"
"github.com/Microsoft/hcsshim/internal/hcs"
"github.com/Microsoft/hcsshim/internal/hcsoci"
"github.com/sirupsen/logrus"
)
var pauseDurationOnCreateContainerFailure time.Duration
func init() {
if len(os.Getenv("HCSSHIM_FUNCTIONAL_TESTS_DEBUG")) > 0 {
logrus.SetLevel(logrus.DebugLevel)
logrus.SetFormatter(&logrus.TextFormatter{FullTimestamp: true})
}
// This allows for debugging a utility VM.
s := os.Getenv("HCSSHIM_FUNCTIONAL_TESTS_PAUSE_ON_CREATECONTAINER_FAIL_IN_MINUTES")
if s != "" {
if t, err := strconv.Atoi(s); err == nil {
pauseDurationOnCreateContainerFailure = time.Duration(t) * time.Minute
}
}
// Try to stop any pre-existing compute processes
cmd := exec.Command("powershell", `get-computeprocess | stop-computeprocess -force`)
cmd.Run()
}
func CreateContainerTestWrapper(options *hcsoci.CreateOptions) (*hcs.System, *hcsoci.Resources, error) {
if pauseDurationOnCreateContainerFailure != 0 {
options.DoNotReleaseResourcesOnFailure = true
}
s, r, err := hcsoci.CreateContainer(options)
if err != nil {
logrus.Warnf("Test is pausing for %s for debugging CreateContainer failure", pauseDurationOnCreateContainerFailure)
time.Sleep(pauseDurationOnCreateContainerFailure)
hcsoci.ReleaseResources(r, options.HostingSystem, true)
}
return s, r, err
}
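// A sketch of wiring up the environment variables read in init() above
// (values are illustrative; the PowerShell syntax and package path are
// assumptions, not part of this commit):
//
//   $env:HCSSHIM_FUNCTIONAL_TESTS_DEBUG = "1"
//   $env:HCSSHIM_FUNCTIONAL_TESTS_PAUSE_ON_CREATECONTAINER_FAIL_IN_MINUTES = "10"
//   go test -v -tags functional ./test/functional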

View File

@@ -0,0 +1,76 @@
package testutilities
import (
"os"
"testing"
"github.com/Microsoft/hcsshim/internal/uvm"
)
// CreateWCOWUVM creates a WCOW utility VM with all default options. Returns the
// UtilityVM object, the image layer folders, and the folder used as its scratch.
func CreateWCOWUVM(t *testing.T, id, image string) (*uvm.UtilityVM, []string, string) {
return CreateWCOWUVMFromOptsWithImage(t, uvm.NewDefaultOptionsWCOW(id, ""), image)
}
// CreateWCOWUVMFromOpts creates a WCOW utility VM with the passed opts.
func CreateWCOWUVMFromOpts(t *testing.T, opts *uvm.OptionsWCOW) *uvm.UtilityVM {
if opts == nil || len(opts.LayerFolders) < 2 {
t.Fatalf("opts must bet set with LayerFolders")
}
uvm, err := uvm.CreateWCOW(opts)
if err != nil {
t.Fatal(err)
}
if err := uvm.Start(); err != nil {
uvm.Close()
t.Fatal(err)
}
return uvm
}
// CreateWCOWUVMFromOptsWithImage creates a WCOW utility VM with the passed opts and
// builds the LayerFolders based on `image`. Returns the UtilityVM object, the image
// layer folders, and the folder used as its scratch.
func CreateWCOWUVMFromOptsWithImage(t *testing.T, opts *uvm.OptionsWCOW, image string) (*uvm.UtilityVM, []string, string) {
if opts == nil {
t.Fatal("opts must be set")
}
uvmLayers := LayerFolders(t, image)
scratchDir := CreateTempDir(t)
defer func() {
if t.Failed() {
os.RemoveAll(scratchDir)
}
}()
opts.LayerFolders = append(opts.LayerFolders, uvmLayers...)
opts.LayerFolders = append(opts.LayerFolders, scratchDir)
return CreateWCOWUVMFromOpts(t, opts), uvmLayers, scratchDir
}
// CreateLCOWUVM creates an LCOW utility VM with all default options.
func CreateLCOWUVM(t *testing.T, id string) *uvm.UtilityVM {
return CreateLCOWUVMFromOpts(t, uvm.NewDefaultOptionsLCOW(id, ""))
}
// CreateLCOWUVMFromOpts creates an LCOW utility VM with the specified options.
func CreateLCOWUVMFromOpts(t *testing.T, opts *uvm.OptionsLCOW) *uvm.UtilityVM {
if opts == nil {
t.Fatal("opts must be set")
}
uvm, err := uvm.CreateLCOW(opts)
if err != nil {
t.Fatal(err)
}
if err := uvm.Start(); err != nil {
uvm.Close()
t.Fatal(err)
}
return uvm
}

View File

@@ -0,0 +1,21 @@
package testutilities
import (
"encoding/json"
"io/ioutil"
"testing"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
func GetDefaultLinuxSpec(t *testing.T) *specs.Spec {
content, err := ioutil.ReadFile(`assets\defaultlinuxspec.json`)
if err != nil {
t.Fatalf("failed to read defaultlinuxspec.json: %s", err.Error())
}
spec := specs.Spec{}
if err := json.Unmarshal(content, &spec); err != nil {
t.Fatalf("failed to unmarshal contents of defaultlinuxspec.json: %s", err.Error())
}
return &spec
}

View File

@@ -0,0 +1,21 @@
package testutilities
import (
"encoding/json"
"io/ioutil"
"testing"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
func GetDefaultWindowsSpec(t *testing.T) *specs.Spec {
content, err := ioutil.ReadFile(`assets\defaultwindowsspec.json`)
if err != nil {
t.Fatalf("failed to read defaultwindowsspec.json: %s", err.Error())
}
spec := specs.Spec{}
if err := json.Unmarshal(content, &spec); err != nil {
t.Fatalf("failed to unmarshal contents of defaultwindowsspec.json: %s", err.Error())
}
return &spec
}

View File

@@ -0,0 +1,54 @@
package testutilities
import (
"bytes"
"encoding/json"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
)
var imageLayers map[string][]string
func init() {
imageLayers = make(map[string][]string)
}
func LayerFolders(t *testing.T, imageName string) []string {
if _, ok := imageLayers[imageName]; !ok {
imageLayers[imageName] = getLayers(t, imageName)
}
return imageLayers[imageName]
}
func getLayers(t *testing.T, imageName string) []string {
cmd := exec.Command("docker", "inspect", imageName, "-f", `"{{.GraphDriver.Data.dir}}"`)
var out bytes.Buffer
cmd.Stdout = &out
if err := cmd.Run(); err != nil {
t.Skipf("Failed to find layers for %q. Check docker images", imageName)
}
imagePath := strings.Replace(strings.TrimSpace(out.String()), `"`, ``, -1)
layers := getLayerChain(t, imagePath)
return append([]string{imagePath}, layers...)
}
func getLayerChain(t *testing.T, layerFolder string) []string {
jPath := filepath.Join(layerFolder, "layerchain.json")
content, err := ioutil.ReadFile(jPath)
if os.IsNotExist(err) {
t.Fatalf("layerchain not found")
} else if err != nil {
t.Fatalf("failed to read layerchain")
}
var layerChain []string
err = json.Unmarshal(content, &layerChain)
if err != nil {
t.Fatalf("failed to unmarshal layerchain")
}
return layerChain
}
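// For reference, getLayerChain above expects layerchain.json to contain a JSON
// array of parent layer paths, e.g. (a hypothetical two-layer chain):
//
//   ["C:\\layers\\<parent1>", "C:\\layers\\<parent2>"]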

View File

@@ -0,0 +1,19 @@
package testutilities
import (
"testing"
"github.com/Microsoft/hcsshim/osversion"
)
func RequiresBuild(t *testing.T, b uint16) {
if osversion.Get().Build < b {
t.Skipf("Requires build %d+", b)
}
}
func RequiresExactBuild(t *testing.T, b uint16) {
if osversion.Get().Build != b {
t.Skipf("Requires exact build %d", b)
}
}

View File

@@ -0,0 +1,59 @@
package testutilities
import (
"path/filepath"
"testing"
"github.com/Microsoft/hcsshim/internal/hcs"
"github.com/Microsoft/hcsshim/internal/lcow"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/Microsoft/hcsshim/internal/wclayer"
)
const lcowGlobalSVMID = "test.lcowglobalsvm"
var (
lcowGlobalSVM *uvm.UtilityVM
lcowCacheScratchFile string
)
func init() {
if hcsSystem, err := hcs.OpenComputeSystem(lcowGlobalSVMID); err == nil {
hcsSystem.Terminate()
}
}
// CreateWCOWBlankRWLayer uses HCS to create a temp test directory containing a
// read-write layer containing a disk that can be used as a container's scratch
// space. The VHD is created with VM group access.
// TODO: This is wrong. Need to search the folders.
func CreateWCOWBlankRWLayer(t *testing.T, imageLayers []string) string {
// uvmFolder, err := LocateUVMFolder(imageLayers)
// if err != nil {
// t.Fatalf("failed to locate UVM folder from %+v: %s", imageLayers, err)
// }
tempDir := CreateTempDir(t)
if err := wclayer.CreateScratchLayer(tempDir, imageLayers); err != nil {
t.Fatalf("Failed CreateScratchLayer: %s", err)
}
return tempDir
}
// CreateLCOWBlankRWLayer uses an LCOW utility VM to create a blank
// VHDX and format it ext4. If vmID is supplied, it grants access to the
// destination file. This can then be used as a scratch space for a container,
// or for a "service VM".
func CreateLCOWBlankRWLayer(t *testing.T, vmID string) string {
if lcowGlobalSVM == nil {
lcowGlobalSVM = CreateLCOWUVM(t, lcowGlobalSVMID)
lcowCacheScratchFile = filepath.Join(CreateTempDir(t), "sandbox.vhdx")
}
tempDir := CreateTempDir(t)
if err := lcow.CreateScratch(lcowGlobalSVM, filepath.Join(tempDir, "sandbox.vhdx"), lcow.DefaultScratchSizeGB, lcowCacheScratchFile, vmID); err != nil {
t.Fatalf("failed to create EXT4 scratch for LCOW test cases: %s", err)
}
return tempDir
}

View File

@@ -0,0 +1,15 @@
package testutilities
import (
"io/ioutil"
"testing"
)
// CreateTempDir creates a temporary directory
func CreateTempDir(t *testing.T) string {
tempDir, err := ioutil.TempDir("", "test")
if err != nil {
t.Fatalf("failed to create temporary directory: %s", err)
}
return tempDir
}

View File

@@ -0,0 +1,107 @@
// +build functional uvmmem

package functional
import (
"io/ioutil"
"os"
"testing"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/Microsoft/hcsshim/osversion"
testutilities "github.com/Microsoft/hcsshim/test/functional/utilities"
"github.com/sirupsen/logrus"
)
func runMemStartLCOWTest(t *testing.T, opts *uvm.OptionsLCOW) {
u := testutilities.CreateLCOWUVMFromOpts(t, opts)
u.Close()
}
func runMemStartWCOWTest(t *testing.T, opts *uvm.OptionsWCOW) {
u, _, scratchDir := testutilities.CreateWCOWUVMFromOptsWithImage(t, opts, "microsoft/nanoserver")
defer os.RemoveAll(scratchDir)
u.Close()
}
func runMemTests(t *testing.T, os string) {
type testCase struct {
allowOvercommit bool
enableDeferredCommit bool
}
testCases := []testCase{
{allowOvercommit: true, enableDeferredCommit: false}, // Explicit default - Virtual
{allowOvercommit: true, enableDeferredCommit: true}, // Virtual Deferred
{allowOvercommit: false, enableDeferredCommit: false}, // Physical
}
for _, bt := range testCases {
if os == "windows" {
wopts := uvm.NewDefaultOptionsWCOW(t.Name(), "")
wopts.MemorySizeInMB = 512
wopts.AllowOvercommit = bt.allowOvercommit
wopts.EnableDeferredCommit = bt.enableDeferredCommit
runMemStartWCOWTest(t, wopts)
} else {
lopts := uvm.NewDefaultOptionsLCOW(t.Name(), "")
lopts.MemorySizeInMB = 512
lopts.AllowOvercommit = bt.allowOvercommit
lopts.EnableDeferredCommit = bt.enableDeferredCommit
runMemStartLCOWTest(t, lopts)
}
}
}
func TestMemBackingTypeWCOW(t *testing.T) {
testutilities.RequiresBuild(t, osversion.RS5)
runMemTests(t, "windows")
}
func TestMemBackingTypeLCOW(t *testing.T) {
testutilities.RequiresBuild(t, osversion.RS5)
runMemTests(t, "linux")
}
func runBenchMemStartTest(b *testing.B, opts *uvm.OptionsLCOW) {
// Can't use testutilities here because it's `testing.B`, not `testing.T`
u, err := uvm.CreateLCOW(opts)
if err != nil {
b.Fatal(err)
}
defer u.Close()
if err := u.Start(); err != nil {
b.Fatal(err)
}
}
func runBenchMemStartLcowTest(b *testing.B, allowOvercommit bool, enableDeferredCommit bool) {
for i := 0; i < b.N; i++ {
opts := uvm.NewDefaultOptionsLCOW(b.Name(), "")
opts.MemorySizeInMB = 512
opts.AllowOvercommit = allowOvercommit
opts.EnableDeferredCommit = enableDeferredCommit
runBenchMemStartTest(b, opts)
}
}
func BenchmarkMemBackingTypeVirtualLCOW(b *testing.B) {
//testutilities.RequiresBuild(t, osversion.RS5)
logrus.SetOutput(ioutil.Discard)
runBenchMemStartLcowTest(b, true, false)
}
func BenchmarkMemBackingTypeVirtualDeferredLCOW(b *testing.B) {
//testutilities.RequiresBuild(t, osversion.RS5)
logrus.SetOutput(ioutil.Discard)
runBenchMemStartLcowTest(b, true, true)
}
func BenchmarkMemBackingTypePhysicalLCOW(b *testing.B) {
//testutilities.RequiresBuild(t, osversion.RS5)
logrus.SetOutput(ioutil.Discard)
runBenchMemStartLcowTest(b, false, false)
}
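// The benchmarks above can be run with the matching build tag, e.g. (the
// package path is an assumption): go test -tags uvmmem -run NONE -bench BenchmarkMemBackingType ./test/functional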

View File

@@ -0,0 +1,40 @@
// +build functional uvmp9

// This file isn't called uvm_plan9_test.go as go test skips it when there's a number in the name... go figure (pun intended)

package functional
import (
"fmt"
"os"
"path/filepath"
"testing"
"github.com/Microsoft/hcsshim/osversion"
"github.com/Microsoft/hcsshim/test/functional/utilities"
)
// TestPlan9 tests adding/removing Plan9 shares to/from a v2 Linux utility VM
// TODO: This is very basic. Need multiple shares and so on. Can be iterated on later.
func TestPlan9(t *testing.T) {
testutilities.RequiresBuild(t, osversion.RS5)
uvm := testutilities.CreateLCOWUVM(t, t.Name())
defer uvm.Close()
dir := testutilities.CreateTempDir(t)
defer os.RemoveAll(dir)
var iterations uint32 = 64
for i := 0; i < int(iterations); i++ {
if err := uvm.AddPlan9(dir, fmt.Sprintf("/tmp/%s", filepath.Base(dir)), false); err != nil {
t.Fatalf("AddPlan9 failed: %s", err)
}
}
// Remove them all
for i := 0; i < int(iterations); i++ {
if err := uvm.RemovePlan9(dir); err != nil {
t.Fatalf("RemovePlan9 failed: %s", err)
}
}
}

View File

@@ -0,0 +1,48 @@
// +build functional uvmproperties

package functional
import (
"os"
"testing"
"github.com/Microsoft/hcsshim/internal/schema1"
"github.com/Microsoft/hcsshim/osversion"
"github.com/Microsoft/hcsshim/test/functional/utilities"
)
func TestPropertiesGuestConnection_LCOW(t *testing.T) {
testutilities.RequiresBuild(t, osversion.RS5)
uvm := testutilities.CreateLCOWUVM(t, t.Name())
defer uvm.Close()
p, err := uvm.ComputeSystem().Properties(schema1.PropertyTypeGuestConnection)
if err != nil {
t.Fatalf("Failed to query properties: %s", err)
}
if p.GuestConnectionInfo.GuestDefinedCapabilities.NamespaceAddRequestSupported ||
!p.GuestConnectionInfo.GuestDefinedCapabilities.SignalProcessSupported ||
p.GuestConnectionInfo.ProtocolVersion < 4 {
t.Fatalf("unexpected values: %+v", p.GuestConnectionInfo)
}
}
func TestPropertiesGuestConnection_WCOW(t *testing.T) {
testutilities.RequiresBuild(t, osversion.RS5)
uvm, _, uvmScratchDir := testutilities.CreateWCOWUVM(t, t.Name(), "microsoft/nanoserver")
defer os.RemoveAll(uvmScratchDir)
defer uvm.Close()
p, err := uvm.ComputeSystem().Properties(schema1.PropertyTypeGuestConnection)
if err != nil {
t.Fatalf("Failed to query properties: %s", err)
}
if !p.GuestConnectionInfo.GuestDefinedCapabilities.NamespaceAddRequestSupported ||
!p.GuestConnectionInfo.GuestDefinedCapabilities.SignalProcessSupported ||
p.GuestConnectionInfo.ProtocolVersion < 4 {
t.Fatalf("unexpected values: %+v", p.GuestConnectionInfo)
}
}

View File

@@ -0,0 +1,114 @@
// +build functional uvmscratch

package functional
import (
"os"
"path/filepath"
"testing"
"github.com/Microsoft/hcsshim/internal/lcow"
"github.com/Microsoft/hcsshim/osversion"
"github.com/Microsoft/hcsshim/test/functional/utilities"
)
func TestScratchCreateLCOW(t *testing.T) {
testutilities.RequiresBuild(t, osversion.RS5)
tempDir := testutilities.CreateTempDir(t)
defer os.RemoveAll(tempDir)
firstUVM := testutilities.CreateLCOWUVM(t, "TestCreateLCOWScratch")
defer firstUVM.Close()
cacheFile := filepath.Join(tempDir, "cache.vhdx")
destOne := filepath.Join(tempDir, "destone.vhdx")
destTwo := filepath.Join(tempDir, "desttwo.vhdx")
if err := lcow.CreateScratch(firstUVM, destOne, lcow.DefaultScratchSizeGB, cacheFile, ""); err != nil {
t.Fatal(err)
}
if _, err := os.Stat(destOne); err != nil {
t.Fatalf("destone wasn't created!")
}
if _, err := os.Stat(cacheFile); err != nil {
t.Fatalf("cacheFile wasn't created!")
}
targetUVM := testutilities.CreateLCOWUVM(t, "TestCreateLCOWScratch_target")
defer targetUVM.Close()
// A non-cached create
if err := lcow.CreateScratch(firstUVM, destTwo, lcow.DefaultScratchSizeGB, cacheFile, targetUVM.ID()); err != nil {
t.Fatal(err)
}
// Make sure it can be added (verifies it has access correctly)
c, l, err := targetUVM.AddSCSI(destTwo, "", false)
if err != nil {
t.Fatal(err)
}
if c != 0 && l != 0 {
t.Fatal(err)
}
// TODO: Could consider giving it a host path and verifying its contents somehow
}
// TODO: This is an old test which should go here.
//// createLCOWTempDirWithSandbox uses an LCOW utility VM to create a blank
//// VHDX and format it ext4.
//func TestCreateLCOWScratch(t *testing.T) {
// t.Skip("for now")
// cacheDir := createTempDir(t)
// cacheFile := filepath.Join(cacheDir, "cache.vhdx")
// uvm, err := CreateContainer(&CreateOptions{Spec: getDefaultLinuxSpec(t)})
// if err != nil {
// t.Fatalf("Failed create: %s", err)
// }
// defer uvm.Terminate()
// if err := uvm.Start(); err != nil {
// t.Fatalf("Failed to start service container: %s", err)
// }
// // 1: Default size, cache doesn't exist, but no UVM passed. Cannot be created
// err = CreateLCOWScratch(nil, filepath.Join(cacheDir, "default.vhdx"), lcow.DefaultScratchSizeGB, cacheFile)
// if err == nil {
// t.Fatalf("expected an error creating LCOW scratch")
// }
// if err.Error() != "cannot create scratch disk as cache is not present and no utility VM supplied" {
// t.Fatalf("Not expecting error %s", err)
// }
// // 2: Default size, no cache supplied and no UVM
// err = CreateLCOWScratch(nil, filepath.Join(cacheDir, "default.vhdx"), lcow.DefaultScratchSizeGB, "")
// if err == nil {
// t.Fatalf("expected an error creating LCOW scratch")
// }
// if err.Error() != "cannot create scratch disk as cache is not present and no utility VM supplied" {
// t.Fatalf("Not expecting error %s", err)
// }
// // 3: Default size. This should work and the cache should be created.
// err = CreateLCOWScratch(uvm, filepath.Join(cacheDir, "default.vhdx"), lcow.DefaultScratchSizeGB, cacheFile)
// if err != nil {
// t.Fatalf("should succeed creating default size cache file: %s", err)
// }
// if _, err = os.Stat(cacheFile); err != nil {
// t.Fatalf("failed to stat cache file after created: %s", err)
// }
// if _, err = os.Stat(filepath.Join(cacheDir, "default.vhdx")); err != nil {
// t.Fatalf("failed to stat default.vhdx after created: %s", err)
// }
// // 4: Non-defaultsize. This should work and the cache should be created.
// err = CreateLCOWScratch(uvm, filepath.Join(cacheDir, "nondefault.vhdx"), lcow.DefaultScratchSizeGB+1, cacheFile)
// if err != nil {
// t.Fatalf("should succeed creating default size cache file: %s", err)
// }
// if _, err = os.Stat(cacheFile); err != nil {
// t.Fatalf("failed to stat cache file after created: %s", err)
// }
// if _, err = os.Stat(filepath.Join(cacheDir, "nondefault.vhdx")); err != nil {
// t.Fatalf("failed to stat default.vhdx after created: %s", err)
// }
//}

View File

@@ -0,0 +1,119 @@
// +build functional uvmscsi

package functional
import (
"fmt"
"os"
"path/filepath"
"testing"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/Microsoft/hcsshim/osversion"
"github.com/Microsoft/hcsshim/test/functional/utilities"
"github.com/sirupsen/logrus"
)
// TestSCSIAddRemoveLCOW validates adding and removing SCSI disks
// from a utility VM in both attach-only and with a container path. Also does
// negative testing so that a disk can't be attached twice.
func TestSCSIAddRemoveLCOW(t *testing.T) {
testutilities.RequiresBuild(t, osversion.RS5)
u := testutilities.CreateLCOWUVM(t, t.Name())
defer u.Close()
testSCSIAddRemove(t, u, `/`, "linux", []string{})
}
// TestSCSIAddRemoveWCOW validates adding and removing SCSI disks
// from a utility VM in both attach-only and with a container path. Also does
// negative testing so that a disk can't be attached twice.
func TestSCSIAddRemoveWCOW(t *testing.T) {
testutilities.RequiresBuild(t, osversion.RS5)
u, layers, uvmScratchDir := testutilities.CreateWCOWUVM(t, t.Name(), "microsoft/nanoserver")
defer os.RemoveAll(uvmScratchDir)
defer u.Close()
testSCSIAddRemove(t, u, `c:\`, "windows", layers)
}
func testSCSIAddRemove(t *testing.T, u *uvm.UtilityVM, pathPrefix string, operatingSystem string, wcowImageLayerFolders []string) {
numDisks := 63 // Windows: 63 as the UVM scratch is at 0:0
if operatingSystem == "linux" {
numDisks++ // Linux: the UVM scratch isn't attached over SCSI, so one more slot is free
}
// Create a bunch of directories each containing sandbox.vhdx
disks := make([]string, numDisks)
for i := 0; i < numDisks; i++ {
tempDir := ""
if operatingSystem == "windows" {
tempDir = testutilities.CreateWCOWBlankRWLayer(t, wcowImageLayerFolders)
} else {
tempDir = testutilities.CreateLCOWBlankRWLayer(t, u.ID())
}
defer os.RemoveAll(tempDir)
disks[i] = filepath.Join(tempDir, `sandbox.vhdx`)
}
// Add each of the disks to the utility VM. Attach-only, no container path
logrus.Debugln("First - adding in attach-only")
for i := 0; i < numDisks; i++ {
_, _, err := u.AddSCSI(disks[i], "", false)
if err != nil {
t.Fatalf("failed to add scsi disk %d %s: %s", i, disks[i], err)
}
}
// Try to re-add. These should all fail.
logrus.Debugln("Next - trying to re-add")
for i := 0; i < numDisks; i++ {
_, _, err := u.AddSCSI(disks[i], "", false)
if err == nil {
t.Fatalf("should not be able to re-add the same SCSI disk!")
}
if err != uvm.ErrAlreadyAttached {
t.Fatalf("expecting %s, got %s", uvm.ErrAlreadyAttached, err)
}
}
// Remove them all
logrus.Debugln("Removing them all")
for i := 0; i < numDisks; i++ {
if err := u.RemoveSCSI(disks[i]); err != nil {
t.Fatalf("expected success: %s", err)
}
}
// Now re-add but providing a container path
logrus.Debugln("Next - re-adding with a container path")
for i := 0; i < numDisks; i++ {
_, _, err := u.AddSCSI(disks[i], fmt.Sprintf(`%s%d`, pathPrefix, i), false)
if err != nil {
t.Fatalf("failed to add scsi disk %d %s: %s", i, disks[i], err)
}
}
// Try to re-add. These should all fail.
logrus.Debugln("Next - trying to re-add")
for i := 0; i < numDisks; i++ {
_, _, err := u.AddSCSI(disks[i], fmt.Sprintf(`%s%d`, pathPrefix, i), false)
if err == nil {
t.Fatalf("should not be able to re-add the same SCSI disk!")
}
if err != uvm.ErrAlreadyAttached {
t.Fatalf("expecting %s, got %s", uvm.ErrAlreadyAttached, err)
}
}
// Remove them all
logrus.Debugln("Next - Removing them")
for i := 0; i < numDisks; i++ {
if err := u.RemoveSCSI(disks[i]); err != nil {
t.Fatalf("expected success: %s", err)
}
}
// TODO: Could extend to validate can't add a 64th disk (windows). 65th (linux).
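// A sketch of that TODO (hypothetical; extraDisk would be one more scratch
// disk created the same way as the disks above):
//
// if _, _, err := u.AddSCSI(extraDisk, "", false); err == nil {
// 	t.Fatal("expected AddSCSI to fail once every SCSI slot is in use")
// }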
}

View File

@@ -0,0 +1,48 @@
// +build functional uvmvpmem

package functional
import (
"os"
"path/filepath"
"testing"
"github.com/Microsoft/hcsshim/internal/copyfile"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/Microsoft/hcsshim/osversion"
"github.com/Microsoft/hcsshim/test/functional/utilities"
"github.com/sirupsen/logrus"
)
// TestVPMEM tests adding/removing VPMem Read-Only layers from a v2 Linux utility VM
func TestVPMEM(t *testing.T) {
testutilities.RequiresBuild(t, osversion.RS5)
alpineLayers := testutilities.LayerFolders(t, "alpine")
u := testutilities.CreateLCOWUVM(t, t.Name())
defer u.Close()
var iterations uint32 = uvm.MaxVPMEMCount
// Use layer.vhd from the alpine image as something to add
tempDir := testutilities.CreateTempDir(t)
if err := copyfile.CopyFile(filepath.Join(alpineLayers[0], "layer.vhd"), filepath.Join(tempDir, "layer.vhd"), true); err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tempDir)
for i := 0; i < int(iterations); i++ {
deviceNumber, uvmPath, err := u.AddVPMEM(filepath.Join(tempDir, "layer.vhd"), true)
if err != nil {
t.Fatalf("AddVPMEM failed: %s", err)
}
logrus.Debugf("exposed as %s on %d", uvmPath, deviceNumber)
}
// Remove them all
for i := 0; i < int(iterations); i++ {
if err := u.RemoveVPMEM(filepath.Join(tempDir, "layer.vhd")); err != nil {
t.Fatalf("RemoveVPMEM failed: %s", err)
}
}
}

View File

@@ -0,0 +1,45 @@
// +build functional uvmvsmb

package functional
import (
"os"
"testing"
"github.com/Microsoft/hcsshim/internal/schema2"
"github.com/Microsoft/hcsshim/osversion"
"github.com/Microsoft/hcsshim/test/functional/utilities"
)
// TestVSMB tests adding/removing VSMB layers from a v2 Windows utility VM
func TestVSMB(t *testing.T) {
testutilities.RequiresBuild(t, osversion.RS5)
uvm, _, uvmScratchDir := testutilities.CreateWCOWUVM(t, t.Name(), "microsoft/nanoserver")
defer os.RemoveAll(uvmScratchDir)
defer uvm.Close()
dir := testutilities.CreateTempDir(t)
defer os.RemoveAll(dir)
var iterations uint32 = 64
options := &hcsschema.VirtualSmbShareOptions{
ReadOnly: true,
PseudoOplocks: true,
TakeBackupPrivilege: true,
CacheIo: true,
ShareRead: true,
}
for i := 0; i < int(iterations); i++ {
if err := uvm.AddVSMB(dir, "", options); err != nil {
t.Fatalf("AddVSMB failed: %s", err)
}
}
// Remove them all
for i := 0; i < int(iterations); i++ {
if err := uvm.RemoveVSMB(dir); err != nil {
t.Fatalf("RemoveVSMB failed: %s", err)
}
}
}
// TODO: VSMB for mapped directories

View File

@@ -0,0 +1,731 @@
// +build functional wcow

package functional
import (
"bytes"
"os"
"path/filepath"
"strings"
"testing"
"github.com/Microsoft/hcsshim"
"github.com/Microsoft/hcsshim/internal/hcs"
"github.com/Microsoft/hcsshim/internal/hcsoci"
"github.com/Microsoft/hcsshim/internal/schema1"
"github.com/Microsoft/hcsshim/internal/schemaversion"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/Microsoft/hcsshim/internal/uvmfolder"
"github.com/Microsoft/hcsshim/internal/wclayer"
"github.com/Microsoft/hcsshim/internal/wcow"
"github.com/Microsoft/hcsshim/osversion"
"github.com/Microsoft/hcsshim/test/functional/utilities"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
// Has testing for Windows containers using both the older hcsshim methods,
// and the newer hcsoci methods. Does the same thing in six different ways:
// - hcsshim/argon
// - hcsshim/xenon
// - hcsoci/argon v1
// - hcsoci/xenon v1
// - hcsoci/argon v2
// - hcsoci/xenon v2
//
// Sample v1 HCS document for Xenon (no networking):
//
//{
// "SystemType": "Container",
// "Name": "48347b95d0ad4f37de6d1979b986fb65912f973ad4549fbe716e848679dfa25c",
// "IgnoreFlushesDuringBoot": true,
// "LayerFolderPath": "C:\\layers\\48347b95d0ad4f37de6d1979b986fb65912f973ad4549fbe716e848679dfa25c",
// "Layers": [
// {
// "ID": "7095521e-b79e-50fc-bafb-958d85400362",
// "Path": "C:\\layers\\f9b22d909166dd54b870eb699d54f4cf36d99f035ffd7701aff1267230aefd1e"
// }
// ],
// "HvPartition": true,
// "HvRuntime": {
// "ImagePath": "C:\\layers\\f9b22d909166dd54b870eb699d54f4cf36d99f035ffd7701aff1267230aefd1e\\UtilityVM"
// }
//}
//
// Sample v1 HCS document for Argon (no networking):
//
//{
// "SystemType": "Container",
// "Name": "0a8bb9ec8366aa48a8e5f810274701d8d4452989bf268fc338570bfdecddf8df",
// "VolumePath": "\\\\?\\Volume{85da95c9-dda9-42e0-a066-40bd120c6f3c}",
// "IgnoreFlushesDuringBoot": true,
// "LayerFolderPath": "C:\\layers\\0a8bb9ec8366aa48a8e5f810274701d8d4452989bf268fc338570bfdecddf8df",
// "Layers": [
// {
// "ID": "7095521e-b79e-50fc-bafb-958d85400362",
// "Path": "C:\\layers\\f9b22d909166dd54b870eb699d54f4cf36d99f035ffd7701aff1267230aefd1e"
// }
// ],
// "HvPartition": false
//}
//
// Sample v2 HCS document for Argon (no networking):
//
//{
// "Owner": "sample",
// "SchemaVersion": {
// "Major": 2,
// "Minor": 0
// },
// "Container": {
// "Storage": {
// "Layers": [
// {
// "Id": "6ba9cac1-7086-5ee9-a197-c465d3f50ad7",
// "Path": "C:\\layers\\f30368666ce4457e86fe12867506e508071d89e7eae615fc389c64f2e37ce54e"
// },
// {
// "Id": "300b3ac0-b603-5367-9494-afec045dd369",
// "Path": "C:\\layers\\7a6ad2b849a9d29e6648d9950c1975b0f614a63b5fe2803009ce131745abcc62"
// },
// {
// "Id": "fa3057d9-0d4b-54c0-b2d5-34b7afc78f91",
// "Path": "C:\\layers\\5d1332fe416f7932c344ce9c536402a6fc6d0bfcdf7a74f67cc67b8cfc66ab41"
// },
// {
// "Id": "23284a2c-cdda-582a-a175-a196211b03cb",
// "Path": "C:\\layers\\b95977ad18f8fa04e517daa2e814f73d69bfff55c3ea68d56f2b0b8ae23a235d"
// },
// {
// "Id": "e0233918-d93f-5b08-839e-0cbeda79b68b",
// "Path": "C:\\layers\\b2a444ff0e984ef282d6a8e24fa0108e76b6807d943e111a0e878c1c53ed8246"
// },
// {
// "Id": "02740e08-d1d3-5715-9c08-c255eab4ca01",
// "Path": "C:\\layers\\de6b1a908240cca2aef34f49994e7d4e25a8e157a2cef3b6d6cf2d8e6400bfc2"
// }
// ],
// "Path": "\\\\?\\Volume{baac0fd5-16b7-405b-9621-112aa8e3d973}\\"
// }
// },
// "ShouldTerminateOnLastHandleClosed": true
//}
//
//
// Sample v2 HCS document for Xenon (no networking)
//
//{
// "Owner": "functional.test.exe",
// "SchemaVersion": {
// "Major": 2,
// "Minor": 0
// },
// "HostingSystemId": "xenonOci2UVM",
// "HostedSystem": {
// "SchemaVersion": {
// "Major": 2,
// "Minor": 0
// },
// "Container": {
// "Storage": {
// "Layers": [
// {
// "Id": "6ba9cac1-7086-5ee9-a197-c465d3f50ad7",
// "Path": "\\\\?\\VMSMB\\VSMB-{dcc079ae-60ba-4d07-847c-3493609c0870}\\s1"
// },
// {
// "Id": "300b3ac0-b603-5367-9494-afec045dd369",
// "Path": "\\\\?\\VMSMB\\VSMB-{dcc079ae-60ba-4d07-847c-3493609c0870}\\s2"
// },
// {
// "Id": "fa3057d9-0d4b-54c0-b2d5-34b7afc78f91",
// "Path": "\\\\?\\VMSMB\\VSMB-{dcc079ae-60ba-4d07-847c-3493609c0870}\\s3"
// },
// {
// "Id": "23284a2c-cdda-582a-a175-a196211b03cb",
// "Path": "\\\\?\\VMSMB\\VSMB-{dcc079ae-60ba-4d07-847c-3493609c0870}\\4"
// },
// {
// "Id": "e0233918-d93f-5b08-839e-0cbeda79b68b",
// "Path": "\\\\?\\VMSMB\\VSMB-{dcc079ae-60ba-4d07-847c-3493609c0870}\\s5"
// },
// {
// "Id": "02740e08-d1d3-5715-9c08-c255eab4ca01",
// "Path": "\\\\?\\VMSMB\\VSMB-{dcc079ae-60ba-4d07-847c-3493609c0870}\\s6"
// }
// ],
// "Path": "C:\\c\\1\\scratch"
// },
// "MappedDirectories": [
// {
// "HostPath": "\\\\?\\VMSMB\\VSMB-{dcc079ae-60ba-4d07-847c-3493609c0870}\\s7",
// "ContainerPath": "c:\\mappedro",
// "ReadOnly": true
// },
// {
// "HostPath": "\\\\?\\VMSMB\\VSMB-{dcc079ae-60ba-4d07-847c-3493609c0870}\\s8",
// "ContainerPath": "c:\\mappedrw"
// }
// ]
// }
// },
// "ShouldTerminateOnLastHandleClosed": true
//}
// Helper to start a container.
// Ones created through hcsoci methods will be of type *hcs.System.
// Ones created through hcsshim methods will be of type hcsshim.Container
func startContainer(t *testing.T, c interface{}) {
var err error
switch c.(type) {
case *hcs.System:
err = c.(*hcs.System).Start()
case hcsshim.Container:
err = c.(hcsshim.Container).Start()
default:
t.Fatal("unknown type")
}
if err != nil {
t.Fatalf("Failed start: %s", err)
}
}
// Helper to stop a container.
// Ones created through hcsoci methods will be of type *hcs.System.
// Ones created through hcsshim methods will be of type hcsshim.Container
func stopContainer(t *testing.T, c interface{}) {
switch c.(type) {
case *hcs.System:
if err := c.(*hcs.System).Shutdown(); err != nil {
if hcsshim.IsPending(err) {
if err := c.(*hcs.System).Wait(); err != nil {
t.Fatalf("Failed Wait shutdown: %s", err)
}
} else {
t.Fatalf("Failed shutdown: %s", err)
}
}
c.(*hcs.System).Terminate()
case hcsshim.Container:
if err := c.(hcsshim.Container).Shutdown(); err != nil {
if hcsshim.IsPending(err) {
if err := c.(hcsshim.Container).Wait(); err != nil {
t.Fatalf("Failed Wait shutdown: %s", err)
}
} else {
t.Fatalf("Failed shutdown: %s", err)
}
}
c.(hcsshim.Container).Terminate()
default:
t.Fatalf("unknown type")
}
}
// Helper to launch a process in a container created through the hcsshim methods.
// At the point of calling, the container must have been successfully created.
func runShimCommand(t *testing.T,
c hcsshim.Container,
command string,
workdir string,
expectedExitCode int,
expectedOutput string) {
if c == nil {
t.Fatalf("requested container to start is nil!")
}
p, err := c.CreateProcess(&hcsshim.ProcessConfig{
CommandLine: command,
WorkingDirectory: workdir,
CreateStdInPipe: true,
CreateStdOutPipe: true,
CreateStdErrPipe: true,
})
if err != nil {
t.Fatalf("Failed Create Process: %s", err)
}
defer p.Close()
if err := p.Wait(); err != nil {
t.Fatalf("Failed Wait Process: %s", err)
}
exitCode, err := p.ExitCode()
if err != nil {
t.Fatalf("Failed to obtain process exit code: %s", err)
}
if exitCode != expectedExitCode {
t.Fatalf("Exit code from %s wasn't %d (%d)", command, expectedExitCode, exitCode)
}
_, o, _, err := p.Stdio()
if err != nil {
t.Fatalf("Failed to get Stdio handles for process: %s", err)
}
buf := new(bytes.Buffer)
buf.ReadFrom(o)
out := strings.TrimSpace(buf.String())
if expectedOutput != "" {
if out != expectedOutput {
t.Fatalf("Failed to get %q from process: %q", expectedOutput, out)
}
}
}
func runShimCommands(t *testing.T, c hcsshim.Container) {
runShimCommand(t, c, `echo Hello`, `c:\`, 0, "Hello")
// Check that read-only doesn't allow deletion or creation
runShimCommand(t, c, `ls c:\mappedro\readonly`, `c:\`, 0, `c:\mappedro\readonly`)
runShimCommand(t, c, `rm c:\mappedro\readonly`, `c:\`, 1, "")
runShimCommand(t, c, `cp readonly fail`, `c:\mappedro`, 1, "")
runShimCommand(t, c, `ls`, `c:\mappedro`, 0, `readonly`)
// Check that read-write allows new file creation and removal
runShimCommand(t, c, `ls`, `c:\mappedrw`, 0, `readwrite`)
runShimCommand(t, c, `cp readwrite succeeds`, `c:\mappedrw`, 0, "")
runShimCommand(t, c, `ls`, `c:\mappedrw`, 0, "readwrite\nsucceeds")
runShimCommand(t, c, `rm succeeds`, `c:\mappedrw`, 0, "")
runShimCommand(t, c, `ls`, `c:\mappedrw`, 0, `readwrite`)
}
func runHcsCommands(t *testing.T, c *hcs.System) {
runHcsCommand(t, c, `echo Hello`, `c:\`, 0, "Hello")
// Check that read-only doesn't allow deletion or creation
runHcsCommand(t, c, `ls c:\mappedro\readonly`, `c:\`, 0, `c:\mappedro\readonly`)
runHcsCommand(t, c, `rm c:\mappedro\readonly`, `c:\`, 1, "")
runHcsCommand(t, c, `cp readonly fail`, `c:\mappedro`, 1, "")
runHcsCommand(t, c, `ls`, `c:\mappedro`, 0, `readonly`)
// Check that read-write allows new file creation and removal
runHcsCommand(t, c, `ls`, `c:\mappedrw`, 0, `readwrite`)
runHcsCommand(t, c, `cp readwrite succeeds`, `c:\mappedrw`, 0, "")
runHcsCommand(t, c, `ls`, `c:\mappedrw`, 0, "readwrite\nsucceeds")
runHcsCommand(t, c, `rm succeeds`, `c:\mappedrw`, 0, "")
runHcsCommand(t, c, `ls`, `c:\mappedrw`, 0, `readwrite`)
}
// Helper to launch a process in a container created through the hcsoci methods.
// At the point of calling, the container must have been successfully created.
func runHcsCommand(t *testing.T,
c *hcs.System,
command string,
workdir string,
expectedExitCode int,
expectedOutput string) {
if c == nil {
t.Fatalf("requested container to start is nil!")
}
p, err := c.CreateProcess(&hcsshim.ProcessConfig{
CommandLine: command,
WorkingDirectory: workdir,
CreateStdInPipe: true,
CreateStdOutPipe: true,
CreateStdErrPipe: true,
})
if err != nil {
t.Fatalf("Failed Create Process: %s", err)
}
defer p.Close()
if err := p.Wait(); err != nil {
t.Fatalf("Failed Wait Process: %s", err)
}
exitCode, err := p.ExitCode()
if err != nil {
t.Fatalf("Failed to obtain process exit code: %s", err)
}
if exitCode != expectedExitCode {
t.Fatalf("Exit code from %s wasn't %d (%d)", command, expectedExitCode, exitCode)
}
_, o, _, err := p.Stdio()
if err != nil {
t.Fatalf("Failed to get Stdio handles for process: %s", err)
}
buf := new(bytes.Buffer)
buf.ReadFrom(o)
out := strings.TrimSpace(buf.String())
if expectedOutput != "" {
if out != expectedOutput {
t.Fatalf("Failed to get %q from process: %q", expectedOutput, out)
}
}
}
// busybox is used as it has lots of layers, which exercises more code.
// Also, the commands are more flexible for verification.
const imageName = "busyboxw"
// Creates two temp folders used for the mounts/mapped directories
func createTestMounts(t *testing.T) (string, string) {
// Create two temp folders for mapped directories.
hostRWSharedDirectory := testutilities.CreateTempDir(t)
hostROSharedDirectory := testutilities.CreateTempDir(t)
fRW, _ := os.OpenFile(filepath.Join(hostRWSharedDirectory, "readwrite"), os.O_RDWR|os.O_CREATE, 0755)
fRO, _ := os.OpenFile(filepath.Join(hostROSharedDirectory, "readonly"), os.O_RDWR|os.O_CREATE, 0755)
fRW.Close()
fRO.Close()
return hostRWSharedDirectory, hostROSharedDirectory
}
// For calling the hcsshim interface, we need hcsshim.Layer built from an image's layer folders
func generateShimLayersStruct(t *testing.T, imageLayers []string) []hcsshim.Layer {
var layers []hcsshim.Layer
for _, layerFolder := range imageLayers {
guid, _ := wclayer.NameToGuid(filepath.Base(layerFolder))
layers = append(layers, hcsshim.Layer{Path: layerFolder, ID: guid.String()})
}
return layers
}
// Argon through HCSShim interface (v1)
func TestWCOWArgonShim(t *testing.T) {
imageLayers := testutilities.LayerFolders(t, imageName)
argonShimMounted := false
argonShimScratchDir := testutilities.CreateTempDir(t)
defer os.RemoveAll(argonShimScratchDir)
if err := wclayer.CreateScratchLayer(argonShimScratchDir, imageLayers); err != nil {
t.Fatalf("failed to create argon scratch layer: %s", err)
}
hostRWSharedDirectory, hostROSharedDirectory := createTestMounts(t)
defer os.RemoveAll(hostRWSharedDirectory)
defer os.RemoveAll(hostROSharedDirectory)
layers := generateShimLayersStruct(t, imageLayers)
// For cleanup on failure
defer func() {
if argonShimMounted {
hcsoci.UnmountContainerLayers(append(imageLayers, argonShimScratchDir), "", nil, hcsoci.UnmountOperationAll)
}
}()
// This is a cheat but stops us re-writing exactly the same code just for this test
argonShimLocalMountPath, err := hcsoci.MountContainerLayers(append(imageLayers, argonShimScratchDir), "", nil)
if err != nil {
t.Fatal(err)
}
argonShimMounted = true
argonShim, err := hcsshim.CreateContainer("argon", &hcsshim.ContainerConfig{
SystemType: "Container",
Name: "argonShim",
VolumePath: argonShimLocalMountPath.(string),
LayerFolderPath: argonShimScratchDir,
Layers: layers,
MappedDirectories: []schema1.MappedDir{
{
HostPath: hostROSharedDirectory,
ContainerPath: `c:\mappedro`,
ReadOnly: true,
},
{
HostPath: hostRWSharedDirectory,
ContainerPath: `c:\mappedrw`,
},
},
HvRuntime: nil,
})
if err != nil {
t.Fatal(err)
}
startContainer(t, argonShim)
runShimCommands(t, argonShim)
stopContainer(t, argonShim)
if err := hcsoci.UnmountContainerLayers(append(imageLayers, argonShimScratchDir), "", nil, hcsoci.UnmountOperationAll); err != nil {
t.Fatal(err)
}
argonShimMounted = false
}
// Xenon through HCSShim interface (v1)
func TestWCOWXenonShim(t *testing.T) {
imageLayers := testutilities.LayerFolders(t, imageName)
xenonShimScratchDir := testutilities.CreateTempDir(t)
defer os.RemoveAll(xenonShimScratchDir)
if err := wclayer.CreateScratchLayer(xenonShimScratchDir, imageLayers); err != nil {
t.Fatalf("failed to create xenon scratch layer: %s", err)
}
hostRWSharedDirectory, hostROSharedDirectory := createTestMounts(t)
defer os.RemoveAll(hostRWSharedDirectory)
defer os.RemoveAll(hostROSharedDirectory)
uvmImagePath, err := uvmfolder.LocateUVMFolder(imageLayers)
if err != nil {
t.Fatalf("LocateUVMFolder failed %s", err)
}
layers := generateShimLayersStruct(t, imageLayers)
xenonShim, err := hcsshim.CreateContainer("xenon", &hcsshim.ContainerConfig{
SystemType: "Container",
Name: "xenonShim",
LayerFolderPath: xenonShimScratchDir,
Layers: layers,
HvRuntime: &hcsshim.HvRuntime{ImagePath: filepath.Join(uvmImagePath, "UtilityVM")},
HvPartition: true,
MappedDirectories: []schema1.MappedDir{
{
HostPath: hostROSharedDirectory,
ContainerPath: `c:\mappedro`,
ReadOnly: true,
},
{
HostPath: hostRWSharedDirectory,
ContainerPath: `c:\mappedrw`,
},
},
})
if err != nil {
t.Fatal(err)
}
startContainer(t, xenonShim)
runShimCommands(t, xenonShim)
stopContainer(t, xenonShim)
}
func generateWCOWOciTestSpec(t *testing.T, imageLayers []string, scratchPath, hostRWSharedDirectory, hostROSharedDirectory string) *specs.Spec {
return &specs.Spec{
Windows: &specs.Windows{
LayerFolders: append(imageLayers, scratchPath),
},
Mounts: []specs.Mount{
{
Source: hostROSharedDirectory,
Destination: `c:\mappedro`,
Options: []string{"ro"},
},
{
Source: hostRWSharedDirectory,
Destination: `c:\mappedrw`,
},
},
}
}
// Argon through HCSOCI interface (v1)
func TestWCOWArgonOciV1(t *testing.T) {
imageLayers := testutilities.LayerFolders(t, imageName)
argonOci1Mounted := false
argonOci1ScratchDir := testutilities.CreateTempDir(t)
defer os.RemoveAll(argonOci1ScratchDir)
if err := wclayer.CreateScratchLayer(argonOci1ScratchDir, imageLayers); err != nil {
t.Fatalf("failed to create argon scratch layer: %s", err)
}
hostRWSharedDirectory, hostROSharedDirectory := createTestMounts(t)
defer os.RemoveAll(hostRWSharedDirectory)
defer os.RemoveAll(hostROSharedDirectory)
// For cleanup on failure
var argonOci1Resources *hcsoci.Resources
var argonOci1 *hcs.System
defer func() {
if argonOci1Mounted {
hcsoci.ReleaseResources(argonOci1Resources, nil, true)
}
}()
var err error
spec := generateWCOWOciTestSpec(t, imageLayers, argonOci1ScratchDir, hostRWSharedDirectory, hostROSharedDirectory)
argonOci1, argonOci1Resources, err = hcsoci.CreateContainer(
&hcsoci.CreateOptions{
ID: "argonOci1",
SchemaVersion: schemaversion.SchemaV10(),
Spec: spec,
})
if err != nil {
t.Fatal(err)
}
argonOci1Mounted = true
startContainer(t, argonOci1)
runHcsCommands(t, argonOci1)
stopContainer(t, argonOci1)
if err := hcsoci.ReleaseResources(argonOci1Resources, nil, true); err != nil {
t.Fatal(err)
}
argonOci1Mounted = false
}
// Xenon through HCSOCI interface (v1)
func TestWCOWXenonOciV1(t *testing.T) {
imageLayers := testutilities.LayerFolders(t, imageName)
xenonOci1Mounted := false
xenonOci1ScratchDir := testutilities.CreateTempDir(t)
defer os.RemoveAll(xenonOci1ScratchDir)
if err := wclayer.CreateScratchLayer(xenonOci1ScratchDir, imageLayers); err != nil {
t.Fatalf("failed to create xenon scratch layer: %s", err)
}
hostRWSharedDirectory, hostROSharedDirectory := createTestMounts(t)
defer os.RemoveAll(hostRWSharedDirectory)
defer os.RemoveAll(hostROSharedDirectory)
// TODO: This isn't currently used.
// uvmImagePath, err := uvmfolder.LocateUVMFolder(imageLayers)
// if err != nil {
// t.Fatalf("LocateUVMFolder failed %s", err)
// }
// For cleanup on failure
var xenonOci1Resources *hcsoci.Resources
var xenonOci1 *hcs.System
defer func() {
if xenonOci1Mounted {
hcsoci.ReleaseResources(xenonOci1Resources, nil, true)
}
}()
var err error
spec := generateWCOWOciTestSpec(t, imageLayers, xenonOci1ScratchDir, hostRWSharedDirectory, hostROSharedDirectory)
spec.Windows.HyperV = &specs.WindowsHyperV{}
xenonOci1, xenonOci1Resources, err = hcsoci.CreateContainer(
&hcsoci.CreateOptions{
ID: "xenonOci1",
SchemaVersion: schemaversion.SchemaV10(),
Spec: spec,
})
if err != nil {
t.Fatal(err)
}
xenonOci1Mounted = true
startContainer(t, xenonOci1)
runHcsCommands(t, xenonOci1)
stopContainer(t, xenonOci1)
if err := hcsoci.ReleaseResources(xenonOci1Resources, nil, true); err != nil {
t.Fatal(err)
}
xenonOci1Mounted = false
}
// Argon through HCSOCI interface (v2)
func TestWCOWArgonOciV2(t *testing.T) {
testutilities.RequiresBuild(t, osversion.RS5)
imageLayers := testutilities.LayerFolders(t, imageName)
argonOci2Mounted := false
argonOci2ScratchDir := testutilities.CreateTempDir(t)
defer os.RemoveAll(argonOci2ScratchDir)
if err := wclayer.CreateScratchLayer(argonOci2ScratchDir, imageLayers); err != nil {
t.Fatalf("failed to create argon scratch layer: %s", err)
}
hostRWSharedDirectory, hostROSharedDirectory := createTestMounts(t)
defer os.RemoveAll(hostRWSharedDirectory)
defer os.RemoveAll(hostROSharedDirectory)
// For cleanup on failure
var argonOci2Resources *hcsoci.Resources
var argonOci2 *hcs.System
defer func() {
if argonOci2Mounted {
hcsoci.ReleaseResources(argonOci2Resources, nil, true)
}
}()
var err error
spec := generateWCOWOciTestSpec(t, imageLayers, argonOci2ScratchDir, hostRWSharedDirectory, hostROSharedDirectory)
argonOci2, argonOci2Resources, err = hcsoci.CreateContainer(
&hcsoci.CreateOptions{
ID: "argonOci2",
SchemaVersion: schemaversion.SchemaV21(),
Spec: spec,
})
if err != nil {
t.Fatal(err)
}
argonOci2Mounted = true
startContainer(t, argonOci2)
runHcsCommands(t, argonOci2)
stopContainer(t, argonOci2)
if err := hcsoci.ReleaseResources(argonOci2Resources, nil, true); err != nil {
t.Fatal(err)
}
argonOci2Mounted = false
}
// Xenon through HCSOCI interface (v2)
func TestWCOWXenonOciV2(t *testing.T) {
testutilities.RequiresBuild(t, osversion.RS5)
imageLayers := testutilities.LayerFolders(t, imageName)
xenonOci2Mounted := false
xenonOci2UVMCreated := false
xenonOci2ScratchDir := testutilities.CreateTempDir(t)
defer os.RemoveAll(xenonOci2ScratchDir)
if err := wclayer.CreateScratchLayer(xenonOci2ScratchDir, imageLayers); err != nil {
t.Fatalf("failed to create xenon scratch layer: %s", err)
}
hostRWSharedDirectory, hostROSharedDirectory := createTestMounts(t)
defer os.RemoveAll(hostRWSharedDirectory)
defer os.RemoveAll(hostROSharedDirectory)
uvmImagePath, err := uvmfolder.LocateUVMFolder(imageLayers)
if err != nil {
t.Fatalf("LocateUVMFolder failed: %s", err)
}
var xenonOci2Resources *hcsoci.Resources
var xenonOci2 *hcs.System
var xenonOci2UVM *uvm.UtilityVM
defer func() {
if xenonOci2Mounted {
hcsoci.ReleaseResources(xenonOci2Resources, xenonOci2UVM, true)
}
if xenonOci2UVMCreated {
xenonOci2UVM.Close()
}
}()
// Create the utility VM.
xenonOci2UVMId := "xenonOci2UVM"
xenonOci2UVMScratchDir := testutilities.CreateTempDir(t)
if err := wcow.CreateUVMScratch(uvmImagePath, xenonOci2UVMScratchDir, xenonOci2UVMId); err != nil {
t.Fatalf("failed to create scratch: %s", err)
}
xenonOciOpts := uvm.NewDefaultOptionsWCOW(xenonOci2UVMId, "")
xenonOciOpts.LayerFolders = append(imageLayers, xenonOci2UVMScratchDir)
xenonOci2UVM, err = uvm.CreateWCOW(xenonOciOpts)
if err != nil {
t.Fatalf("failed to create UVM: %s", err)
}
xenonOci2UVMCreated = true
if err := xenonOci2UVM.Start(); err != nil {
xenonOci2UVM.Close()
t.Fatalf("failed to start UVM: %s", err)
}
spec := generateWCOWOciTestSpec(t, imageLayers, xenonOci2ScratchDir, hostRWSharedDirectory, hostROSharedDirectory)
xenonOci2, xenonOci2Resources, err = hcsoci.CreateContainer(
&hcsoci.CreateOptions{
ID: "xenonOci2",
HostingSystem: xenonOci2UVM,
SchemaVersion: schemaversion.SchemaV21(),
Spec: spec,
})
if err != nil {
t.Fatal(err)
}
xenonOci2Mounted = true
startContainer(t, xenonOci2)
runHcsCommands(t, xenonOci2)
stopContainer(t, xenonOci2)
if err := hcsoci.ReleaseResources(xenonOci2Resources, xenonOci2UVM, true); err != nil {
t.Fatal(err)
}
xenonOci2Mounted = false
// Terminate the UVM
xenonOci2UVM.Close()
xenonOci2UVMCreated = false
}

@ -0,0 +1,51 @@
// +build functional wcow wcowv2 wcowv2xenon

package functional
//import (
// "os"
// "testing"
// "github.com/Microsoft/hcsshim/test/functional/utilities"
// "github.com/Microsoft/hcsshim/internal/guid"
// "github.com/Microsoft/hcsshim/internal/hcsoci"
// "github.com/Microsoft/hcsshim/osversion"
// "github.com/Microsoft/hcsshim/internal/schemaversion"
// "github.com/Microsoft/hcsshim/internal/uvm"
// "github.com/Microsoft/hcsshim/internal/uvmfolder"
// "github.com/Microsoft/hcsshim/internal/wclayer"
// "github.com/Microsoft/hcsshim/internal/wcow"
// specs "github.com/opencontainers/runtime-spec/specs-go"
//)
// TODO. This might be worth porting.
//// Lots of v2 WCOW containers in the same UVM, each with a single base layer. Containers aren't
//// actually started, but it stresses the SCSI controller hot-add logic.
//func TestV2XenonWCOWCreateLots(t *testing.T) {
// t.Skip("Skipping for now")
// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "TestV2XenonWCOWCreateLots", nil)
// defer os.RemoveAll(uvmScratchDir)
// defer uvm.Close()
// // 63 because slot 0:0 is already taken by the UVM's scratch, leaving 64-1 slots for container scratches on SCSI (see the capacity note after this block).
// for i := 0; i < 63; i++ {
// containerScratchDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(containerScratchDir)
// layerFolders := append(layersNanoserver, containerScratchDir)
// hostedContainer, err := CreateContainer(&CreateOptions{
// Id: fmt.Sprintf("container%d", i),
// HostingSystem: uvm,
// SchemaVersion: schemaversion.SchemaV21(),
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layerFolders}},
// })
// if err != nil {
// t.Fatalf("CreateContainer failed: %s", err)
// }
// defer hostedContainer.Terminate()
// defer unmountContainerLayers(layerFolders, uvm, unmountOperationAll)
// }
// // TODO: Should check the internal structures here for VSMB and SCSI
// // TODO: Push it over 63 now and will get a failure.
//}
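// Capacity note (a sketch; the slot count is inferred from the comment above,
// not verified here): with 64 SCSI attachment points per utility VM and slot
// 0:0 reserved for the UVM's own scratch, 64-1 = 63 container scratches can be
// hot-added, which is why the commented loop above runs i from 0 through 62.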

@ -0,0 +1,67 @@
// +build integration

package runhcs
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"testing"
runhcs "github.com/Microsoft/hcsshim/pkg/go-runhcs"
)
func Test_CreateScratch_EmptyDestpath_Fail(t *testing.T) {
rhcs := runhcs.Runhcs{
Debug: true,
}
ctx := context.TODO()
err := rhcs.CreateScratch(ctx, "")
if err == nil {
t.Fatal("Should have failed 'CreateScratch' command.")
}
}
func Test_CreateScratch_DirDestpath_Failure(t *testing.T) {
rhcs := runhcs.Runhcs{
Debug: true,
}
td, err := ioutil.TempDir("", "CreateScratch")
if err != nil {
t.Fatal(err)
}
defer os.Remove(td)
ctx := context.TODO()
err = rhcs.CreateScratch(ctx, td)
if err == nil {
t.Fatal("Should have failed 'CreateScratch' command with dir destpath")
}
}
func Test_CreateScratch_ValidDestpath_Success(t *testing.T) {
rhcs := runhcs.Runhcs{
Debug: true,
}
td, err := ioutil.TempDir("", "CreateScratch")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(td)
scratchPath := filepath.Join(td, "scratch.vhdx")
ctx := context.TODO()
err = rhcs.CreateScratch(ctx, scratchPath)
if err != nil {
t.Fatalf("Failed 'CreateScratch' command with: %v", err)
}
_, err = os.Stat(scratchPath)
if err != nil {
t.Fatalf("Failed to stat scratch path with: %v", err)
}
}

@ -0,0 +1,391 @@
// +build integration

package runhcs
import (
"bytes"
"context"
"encoding/json"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strconv"
"syscall"
"testing"
"github.com/Microsoft/go-winio/vhd"
"github.com/Microsoft/hcsshim/osversion"
runhcs "github.com/Microsoft/hcsshim/pkg/go-runhcs"
testutilities "github.com/Microsoft/hcsshim/test/functional/utilities"
runc "github.com/containerd/go-runc"
"github.com/opencontainers/runtime-tools/generate"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
)
// The support matrix of runhcs.exe for end-to-end activations is quite
// complex. These tests attempt to codify a simple start test for each supported
// host/guest/isolation combination so that we have at least minimal confidence,
// when changing code, that activations across all platforms still work.
//
// Host OS | Container OS | Isolation
//
// RS1 | RS1 | V1 - Argon, Xenon
//
// RS3 | RS1 | V1 - Xenon
// | RS3 | V1 - Argon, Xenon
//
// RS4 | RS1, RS3 | V1 - Xenon
// | RS4 | V1 - Argon, Xenon
//
// RS5 | RS1, RS3, RS4 | V2 - UVM + Argon
// | RS5 | V2 - Argon, UVM + Argon, UVM + Argon (s) (POD's)
// | LCOW | V2 - UVM + Linux Container, UVM + Linux Container (s) (POD's)
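// Illustrative sketch (hypothetical name, not part of the vendored tests): the
// Xenon/UVM legs of the matrix above reduce to a map from host build to the
// guest builds it can host, which is exactly what the Test_RS*_Xenon and
// Test_RS5_UVMAndContainer functions below iterate over.
// var xenonGuestsByHost = map[int][]int{
// 	osversion.RS1: {osversion.RS1},
// 	osversion.RS3: {osversion.RS1, osversion.RS3},
// 	osversion.RS4: {osversion.RS1, osversion.RS3, osversion.RS4},
// 	osversion.RS5: {osversion.RS1, osversion.RS3, osversion.RS4, osversion.RS5},
// }

// Compile-time check that *testIO implements the runc.IO interface consumed by
// go-runhcs for wiring up process stdio.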
var _ = (runc.IO)(&testIO{})
type testIO struct {
g *errgroup.Group
or, ow *os.File
outBuff *bytes.Buffer
er, ew *os.File
errBuff *bytes.Buffer
}
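// newTestIO builds a pipe-backed stdio relay: Set hands the write ends to the
// child process, while two goroutines copy the read ends into in-memory
// buffers that the test can assert against after the process exits.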
func newTestIO(t *testing.T) *testIO {
var err error
tio := &testIO{
outBuff: &bytes.Buffer{},
errBuff: &bytes.Buffer{},
}
defer func() {
if err != nil {
tio.Close()
}
}()
r, w, err := os.Pipe()
if err != nil {
t.Fatalf("failed to create stdout pipes: %v", err)
}
tio.or, tio.ow = r, w
r, w, err = os.Pipe()
if err != nil {
t.Fatalf("failed to create stderr pipes: %v", err)
}
tio.er, tio.ew = r, w
g, _ := errgroup.WithContext(context.TODO())
tio.g = g
tio.g.Go(func() error {
_, err := io.Copy(tio.outBuff, tio.Stdout())
return err
})
tio.g.Go(func() error {
_, err := io.Copy(tio.errBuff, tio.Stderr())
return err
})
return tio
}
func (t *testIO) Stdin() io.WriteCloser {
return nil
}
func (t *testIO) Stdout() io.ReadCloser {
return t.or
}
func (t *testIO) Stderr() io.ReadCloser {
return t.er
}
func (t *testIO) Set(cmd *exec.Cmd) {
cmd.Stdout = t.ow
cmd.Stderr = t.ew
}
func (t *testIO) Close() error {
var err error
for _, v := range []*os.File{
t.ow, t.ew,
t.or, t.er,
} {
if cerr := v.Close(); err == nil {
err = cerr
}
}
return err
}
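// CloseAfterStart drops the parent's copies of the write ends once the child
// has inherited them, so the relay goroutines observe EOF when the child exits.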
func (t *testIO) CloseAfterStart() error {
t.ow.Close()
t.ew.Close()
return nil
}
func (t *testIO) Wait() error {
return t.g.Wait()
}
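// getWindowsImageNameByVersion maps a Windows build number to the nanoserver
// image tag of the matching version so guest and host OS levels line up.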
func getWindowsImageNameByVersion(t *testing.T, bv int) string {
switch bv {
case osversion.RS1:
return "mcr.microsoft.com/windows/nanoserver:sac2016"
case osversion.RS3:
return "mcr.microsoft.com/windows/nanoserver:1709"
case osversion.RS4:
return "mcr.microsoft.com/windows/nanoserver:1803"
case osversion.RS5:
// testImage = "mcr.microsoft.com/windows/nanoserver:1809"
return "mcr.microsoft.com/windows/nanoserver/insider:10.0.17763.55"
default:
t.Fatalf("unsupported build (%d) for Windows containers", bv)
}
// Won't hit because of t.Fatal
return ""
}
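// readPidFile parses the shim pid from the pid file runhcs was asked to write
// at create time (see CreateOpts.PidFile below).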
func readPidFile(path string) (int, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return -1, errors.Wrap(err, "failed to read pidfile")
}
p, err := strconv.Atoi(string(data))
if err != nil {
return -1, errors.Wrap(err, "failed to parse pid from pidfile")
}
return p, nil
}
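// testWindows performs a single end-to-end activation for the given guest
// image version: build an OCI bundle and scratch layer, generate the spec
// (Hyper-V isolated when requested), create and start the container via
// runhcs, wait for the process to exit, and verify it printed "Hello World!".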
func testWindows(t *testing.T, version int, isolated bool) {
var err error
// Make the bundle
bundle := testutilities.CreateTempDir(t)
defer func() {
if err == nil {
os.RemoveAll(bundle)
} else {
t.Errorf("additional logs at bundle path: %v", bundle)
}
}()
scratch := testutilities.CreateTempDir(t)
defer func() {
vhd.DetachVhd(filepath.Join(scratch, "sandbox.vhdx"))
os.RemoveAll(scratch)
}()
// Generate the Spec
g, err := generate.New("windows")
if err != nil {
t.Errorf("failed to generate Windows config with error: %v", err)
return
}
g.SetProcessArgs([]string{"cmd", "/c", "echo Hello World!"})
if isolated {
g.SetWindowsHypervUntilityVMPath("")
}
g.Config.Windows.Network = nil
// Get the LayerFolders
imageName := getWindowsImageNameByVersion(t, version)
layers := testutilities.LayerFolders(t, imageName)
for _, layer := range layers {
g.AddWindowsLayerFolders(layer)
}
g.AddWindowsLayerFolders(scratch)
cf, err := os.Create(filepath.Join(bundle, "config.json"))
if err != nil {
t.Errorf("failed to create config.json with error: %v", err)
return
}
err = json.NewEncoder(cf).Encode(g.Config)
if err != nil {
cf.Close()
t.Errorf("failed to encode config.json with error: %v", err)
return
}
cf.Close()
// Create the Argon, Xenon, or UVM
ctx := context.TODO()
rhcs := runhcs.Runhcs{
Debug: true,
}
tio := newTestIO(t)
defer func() {
if err != nil {
t.Errorf("additional info stdout: '%v', stderr: '%v'", tio.outBuff.String(), tio.errBuff.String())
}
}()
defer func() {
tio.Close()
}()
copts := &runhcs.CreateOpts{
IO: tio,
PidFile: filepath.Join(bundle, "pid-file.txt"),
ShimLog: filepath.Join(bundle, "shim-log.txt"),
}
if isolated {
copts.VMLog = filepath.Join(bundle, "vm-log.txt")
}
err = rhcs.Create(ctx, t.Name(), bundle, copts)
if err != nil {
t.Errorf("failed to create container with error: %v", err)
return
}
defer func() {
rhcs.Delete(ctx, t.Name(), &runhcs.DeleteOpts{Force: true})
}()
// Find the shim/vmshim process and begin exit wait
pid, err := readPidFile(copts.PidFile)
if err != nil {
t.Errorf("failed to read pidfile with error: %v", err)
return
}
p, err := os.FindProcess(pid)
if err != nil {
t.Errorf("failed to find container process by pid: %d, with error: %v", pid, err)
return
}
// Start the container
err = rhcs.Start(ctx, t.Name())
if err != nil {
t.Errorf("failed to start container with error: %v", err)
return
}
defer func() {
if err != nil {
rhcs.Kill(ctx, t.Name(), "CtrlC")
}
}()
// Wait for process exit, verify the exited state. Note that os.Process.Wait
// reports the exit code via *os.ProcessState rather than through an error.
ps, eerr := p.Wait()
if eerr != nil {
err = eerr
t.Errorf("failed to wait on container process with error: %v", eerr)
return
}
var exitStatus int
if ws, ok := ps.Sys().(syscall.WaitStatus); ok {
exitStatus = ws.ExitStatus()
}
if exitStatus != 0 {
err = errors.Errorf("container process exited with status %d", exitStatus)
t.Errorf("container process failed with exit status: %d", exitStatus)
return
}
// Wait for the relay to exit
tio.Wait()
outString := tio.outBuff.String()
if outString != "Hello World!\r\n" {
t.Errorf("stdout expected: 'Hello World!', got: '%v'", outString)
}
errString := tio.errBuff.String()
if errString != "" {
t.Errorf("stderr expected: '', got: '%v'", errString)
}
}
func testWindowsPod(t *testing.T, version int, isolated bool) {
t.Skip("not implemented")
}
func testLCOW(t *testing.T) {
t.Skip("not implemented")
}
func testLCOWPod(t *testing.T) {
t.Skip("not implemented")
}
func Test_RS1_Argon(t *testing.T) {
testutilities.RequiresExactBuild(t, osversion.RS1)
testWindows(t, osversion.RS1, false)
}
func Test_RS1_Xenon(t *testing.T) {
testutilities.RequiresExactBuild(t, osversion.RS1)
testWindows(t, osversion.RS1, true)
}
func Test_RS3_Argon(t *testing.T) {
testutilities.RequiresExactBuild(t, osversion.RS3)
testWindows(t, osversion.RS3, false)
}
func Test_RS3_Xenon(t *testing.T) {
testutilities.RequiresExactBuild(t, osversion.RS3)
guests := []int{osversion.RS1, osversion.RS3}
for _, g := range guests {
testWindows(t, g, true)
}
}
func Test_RS4_Argon(t *testing.T) {
testutilities.RequiresExactBuild(t, osversion.RS4)
testWindows(t, osversion.RS4, false)
}
func Test_RS4_Xenon(t *testing.T) {
testutilities.RequiresExactBuild(t, osversion.RS4)
guests := []int{osversion.RS1, osversion.RS3, osversion.RS4}
for _, g := range guests {
testWindows(t, g, true)
}
}
func Test_RS5_Argon(t *testing.T) {
testutilities.RequiresExactBuild(t, osversion.RS5)
testWindows(t, osversion.RS5, false)
}
func Test_RS5_ArgonPods(t *testing.T) {
testutilities.RequiresExactBuild(t, osversion.RS5)
testWindowsPod(t, osversion.RS5, false)
}
func Test_RS5_UVMAndContainer(t *testing.T) {
testutilities.RequiresExactBuild(t, osversion.RS5)
guests := []int{osversion.RS1, osversion.RS3, osversion.RS4, osversion.RS5}
for _, g := range guests {
testWindows(t, g, true)
}
}
func Test_RS5_UVMPods(t *testing.T) {
testutilities.RequiresExactBuild(t, osversion.RS5)
testWindowsPod(t, osversion.RS5, true)
}
func Test_RS5_LCOW(t *testing.T) {
testutilities.RequiresExactBuild(t, osversion.RS5)
testLCOW(t)
}
func Test_RS5_LCOW_UVMPods(t *testing.T) {
testutilities.RequiresExactBuild(t, osversion.RS5)
testLCOWPod(t)
}

@ -0,0 +1,25 @@
// +build integration

package runhcs
import (
"context"
"testing"
runhcs "github.com/Microsoft/hcsshim/pkg/go-runhcs"
)
func Test_List_NoContainers(t *testing.T) {
rhcs := runhcs.Runhcs{
Debug: true,
}
ctx := context.TODO()
cs, err := rhcs.List(ctx)
if err != nil {
t.Fatalf("Failed 'List' command with: %v", err)
}
if len(cs) != 0 {
t.Fatalf("Length of ContainerState array expected: 0, actual: %d", len(cs))
}
}

@ -0,0 +1,7 @@
// +build integration

package runhcs
import (
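// Imported for side effects only (presumably to embed the Windows application
// manifest the test binary needs before calling the HCS APIs).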
_ "github.com/Microsoft/hcsshim/test/functional/manifest"
)