Windows: Updates Windows Vendoring

Updates Windows-dependent libraries for vendoring.
This commit is contained in:
Nathan Gieseker
2019-01-23 18:43:18 -08:00
parent a686cc4bd8
commit 9a429d8d25
839 changed files with 282895 additions and 774 deletions

View File

@@ -0,0 +1,173 @@
// +build windows
package hcsoci
import (
"errors"
"fmt"
"os"
"path/filepath"
"strconv"
"github.com/Microsoft/hcsshim/internal/guid"
"github.com/Microsoft/hcsshim/internal/hcs"
"github.com/Microsoft/hcsshim/internal/schema2"
"github.com/Microsoft/hcsshim/internal/schemaversion"
"github.com/Microsoft/hcsshim/internal/uvm"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
)
// CreateOptions are the set of fields used to call CreateContainer().
// Note: In the spec, the LayerFolders must be arranged in the same way in which
// moby configures them: layern, layern-1,...,layer2,layer1,scratch
// where layer1 is the base read-only layer, layern is the top-most read-only
// layer, and scratch is the RW layer. This is for historical reasons only.
type CreateOptions struct {
// Common parameters
ID string // Identifier for the container
Owner string // Specifies the owner. Defaults to executable name.
Spec *specs.Spec // Definition of the container or utility VM being created
SchemaVersion *hcsschema.Version // Requested Schema Version. Defaults to v2 for RS5, v1 for RS1..RS4
HostingSystem *uvm.UtilityVM // Utility or service VM in which the container is to be created.
NetworkNamespace string // Host network namespace to use (overrides anything in the spec)
// This is an advanced debugging parameter. It allows for diagnosability by leaving a container's
// resources allocated in case of a failure, so that tools such as hcsdiag can be used to inspect
// the state of a utility VM and see what resources were allocated. Obviously the caller must
// (a) not tear down the utility VM on failure (or pause in some way), and (b) perform the
// ReleaseResources() call themselves.
DoNotReleaseResourcesOnFailure bool
}
// createOptionsInternal is the set of user-supplied create options, but includes internal
// fields for processing the request once user-supplied stuff has been validated.
type createOptionsInternal struct {
*CreateOptions
actualSchemaVersion *hcsschema.Version // Calculated based on Windows build and optional caller-supplied override
actualID string // Identifier for the container
actualOwner string // Owner for the container
actualNetworkNamespace string
}
// CreateContainer creates a container. It can cope with a wide variety of
// scenarios, including v1 HCS schema calls, as well as more complex v2 HCS schema
// calls. Note we always return the resources that have been allocated, even in the
// case of an error. This provides support for the debugging option not to
// release the resources on failure, so that the client can make the necessary
// call to release resources that have been allocated as part of calling this function.
func CreateContainer(createOptions *CreateOptions) (_ *hcs.System, _ *Resources, err error) {
logrus.Debugf("hcsshim::CreateContainer options: %+v", createOptions)
coi := &createOptionsInternal{
CreateOptions: createOptions,
actualID: createOptions.ID,
actualOwner: createOptions.Owner,
}
// Defaults if omitted by caller.
if coi.actualID == "" {
coi.actualID = guid.New().String()
}
if coi.actualOwner == "" {
coi.actualOwner = filepath.Base(os.Args[0])
}
if coi.Spec == nil {
return nil, nil, fmt.Errorf("Spec must be supplied")
}
if coi.HostingSystem != nil {
// By definition, a hosting system can only be supplied for a v2 Xenon.
coi.actualSchemaVersion = schemaversion.SchemaV21()
} else {
coi.actualSchemaVersion = schemaversion.DetermineSchemaVersion(coi.SchemaVersion)
logrus.Debugf("hcsshim::CreateContainer using schema %s", schemaversion.String(coi.actualSchemaVersion))
}
resources := &Resources{}
defer func() {
if err != nil {
if !coi.DoNotReleaseResourcesOnFailure {
ReleaseResources(resources, coi.HostingSystem, true)
}
}
}()
if coi.HostingSystem != nil {
n := coi.HostingSystem.ContainerCounter()
if coi.Spec.Linux != nil {
resources.containerRootInUVM = "/run/gcs/c/" + strconv.FormatUint(n, 16)
} else {
resources.containerRootInUVM = `C:\c\` + strconv.FormatUint(n, 16)
}
}
// Create a network namespace if necessary.
if coi.Spec.Windows != nil &&
coi.Spec.Windows.Network != nil &&
schemaversion.IsV21(coi.actualSchemaVersion) {
if coi.NetworkNamespace != "" {
resources.netNS = coi.NetworkNamespace
} else {
err := createNetworkNamespace(coi, resources)
if err != nil {
return nil, resources, err
}
}
coi.actualNetworkNamespace = resources.netNS
if coi.HostingSystem != nil {
endpoints, err := getNamespaceEndpoints(coi.actualNetworkNamespace)
if err != nil {
return nil, resources, err
}
err = coi.HostingSystem.AddNetNS(coi.actualNetworkNamespace, endpoints)
if err != nil {
return nil, resources, err
}
resources.addedNetNSToVM = true
}
}
var hcsDocument interface{}
logrus.Debugf("hcsshim::CreateContainer allocating resources")
if coi.Spec.Linux != nil {
if schemaversion.IsV10(coi.actualSchemaVersion) {
return nil, resources, errors.New("LCOW v1 not supported")
}
logrus.Debugf("hcsshim::CreateContainer allocateLinuxResources")
err = allocateLinuxResources(coi, resources)
if err != nil {
logrus.Debugf("failed to allocateLinuxResources %s", err)
return nil, resources, err
}
hcsDocument, err = createLinuxContainerDocument(coi, resources.containerRootInUVM)
if err != nil {
logrus.Debugf("failed createHCSContainerDocument %s", err)
return nil, resources, err
}
} else {
err = allocateWindowsResources(coi, resources)
if err != nil {
logrus.Debugf("failed to allocateWindowsResources %s", err)
return nil, resources, err
}
logrus.Debugf("hcsshim::CreateContainer creating container document")
hcsDocument, err = createWindowsContainerDocument(coi)
if err != nil {
logrus.Debugf("failed createHCSContainerDocument %s", err)
return nil, resources, err
}
}
logrus.Debugf("hcsshim::CreateContainer creating compute system")
system, err := hcs.CreateComputeSystem(coi.actualID, hcsDocument)
if err != nil {
logrus.Debugf("failed to CreateComputeSystem %s", err)
return nil, resources, err
}
return system, resources, err
}
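A minimal caller sketch for the API above (illustrative only: the package layout, layer paths and container ID are invented, and hcsoci is an internal package). It simply follows the contract documented in CreateOptions and CreateContainer: read-only layers first, the RW scratch layer last, and the caller releasing resources once the container is done.

package example

import (
	"github.com/Microsoft/hcsshim/internal/hcsoci"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func createExample() error {
	// Read-only layers first, the RW scratch layer always last.
	layerFolders := []string{
		`C:\layers\nanoserver`, // base read-only layer (hypothetical path)
		`C:\layers\scratch`,    // RW scratch layer (hypothetical path)
	}
	opts := &hcsoci.CreateOptions{
		ID:   "example-container",
		Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layerFolders}},
		// HostingSystem would be set here to create a hosted (v2 Xenon) container.
	}
	system, resources, err := hcsoci.CreateContainer(opts)
	if err != nil {
		// Unless DoNotReleaseResourcesOnFailure was set, CreateContainer has
		// already released anything it allocated before failing.
		return err
	}
	defer hcsoci.ReleaseResources(resources, nil, true)
	_ = system // Start the container, launch processes, etc.
	return nil
}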

View File

@@ -0,0 +1,78 @@
// +build windows,functional
//
// These unit tests must run on a system set up to run both Argons and Xenons,
// have docker installed, and have the nanoserver (WCOW) and alpine (LCOW)
// base images installed. The nanoserver image MUST match the build of the
// host.
//
// We rely on docker as the tools to extract a container image aren't
// open source. We use it to find the location of the base image on disk.
//
package hcsoci
//import (
// "bytes"
// "encoding/json"
// "io/ioutil"
// "os"
// "os/exec"
// "path/filepath"
// "strings"
// "testing"
// "github.com/Microsoft/hcsshim/internal/schemaversion"
// _ "github.com/Microsoft/hcsshim/test/assets"
// specs "github.com/opencontainers/runtime-spec/specs-go"
// "github.com/sirupsen/logrus"
//)
//func startUVM(t *testing.T, uvm *UtilityVM) {
// if err := uvm.Start(); err != nil {
// t.Fatalf("UVM %s Failed start: %s", uvm.Id, err)
// }
//}
//// Helper to shoot a utility VM
//func terminateUtilityVM(t *testing.T, uvm *UtilityVM) {
// if err := uvm.Terminate(); err != nil {
// t.Fatalf("Failed terminate utility VM %s", err)
// }
//}
//// TODO: Test UVMResourcesFromContainerSpec
//func TestUVMSizing(t *testing.T) {
// t.Skip("for now - not implemented at all")
//}
//// TestID validates that the requested ID is retrieved
//func TestID(t *testing.T) {
// t.Skip("fornow")
// tempDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(tempDir)
// layers := append(layersNanoserver, tempDir)
// mountPath, err := mountContainerLayers(layers, nil)
// if err != nil {
// t.Fatalf("failed to mount container storage: %s", err)
// }
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
// c, err := CreateContainer(&CreateOptions{
// Id: "gruntbuggly",
// SchemaVersion: schemaversion.SchemaV21(),
// Spec: &specs.Spec{
// Windows: &specs.Windows{LayerFolders: layers},
// Root: &specs.Root{Path: mountPath.(string)},
// },
// })
// if err != nil {
// t.Fatalf("Failed create: %s", err)
// }
// if c.ID() != "gruntbuggly" {
// t.Fatalf("id not set correctly: %s", c.ID())
// }
// c.Terminate()
//}

View File

@@ -0,0 +1,115 @@
// +build windows
package hcsoci
import (
"encoding/json"
"github.com/Microsoft/hcsshim/internal/schema2"
"github.com/Microsoft/hcsshim/internal/schemaversion"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
)
func createLCOWSpec(coi *createOptionsInternal) (*specs.Spec, error) {
// Remarshal the spec to perform a deep copy.
j, err := json.Marshal(coi.Spec)
if err != nil {
return nil, err
}
spec := &specs.Spec{}
err = json.Unmarshal(j, spec)
if err != nil {
return nil, err
}
// TODO
// Translate the mounts. The root has already been translated in
// allocateLinuxResources.
/*
for i := range spec.Mounts {
spec.Mounts[i].Source = "???"
spec.Mounts[i].Destination = "???"
}
*/
// Linux containers don't care about Windows aspects of the spec except the
// network namespace
spec.Windows = nil
if coi.Spec.Windows != nil &&
coi.Spec.Windows.Network != nil &&
coi.Spec.Windows.Network.NetworkNamespace != "" {
spec.Windows = &specs.Windows{
Network: &specs.WindowsNetwork{
NetworkNamespace: coi.Spec.Windows.Network.NetworkNamespace,
},
}
}
// Hooks are not supported (they should be run in the host)
spec.Hooks = nil
// Clear unsupported features
if spec.Linux.Resources != nil {
spec.Linux.Resources.Devices = nil
spec.Linux.Resources.Memory = nil
spec.Linux.Resources.Pids = nil
spec.Linux.Resources.BlockIO = nil
spec.Linux.Resources.HugepageLimits = nil
spec.Linux.Resources.Network = nil
}
spec.Linux.Seccomp = nil
// Clear any specified namespaces
var namespaces []specs.LinuxNamespace
for _, ns := range spec.Linux.Namespaces {
switch ns.Type {
case specs.NetworkNamespace:
default:
ns.Path = ""
namespaces = append(namespaces, ns)
}
}
spec.Linux.Namespaces = namespaces
return spec, nil
}
// This is identical to hcsschema.ComputeSystem but HostedSystem is an LCOW-specific type - the schema docs only include WCOW.
type linuxComputeSystem struct {
Owner string `json:"Owner,omitempty"`
SchemaVersion *hcsschema.Version `json:"SchemaVersion,omitempty"`
HostingSystemId string `json:"HostingSystemId,omitempty"`
HostedSystem *linuxHostedSystem `json:"HostedSystem,omitempty"`
Container *hcsschema.Container `json:"Container,omitempty"`
VirtualMachine *hcsschema.VirtualMachine `json:"VirtualMachine,omitempty"`
ShouldTerminateOnLastHandleClosed bool `json:"ShouldTerminateOnLastHandleClosed,omitempty"`
}
type linuxHostedSystem struct {
SchemaVersion *hcsschema.Version
OciBundlePath string
OciSpecification *specs.Spec
}
func createLinuxContainerDocument(coi *createOptionsInternal, guestRoot string) (interface{}, error) {
spec, err := createLCOWSpec(coi)
if err != nil {
return nil, err
}
logrus.Debugf("hcsshim::createLinuxContainerDoc: guestRoot:%s", guestRoot)
v2 := &linuxComputeSystem{
Owner: coi.actualOwner,
SchemaVersion: schemaversion.SchemaV21(),
ShouldTerminateOnLastHandleClosed: true,
HostingSystemId: coi.HostingSystem.ID(),
HostedSystem: &linuxHostedSystem{
SchemaVersion: schemaversion.SchemaV21(),
OciBundlePath: guestRoot,
OciSpecification: spec,
},
}
return v2, nil
}
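For illustration, the document built above marshals to JSON roughly along the following lines. Every value is invented and the exact field set depends on the hcsschema and OCI spec types; the real owner, utility VM ID, bundle path and specification come from the caller.

package example

// exampleLCOWDoc sketches, roughly, the JSON that createLinuxContainerDocument
// produces for a v2 LCOW container hosted in a utility VM. All values here are
// hypothetical.
const exampleLCOWDoc = `{
  "Owner": "example-shim",
  "SchemaVersion": { "Major": 2, "Minor": 1 },
  "HostingSystemId": "example-uvm",
  "HostedSystem": {
    "SchemaVersion": { "Major": 2, "Minor": 1 },
    "OciBundlePath": "/run/gcs/c/1",
    "OciSpecification": { "ociVersion": "1.0.0" }
  },
  "ShouldTerminateOnLastHandleClosed": true
}`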

View File

@@ -0,0 +1,273 @@
// +build windows
package hcsoci
import (
"fmt"
"path/filepath"
"regexp"
"runtime"
"strings"
"github.com/Microsoft/hcsshim/internal/schema1"
"github.com/Microsoft/hcsshim/internal/schema2"
"github.com/Microsoft/hcsshim/internal/schemaversion"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/Microsoft/hcsshim/internal/uvmfolder"
"github.com/Microsoft/hcsshim/internal/wclayer"
"github.com/Microsoft/hcsshim/osversion"
"github.com/sirupsen/logrus"
)
// createWindowsContainerDocument creates a document suitable for calling HCS to create
// a container, both hosted and process isolated. It can create both v1 and v2
// schema, WCOW only. The container's storage should already have been mounted.
func createWindowsContainerDocument(coi *createOptionsInternal) (interface{}, error) {
logrus.Debugf("hcsshim: CreateHCSContainerDocument")
// TODO: Make this safe if exported so no null pointer dereferences.
if coi.Spec == nil {
return nil, fmt.Errorf("cannot create HCS container document - OCI spec is missing")
}
if coi.Spec.Windows == nil {
return nil, fmt.Errorf("cannot create HCS container document - OCI spec Windows section is missing ")
}
v1 := &schema1.ContainerConfig{
SystemType: "Container",
Name: coi.actualID,
Owner: coi.actualOwner,
HvPartition: false,
IgnoreFlushesDuringBoot: coi.Spec.Windows.IgnoreFlushesDuringBoot,
}
// IgnoreFlushesDuringBoot is a property of the SCSI attachment for the scratch. Set when it's hot-added to the utility VM
// ID is a property on the create call in V2 rather than part of the schema.
v2 := &hcsschema.ComputeSystem{
Owner: coi.actualOwner,
SchemaVersion: schemaversion.SchemaV21(),
ShouldTerminateOnLastHandleClosed: true,
}
v2Container := &hcsschema.Container{Storage: &hcsschema.Storage{}}
// TODO: Still want to revisit this.
if coi.Spec.Windows.LayerFolders == nil || len(coi.Spec.Windows.LayerFolders) < 2 {
return nil, fmt.Errorf("invalid spec - not enough layer folders supplied")
}
if coi.Spec.Hostname != "" {
v1.HostName = coi.Spec.Hostname
v2Container.GuestOs = &hcsschema.GuestOs{HostName: coi.Spec.Hostname}
}
if coi.Spec.Windows.Resources != nil {
if coi.Spec.Windows.Resources.CPU != nil {
if coi.Spec.Windows.Resources.CPU.Count != nil ||
coi.Spec.Windows.Resources.CPU.Shares != nil ||
coi.Spec.Windows.Resources.CPU.Maximum != nil {
v2Container.Processor = &hcsschema.Processor{}
}
if coi.Spec.Windows.Resources.CPU.Count != nil {
cpuCount := *coi.Spec.Windows.Resources.CPU.Count
hostCPUCount := uint64(runtime.NumCPU())
if cpuCount > hostCPUCount {
logrus.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount)
cpuCount = hostCPUCount
}
v1.ProcessorCount = uint32(cpuCount)
v2Container.Processor.Count = int32(cpuCount)
}
if coi.Spec.Windows.Resources.CPU.Shares != nil {
v1.ProcessorWeight = uint64(*coi.Spec.Windows.Resources.CPU.Shares)
v2Container.Processor.Weight = int32(v1.ProcessorWeight)
}
if coi.Spec.Windows.Resources.CPU.Maximum != nil {
v1.ProcessorMaximum = int64(*coi.Spec.Windows.Resources.CPU.Maximum)
v2Container.Processor.Maximum = int32(v1.ProcessorMaximum)
}
}
if coi.Spec.Windows.Resources.Memory != nil {
if coi.Spec.Windows.Resources.Memory.Limit != nil {
v1.MemoryMaximumInMB = int64(*coi.Spec.Windows.Resources.Memory.Limit) / 1024 / 1024
v2Container.Memory = &hcsschema.Memory{SizeInMB: int32(v1.MemoryMaximumInMB)}
}
}
if coi.Spec.Windows.Resources.Storage != nil {
if coi.Spec.Windows.Resources.Storage.Bps != nil || coi.Spec.Windows.Resources.Storage.Iops != nil {
v2Container.Storage.QoS = &hcsschema.StorageQoS{}
}
if coi.Spec.Windows.Resources.Storage.Bps != nil {
v1.StorageBandwidthMaximum = *coi.Spec.Windows.Resources.Storage.Bps
v2Container.Storage.QoS.BandwidthMaximum = int32(v1.StorageBandwidthMaximum)
}
if coi.Spec.Windows.Resources.Storage.Iops != nil {
v1.StorageIOPSMaximum = *coi.Spec.Windows.Resources.Storage.Iops
v2Container.Storage.QoS.IopsMaximum = int32(*coi.Spec.Windows.Resources.Storage.Iops)
}
}
}
// TODO V2 networking. Only partial at the moment. v2.Container.Networking.Namespace specifically
if coi.Spec.Windows.Network != nil {
v2Container.Networking = &hcsschema.Networking{}
v1.EndpointList = coi.Spec.Windows.Network.EndpointList
v2Container.Networking.Namespace = coi.actualNetworkNamespace
v1.AllowUnqualifiedDNSQuery = coi.Spec.Windows.Network.AllowUnqualifiedDNSQuery
v2Container.Networking.AllowUnqualifiedDnsQuery = v1.AllowUnqualifiedDNSQuery
if coi.Spec.Windows.Network.DNSSearchList != nil {
v1.DNSSearchList = strings.Join(coi.Spec.Windows.Network.DNSSearchList, ",")
v2Container.Networking.DnsSearchList = v1.DNSSearchList
}
v1.NetworkSharedContainerName = coi.Spec.Windows.Network.NetworkSharedContainerName
v2Container.Networking.NetworkSharedContainerName = v1.NetworkSharedContainerName
}
// TODO V2 Credentials not in the schema yet.
if cs, ok := coi.Spec.Windows.CredentialSpec.(string); ok {
v1.Credentials = cs
}
if coi.Spec.Root == nil {
return nil, fmt.Errorf("spec is invalid - root isn't populated")
}
if coi.Spec.Root.Readonly {
return nil, fmt.Errorf(`invalid container spec - readonly is not supported for Windows containers`)
}
// Strip off the top-most RW/scratch layer as that's passed in separately to HCS for v1
v1.LayerFolderPath = coi.Spec.Windows.LayerFolders[len(coi.Spec.Windows.LayerFolders)-1]
if (schemaversion.IsV21(coi.actualSchemaVersion) && coi.HostingSystem == nil) ||
(schemaversion.IsV10(coi.actualSchemaVersion) && coi.Spec.Windows.HyperV == nil) {
// Argon v1 or v2.
const volumeGUIDRegex = `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}(|\\)$`
if matched, err := regexp.MatchString(volumeGUIDRegex, coi.Spec.Root.Path); !matched || err != nil {
return nil, fmt.Errorf(`invalid container spec - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, coi.Spec.Root.Path)
}
if coi.Spec.Root.Path[len(coi.Spec.Root.Path)-1] != '\\' {
coi.Spec.Root.Path += `\` // Be nice to clients and make sure well-formed for back-compat
}
v1.VolumePath = coi.Spec.Root.Path[:len(coi.Spec.Root.Path)-1] // Strip the trailing backslash. Required for v1.
v2Container.Storage.Path = coi.Spec.Root.Path
} else {
// A hosting system was supplied, implying v2 Xenon; OR a v1 Xenon.
if schemaversion.IsV10(coi.actualSchemaVersion) {
// V1 Xenon
v1.HvPartition = true
if coi.Spec == nil || coi.Spec.Windows == nil || coi.Spec.Windows.HyperV == nil { // Be resilient to nil de-reference
return nil, fmt.Errorf(`invalid container spec - Spec.Windows.HyperV is nil`)
}
if coi.Spec.Windows.HyperV.UtilityVMPath != "" {
// Client-supplied utility VM path
v1.HvRuntime = &schema1.HvRuntime{ImagePath: coi.Spec.Windows.HyperV.UtilityVMPath}
} else {
// Client was lazy. Let's locate it from the layer folders instead.
uvmImagePath, err := uvmfolder.LocateUVMFolder(coi.Spec.Windows.LayerFolders)
if err != nil {
return nil, err
}
v1.HvRuntime = &schema1.HvRuntime{ImagePath: filepath.Join(uvmImagePath, `UtilityVM`)}
}
} else {
// Hosting system was supplied, so is v2 Xenon.
v2Container.Storage.Path = coi.Spec.Root.Path
if coi.HostingSystem.OS() == "windows" {
layers, err := computeV2Layers(coi.HostingSystem, coi.Spec.Windows.LayerFolders[:len(coi.Spec.Windows.LayerFolders)-1])
if err != nil {
return nil, err
}
v2Container.Storage.Layers = layers
}
}
}
if coi.HostingSystem == nil { // Argon v1 or v2
for _, layerPath := range coi.Spec.Windows.LayerFolders[:len(coi.Spec.Windows.LayerFolders)-1] {
layerID, err := wclayer.LayerID(layerPath)
if err != nil {
return nil, err
}
v1.Layers = append(v1.Layers, schema1.Layer{ID: layerID.String(), Path: layerPath})
v2Container.Storage.Layers = append(v2Container.Storage.Layers, hcsschema.Layer{Id: layerID.String(), Path: layerPath})
}
}
// Add the mounts as mapped directories or mapped pipes
// TODO: Mapped pipes to add in v2 schema.
var (
mdsv1 []schema1.MappedDir
mpsv1 []schema1.MappedPipe
mdsv2 []hcsschema.MappedDirectory
mpsv2 []hcsschema.MappedPipe
)
for _, mount := range coi.Spec.Mounts {
const pipePrefix = `\\.\pipe\`
if mount.Type != "" {
return nil, fmt.Errorf("invalid container spec - Mount.Type '%s' must not be set", mount.Type)
}
if strings.HasPrefix(strings.ToLower(mount.Destination), pipePrefix) {
mpsv1 = append(mpsv1, schema1.MappedPipe{HostPath: mount.Source, ContainerPipeName: mount.Destination[len(pipePrefix):]})
mpsv2 = append(mpsv2, hcsschema.MappedPipe{HostPath: mount.Source, ContainerPipeName: mount.Destination[len(pipePrefix):]})
} else {
readOnly := false
for _, o := range mount.Options {
if strings.ToLower(o) == "ro" {
readOnly = true
}
}
mdv1 := schema1.MappedDir{HostPath: mount.Source, ContainerPath: mount.Destination, ReadOnly: readOnly}
mdv2 := hcsschema.MappedDirectory{ContainerPath: mount.Destination, ReadOnly: readOnly}
if coi.HostingSystem == nil {
mdv2.HostPath = mount.Source
} else {
uvmPath, err := coi.HostingSystem.GetVSMBUvmPath(mount.Source)
if err != nil {
if err == uvm.ErrNotAttached {
// It could also be a scsi mount.
uvmPath, err = coi.HostingSystem.GetScsiUvmPath(mount.Source)
if err != nil {
return nil, err
}
} else {
return nil, err
}
}
mdv2.HostPath = uvmPath
}
mdsv1 = append(mdsv1, mdv1)
mdsv2 = append(mdsv2, mdv2)
}
}
v1.MappedDirectories = mdsv1
v2Container.MappedDirectories = mdsv2
if len(mpsv1) > 0 && osversion.Get().Build < osversion.RS3 {
return nil, fmt.Errorf("named pipe mounts are not supported on this version of Windows")
}
v1.MappedPipes = mpsv1
v2Container.MappedPipes = mpsv2
// Put the v2Container object as a HostedSystem for a Xenon, or directly in the schema for an Argon.
if coi.HostingSystem == nil {
v2.Container = v2Container
} else {
v2.HostingSystemId = coi.HostingSystem.ID()
v2.HostedSystem = &hcsschema.HostedSystem{
SchemaVersion: schemaversion.SchemaV21(),
Container: v2Container,
}
}
if schemaversion.IsV10(coi.actualSchemaVersion) {
return v1, nil
}
return v2, nil
}
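As a quick reference for the Root.Path requirement above, a self-contained check of the same volume GUID path pattern (the GUID value below is arbitrary):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as volumeGUIDRegex above; the GUID value is arbitrary.
	const volumeGUIDRegex = `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}(|\\)$`
	path := `\\?\Volume{4c1b02c1-d990-11e7-8810-e0071b14b2bd}\`
	matched, err := regexp.MatchString(volumeGUIDRegex, path)
	fmt.Println(matched, err) // true <nil>
}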

View File

@@ -0,0 +1,373 @@
// +build windows
package hcsoci
import (
"fmt"
"os"
"path"
"path/filepath"
"github.com/Microsoft/hcsshim/internal/guestrequest"
"github.com/Microsoft/hcsshim/internal/ospath"
"github.com/Microsoft/hcsshim/internal/requesttype"
"github.com/Microsoft/hcsshim/internal/schema2"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/Microsoft/hcsshim/internal/wclayer"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
type lcowLayerEntry struct {
hostPath string
uvmPath string
scsi bool
}
const scratchPath = "scratch"
// mountContainerLayers is a helper for clients to hide all the complexity of layer mounting
// Layer folders are in order: base, [rolayer1..rolayern,] scratch
//
// v1/v2: Argon WCOW: Returns the mount path on the host as a volume GUID.
// v1: Xenon WCOW: Done internally in HCS, so there is no point doing anything here.
// v2: Xenon WCOW: Returns a CombinedLayersV2 structure where ContainerRootPath is a folder
// inside the utility VM which is a GUID mapping of the scratch folder. Each
// of the layers are the VSMB locations where the read-only layers are mounted.
//
func MountContainerLayers(layerFolders []string, guestRoot string, uvm *uvm.UtilityVM) (interface{}, error) {
logrus.Debugln("hcsshim::mountContainerLayers", layerFolders)
if uvm == nil {
if len(layerFolders) < 2 {
return nil, fmt.Errorf("need at least two layers - base and scratch")
}
path := layerFolders[len(layerFolders)-1]
rest := layerFolders[:len(layerFolders)-1]
logrus.Debugln("hcsshim::mountContainerLayers ActivateLayer", path)
if err := wclayer.ActivateLayer(path); err != nil {
return nil, err
}
logrus.Debugln("hcsshim::mountContainerLayers Preparelayer", path, rest)
if err := wclayer.PrepareLayer(path, rest); err != nil {
if err2 := wclayer.DeactivateLayer(path); err2 != nil {
logrus.Warnf("Failed to Deactivate %s: %s", path, err)
}
return nil, err
}
mountPath, err := wclayer.GetLayerMountPath(path)
if err != nil {
if err := wclayer.UnprepareLayer(path); err != nil {
logrus.Warnf("Failed to Unprepare %s: %s", path, err)
}
if err2 := wclayer.DeactivateLayer(path); err2 != nil {
logrus.Warnf("Failed to Deactivate %s: %s", path, err)
}
return nil, err
}
return mountPath, nil
}
// V2 UVM
logrus.Debugf("hcsshim::mountContainerLayers Is a %s V2 UVM", uvm.OS())
// Add each read-only layer. For Windows, this is a VSMB share with the ResourceUri ending in
// a GUID based on the folder path. For Linux, this is a VPMEM device, except where it is over the
// maximum supported size, in which case we put it on SCSI instead.
//
// Each layer is ref-counted so that multiple containers in the same utility VM can share them.
var wcowLayersAdded []string
var lcowlayersAdded []lcowLayerEntry
attachedSCSIHostPath := ""
for _, layerPath := range layerFolders[:len(layerFolders)-1] {
var err error
if uvm.OS() == "windows" {
options := &hcsschema.VirtualSmbShareOptions{
ReadOnly: true,
PseudoOplocks: true,
TakeBackupPrivilege: true,
CacheIo: true,
ShareRead: true,
}
err = uvm.AddVSMB(layerPath, "", options)
if err == nil {
wcowLayersAdded = append(wcowLayersAdded, layerPath)
}
} else {
uvmPath := ""
hostPath := filepath.Join(layerPath, "layer.vhd")
var fi os.FileInfo
fi, err = os.Stat(hostPath)
if err == nil && uint64(fi.Size()) > uvm.PMemMaxSizeBytes() {
// Too big for PMEM. Add on SCSI instead (at /tmp/S<C>/<L>).
var (
controller int
lun int32
)
controller, lun, err = uvm.AddSCSILayer(hostPath)
if err == nil {
lcowlayersAdded = append(lcowlayersAdded,
lcowLayerEntry{
hostPath: hostPath,
uvmPath: fmt.Sprintf("/tmp/S%d/%d", controller, lun),
scsi: true,
})
}
} else {
_, uvmPath, err = uvm.AddVPMEM(hostPath, true) // UVM path is calculated. Will be /tmp/vN/
if err == nil {
lcowlayersAdded = append(lcowlayersAdded,
lcowLayerEntry{
hostPath: hostPath,
uvmPath: uvmPath,
})
}
}
}
if err != nil {
cleanupOnMountFailure(uvm, wcowLayersAdded, lcowlayersAdded, attachedSCSIHostPath)
return nil, err
}
}
// Add the scratch at an unused SCSI location. The container path inside the
// utility VM will be C:\<ID>.
hostPath := filepath.Join(layerFolders[len(layerFolders)-1], "sandbox.vhdx")
// BUGBUG Rename guestRoot better.
containerScratchPathInUVM := ospath.Join(uvm.OS(), guestRoot, scratchPath)
_, _, err := uvm.AddSCSI(hostPath, containerScratchPathInUVM, false)
if err != nil {
cleanupOnMountFailure(uvm, wcowLayersAdded, lcowlayersAdded, attachedSCSIHostPath)
return nil, err
}
attachedSCSIHostPath = hostPath
if uvm.OS() == "windows" {
// Load the filter at the C:\s<ID> location calculated above. We pass into this request each of the
// read-only layer folders.
layers, err := computeV2Layers(uvm, wcowLayersAdded)
if err != nil {
cleanupOnMountFailure(uvm, wcowLayersAdded, lcowlayersAdded, attachedSCSIHostPath)
return nil, err
}
guestRequest := guestrequest.CombinedLayers{
ContainerRootPath: containerScratchPathInUVM,
Layers: layers,
}
combinedLayersModification := &hcsschema.ModifySettingRequest{
GuestRequest: guestrequest.GuestRequest{
Settings: guestRequest,
ResourceType: guestrequest.ResourceTypeCombinedLayers,
RequestType: requesttype.Add,
},
}
if err := uvm.Modify(combinedLayersModification); err != nil {
cleanupOnMountFailure(uvm, wcowLayersAdded, lcowlayersAdded, attachedSCSIHostPath)
return nil, err
}
logrus.Debugln("hcsshim::mountContainerLayers Succeeded")
return guestRequest, nil
}
// This is the LCOW layout inside the utilityVM. NNN is the container "number"
// which increments for each container created in a utility VM.
//
// /run/gcs/c/NNN/config.json
// /run/gcs/c/NNN/rootfs
// /run/gcs/c/NNN/scratch/upper
// /run/gcs/c/NNN/scratch/work
//
// /dev/sda on /tmp/scratch type ext4 (rw,relatime,block_validity,delalloc,barrier,user_xattr,acl)
// /dev/pmem0 on /tmp/v0 type ext4 (ro,relatime,block_validity,delalloc,norecovery,barrier,dax,user_xattr,acl)
// /dev/sdb on /run/gcs/c/NNN/scratch type ext4 (rw,relatime,block_validity,delalloc,barrier,user_xattr,acl)
// overlay on /run/gcs/c/NNN/rootfs type overlay (rw,relatime,lowerdir=/tmp/v0,upperdir=/run/gcs/c/NNN/scratch/upper,workdir=/run/gcs/c/NNN/scratch/work)
//
// Where /dev/sda is the scratch for utility VM itself
// /dev/pmemX are read-only layers for containers
// /dev/sd(b...) are scratch spaces for each container
layers := []hcsschema.Layer{}
for _, l := range lcowlayersAdded {
layers = append(layers, hcsschema.Layer{Path: l.uvmPath})
}
guestRequest := guestrequest.CombinedLayers{
ContainerRootPath: path.Join(guestRoot, rootfsPath),
Layers: layers,
ScratchPath: containerScratchPathInUVM,
}
combinedLayersModification := &hcsschema.ModifySettingRequest{
GuestRequest: guestrequest.GuestRequest{
ResourceType: guestrequest.ResourceTypeCombinedLayers,
RequestType: requesttype.Add,
Settings: guestRequest,
},
}
if err := uvm.Modify(combinedLayersModification); err != nil {
cleanupOnMountFailure(uvm, wcowLayersAdded, lcowlayersAdded, attachedSCSIHostPath)
return nil, err
}
logrus.Debugln("hcsshim::mountContainerLayers Succeeded")
return guestRequest, nil
}
// UnmountOperation is used when calling Unmount() to determine what type of unmount is
// required. In the V1 schema, this must be UnmountOperationAll. In V2, clients can be
// more selective and unmount only what they need, which can be a minor performance
// improvement (e.g. if you know only one container is running in a utility VM and the
// UVM is about to be torn down, there is no need to unmount the VSMB shares; unmounting
// just the SCSI scratch is enough to leave a consistent file system).
type UnmountOperation uint
const (
UnmountOperationSCSI UnmountOperation = 0x01
UnmountOperationVSMB = 0x02
UnmountOperationVPMEM = 0x04
UnmountOperationAll = UnmountOperationSCSI | UnmountOperationVSMB | UnmountOperationVPMEM
)
// UnmountContainerLayers is a helper for clients to hide all the complexity of layer unmounting
func UnmountContainerLayers(layerFolders []string, guestRoot string, uvm *uvm.UtilityVM, op UnmountOperation) error {
logrus.Debugln("hcsshim::unmountContainerLayers", layerFolders)
if uvm == nil {
// Must be an argon - folders are mounted on the host
if op != UnmountOperationAll {
return fmt.Errorf("only operation supported for host-mounted folders is unmountOperationAll")
}
if len(layerFolders) < 1 {
return fmt.Errorf("need at least one layer for Unmount")
}
path := layerFolders[len(layerFolders)-1]
logrus.Debugln("hcsshim::Unmount UnprepareLayer", path)
if err := wclayer.UnprepareLayer(path); err != nil {
return err
}
// TODO Should we try this anyway?
logrus.Debugln("hcsshim::unmountContainerLayers DeactivateLayer", path)
return wclayer.DeactivateLayer(path)
}
// V2 Xenon
// Base+Scratch as a minimum. This is different from v1, which only requires the scratch
if len(layerFolders) < 2 {
return fmt.Errorf("at least two layers are required for unmount")
}
var retError error
// Unload the storage filter followed by the SCSI scratch
if (op & UnmountOperationSCSI) == UnmountOperationSCSI {
containerScratchPathInUVM := ospath.Join(uvm.OS(), guestRoot, scratchPath)
logrus.Debugf("hcsshim::unmountContainerLayers CombinedLayers %s", containerScratchPathInUVM)
combinedLayersModification := &hcsschema.ModifySettingRequest{
GuestRequest: guestrequest.GuestRequest{
ResourceType: guestrequest.ResourceTypeCombinedLayers,
RequestType: requesttype.Remove,
Settings: guestrequest.CombinedLayers{ContainerRootPath: containerScratchPathInUVM},
},
}
if err := uvm.Modify(combinedLayersModification); err != nil {
logrus.Errorf(err.Error())
}
// Hot remove the scratch from the SCSI controller
hostScratchFile := filepath.Join(layerFolders[len(layerFolders)-1], "sandbox.vhdx")
logrus.Debugf("hcsshim::unmountContainerLayers SCSI %s %s", containerScratchPathInUVM, hostScratchFile)
if err := uvm.RemoveSCSI(hostScratchFile); err != nil {
e := fmt.Errorf("failed to remove SCSI %s: %s", hostScratchFile, err)
logrus.Debugln(e)
if retError == nil {
retError = e
} else {
retError = errors.Wrapf(retError, e.Error())
}
}
}
// Remove each of the read-only layers from VSMB. These are ref-counted and
// only removed once the count drops to zero. This allows multiple containers
// to share layers.
if uvm.OS() == "windows" && len(layerFolders) > 1 && (op&UnmountOperationVSMB) == UnmountOperationVSMB {
for _, layerPath := range layerFolders[:len(layerFolders)-1] {
if e := uvm.RemoveVSMB(layerPath); e != nil {
logrus.Debugln(e)
if retError == nil {
retError = e
} else {
retError = errors.Wrapf(retError, e.Error())
}
}
}
}
// Remove each of the read-only layers from VPMEM (or SCSI). These are ref-counted
// and only removed once the count drops to zero. This allows multiple containers to
// share layers. Note that SCSI is used for large layers.
if uvm.OS() == "linux" && len(layerFolders) > 1 && (op&UnmountOperationVPMEM) == UnmountOperationVPMEM {
for _, layerPath := range layerFolders[:len(layerFolders)-1] {
hostPath := filepath.Join(layerPath, "layer.vhd")
if fi, err := os.Stat(hostPath); err == nil {
var e error
if uint64(fi.Size()) > uvm.PMemMaxSizeBytes() {
e = uvm.RemoveSCSI(hostPath)
} else {
e = uvm.RemoveVPMEM(hostPath)
}
if e != nil {
logrus.Debugln(e)
if retError == nil {
retError = e
} else {
retError = errors.Wrapf(retError, e.Error())
}
}
}
}
}
// TODO (possibly) Consider deleting the container directory in the utility VM
return retError
}
func cleanupOnMountFailure(uvm *uvm.UtilityVM, wcowLayers []string, lcowLayers []lcowLayerEntry, scratchHostPath string) {
for _, wl := range wcowLayers {
if err := uvm.RemoveVSMB(wl); err != nil {
logrus.Warnf("Possibly leaked vsmbshare on error removal path: %s", err)
}
}
for _, ll := range lcowLayers {
if ll.scsi {
if err := uvm.RemoveSCSI(ll.hostPath); err != nil {
logrus.Warnf("Possibly leaked SCSI on error removal path: %s", err)
}
} else if err := uvm.RemoveVPMEM(ll.hostPath); err != nil {
logrus.Warnf("Possibly leaked vpmemdevice on error removal path: %s", err)
}
}
if scratchHostPath != "" {
if err := uvm.RemoveSCSI(scratchHostPath); err != nil {
logrus.Warnf("Possibly leaked SCSI disk on error removal path: %s", err)
}
}
}
func computeV2Layers(vm *uvm.UtilityVM, paths []string) (layers []hcsschema.Layer, err error) {
for _, path := range paths {
uvmPath, err := vm.GetVSMBUvmPath(path)
if err != nil {
return nil, err
}
layerID, err := wclayer.LayerID(path)
if err != nil {
return nil, err
}
layers = append(layers, hcsschema.Layer{Id: layerID.String(), Path: uvmPath})
}
return layers, nil
}
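A sketch of how a caller might consume the two return shapes of MountContainerLayers described above: a volume GUID path string for an Argon, or a guestrequest.CombinedLayers for a v2 Xenon. The helper name and wiring are invented and these are internal packages, so treat this as illustrative only.

package example

import (
	"github.com/Microsoft/hcsshim/internal/guestrequest"
	"github.com/Microsoft/hcsshim/internal/hcsoci"
	"github.com/Microsoft/hcsshim/internal/uvm"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// mountForSpec is a hypothetical helper: it mounts the layers and records the
// resulting container root in the OCI spec, for either an Argon (vm == nil) or
// a v2 Xenon (vm != nil).
func mountForSpec(layerFolders []string, guestRoot string, vm *uvm.UtilityVM, spec *specs.Spec) error {
	mounted, err := hcsoci.MountContainerLayers(layerFolders, guestRoot, vm)
	if err != nil {
		return err
	}
	switch m := mounted.(type) {
	case string:
		// Argon: a host volume GUID path such as \\?\Volume{...}\
		spec.Root = &specs.Root{Path: m}
	case guestrequest.CombinedLayers:
		// v2 Xenon: the container root path inside the utility VM.
		spec.Root = &specs.Root{Path: m.ContainerRootPath}
	}
	return nil
}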

View File

@@ -0,0 +1,41 @@
package hcsoci
import (
"github.com/Microsoft/hcsshim/internal/hns"
"github.com/sirupsen/logrus"
)
func createNetworkNamespace(coi *createOptionsInternal, resources *Resources) error {
netID, err := hns.CreateNamespace()
if err != nil {
return err
}
logrus.Infof("created network namespace %s for %s", netID, coi.ID)
resources.netNS = netID
resources.createdNetNS = true
for _, endpointID := range coi.Spec.Windows.Network.EndpointList {
err = hns.AddNamespaceEndpoint(netID, endpointID)
if err != nil {
return err
}
logrus.Infof("added network endpoint %s to namespace %s", endpointID, netID)
resources.networkEndpoints = append(resources.networkEndpoints, endpointID)
}
return nil
}
func getNamespaceEndpoints(netNS string) ([]*hns.HNSEndpoint, error) {
ids, err := hns.GetNamespaceEndpoints(netNS)
if err != nil {
return nil, err
}
var endpoints []*hns.HNSEndpoint
for _, id := range ids {
endpoint, err := hns.GetHNSEndpointByID(id)
if err != nil {
return nil, err
}
endpoints = append(endpoints, endpoint)
}
return endpoints, nil
}

View File

@@ -0,0 +1,127 @@
package hcsoci
import (
"os"
"github.com/Microsoft/hcsshim/internal/hns"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/sirupsen/logrus"
)
// NetNS returns the network namespace for the container
func (r *Resources) NetNS() string {
return r.netNS
}
// Resources is the structure returned as part of creating a container. It holds
// nothing useful to clients, hence everything is unexported. A client would use
// it in a call to ReleaseResources to ensure everything is cleaned up when a
// container exits.
type Resources struct {
// containerRootInUVM is the base path in a utility VM where elements relating
// to a container are exposed. For example, the mounted filesystem; the runtime
// spec (in the case of LCOW); overlay and scratch (in the case of LCOW).
//
// For WCOW, this will be under C:\c\N, and for LCOW this will
// be under /run/gcs/c/N. N is an atomic counter for each container created
// in that utility VM. For LCOW this is also the "OCI Bundle Path".
containerRootInUVM string
// layers is an array of the layer folder paths which have been mounted either on
// the host in the case of a WCOW Argon, or in a utility VM for WCOW Xenon and LCOW.
layers []string
// vsmbMounts is an array of the host-paths mounted into a utility VM to support
// (bind-)mounts into a WCOW v2 Xenon.
vsmbMounts []string
// plan9Mounts is an array of all the host paths which have been added to
// an LCOW utility VM
plan9Mounts []string
// netNS is the network namespace
netNS string
// networkEndpoints is the list of network endpoints used by the container
networkEndpoints []string
// createdNetNS indicates if the network namespace has been created
createdNetNS bool
// addedNetNSToVM indicates if the network namespace has been added to the container's utility VM
addedNetNSToVM bool
// scsiMounts is an array of the host-paths mounted into a utility VM to
// support scsi device passthrough.
scsiMounts []string
}
// TODO: Method on the resources?
func ReleaseResources(r *Resources, vm *uvm.UtilityVM, all bool) error {
if vm != nil && r.addedNetNSToVM {
err := vm.RemoveNetNS(r.netNS)
if err != nil {
logrus.Warn(err)
}
r.addedNetNSToVM = false
}
if r.createdNetNS {
for len(r.networkEndpoints) != 0 {
endpoint := r.networkEndpoints[len(r.networkEndpoints)-1]
err := hns.RemoveNamespaceEndpoint(r.netNS, endpoint)
if err != nil {
if !os.IsNotExist(err) {
return err
}
logrus.Warnf("removing endpoint %s from namespace %s: does not exist", endpoint, r.NetNS())
}
r.networkEndpoints = r.networkEndpoints[:len(r.networkEndpoints)-1]
}
r.networkEndpoints = nil
err := hns.RemoveNamespace(r.netNS)
if err != nil && !os.IsNotExist(err) {
return err
}
r.createdNetNS = false
}
if len(r.layers) != 0 {
op := UnmountOperationSCSI
if vm == nil || all {
op = UnmountOperationAll
}
err := UnmountContainerLayers(r.layers, r.containerRootInUVM, vm, op)
if err != nil {
return err
}
r.layers = nil
}
if all {
for len(r.vsmbMounts) != 0 {
mount := r.vsmbMounts[len(r.vsmbMounts)-1]
if err := vm.RemoveVSMB(mount); err != nil {
return err
}
r.vsmbMounts = r.vsmbMounts[:len(r.vsmbMounts)-1]
}
for len(r.plan9Mounts) != 0 {
mount := r.plan9Mounts[len(r.plan9Mounts)-1]
if err := vm.RemovePlan9(mount); err != nil {
return err
}
r.plan9Mounts = r.plan9Mounts[:len(r.plan9Mounts)-1]
}
for _, path := range r.scsiMounts {
if err := vm.RemoveSCSI(path); err != nil {
return err
}
}
r.scsiMounts = nil
}
return nil
}
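A sketch of the cleanup call described above (helper name and wiring are invented, illustrative only). Following the code: with all=false and a hosting utility VM, the layer unmount is limited to the container's SCSI scratch (UnmountOperationSCSI); with all=true the VSMB, Plan9 and SCSI mounts added for the container are removed as well.

package example

import (
	"github.com/Microsoft/hcsshim/internal/hcsoci"
	"github.com/Microsoft/hcsshim/internal/uvm"
	"github.com/sirupsen/logrus"
)

// releaseOnExit is a hypothetical helper. fullRelease=true removes everything
// added for the container; fullRelease=false unmounts only the SCSI scratch
// (plus network namespace cleanup), which can be enough when the utility VM
// itself is about to be torn down.
func releaseOnExit(r *hcsoci.Resources, vm *uvm.UtilityVM, fullRelease bool) {
	if err := hcsoci.ReleaseResources(r, vm, fullRelease); err != nil {
		logrus.Warnf("failed to release container resources: %s", err)
	}
}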

View File

@@ -0,0 +1,104 @@
// +build windows
package hcsoci
// Contains functions relating to an LCOW container, as opposed to a utility VM
import (
"fmt"
"path"
"strconv"
"strings"
"github.com/Microsoft/hcsshim/internal/guestrequest"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
)
const rootfsPath = "rootfs"
const mountPathPrefix = "m"
func allocateLinuxResources(coi *createOptionsInternal, resources *Resources) error {
if coi.Spec.Root == nil {
coi.Spec.Root = &specs.Root{}
}
if coi.Spec.Root.Path == "" {
logrus.Debugln("hcsshim::allocateLinuxResources mounting storage")
mcl, err := MountContainerLayers(coi.Spec.Windows.LayerFolders, resources.containerRootInUVM, coi.HostingSystem)
if err != nil {
return fmt.Errorf("failed to mount container storage: %s", err)
}
if coi.HostingSystem == nil {
coi.Spec.Root.Path = mcl.(string) // Argon v1 or v2
} else {
coi.Spec.Root.Path = mcl.(guestrequest.CombinedLayers).ContainerRootPath // v2 Xenon LCOW
}
resources.layers = coi.Spec.Windows.LayerFolders
} else {
// This is the "Plan 9" root filesystem.
// TODO: We need a test for this. Ask @jstarks how you can even lay this out on Windows.
hostPath := coi.Spec.Root.Path
uvmPathForContainersFileSystem := path.Join(resources.containerRootInUVM, rootfsPath)
err := coi.HostingSystem.AddPlan9(hostPath, uvmPathForContainersFileSystem, coi.Spec.Root.Readonly)
if err != nil {
return fmt.Errorf("adding plan9 root: %s", err)
}
coi.Spec.Root.Path = uvmPathForContainersFileSystem
resources.plan9Mounts = append(resources.plan9Mounts, hostPath)
}
for i, mount := range coi.Spec.Mounts {
switch mount.Type {
case "bind":
case "physical-disk":
case "virtual-disk":
default:
// Unknown mount type
continue
}
if mount.Destination == "" || mount.Source == "" {
return fmt.Errorf("invalid OCI spec - a mount must have both source and a destination: %+v", mount)
}
if coi.HostingSystem != nil {
hostPath := mount.Source
uvmPathForShare := path.Join(resources.containerRootInUVM, mountPathPrefix+strconv.Itoa(i))
readOnly := false
for _, o := range mount.Options {
if strings.ToLower(o) == "ro" {
readOnly = true
break
}
}
if mount.Type == "physical-disk" {
logrus.Debugf("hcsshim::allocateLinuxResources Hot-adding SCSI physical disk for OCI mount %+v", mount)
_, _, err := coi.HostingSystem.AddSCSIPhysicalDisk(hostPath, uvmPathForShare, readOnly)
if err != nil {
return fmt.Errorf("adding SCSI physical disk mount %+v: %s", mount, err)
}
resources.scsiMounts = append(resources.scsiMounts, hostPath)
coi.Spec.Mounts[i].Type = "none"
} else if mount.Type == "virtual-disk" {
logrus.Debugf("hcsshim::allocateLinuxResources Hot-adding SCSI virtual disk for OCI mount %+v", mount)
_, _, err := coi.HostingSystem.AddSCSI(hostPath, uvmPathForShare, readOnly)
if err != nil {
return fmt.Errorf("adding SCSI virtual disk mount %+v: %s", mount, err)
}
resources.scsiMounts = append(resources.scsiMounts, hostPath)
coi.Spec.Mounts[i].Type = "none"
} else {
logrus.Debugf("hcsshim::allocateLinuxResources Hot-adding Plan9 for OCI mount %+v", mount)
err := coi.HostingSystem.AddPlan9(hostPath, uvmPathForShare, readOnly)
if err != nil {
return fmt.Errorf("adding plan9 mount %+v: %s", mount, err)
}
resources.plan9Mounts = append(resources.plan9Mounts, hostPath)
}
coi.Spec.Mounts[i].Source = uvmPathForShare
}
}
return nil
}
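The LCOW mount handling above keys off mount.Type. For reference, a hypothetical set of OCI mounts exercising the three supported types (all paths are invented): a "bind" mount is added to the utility VM as a Plan 9 share, while the two disk types are hot-added over SCSI.

package example

import (
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// Illustrative only; all paths are hypothetical.
var exampleLCOWMounts = []specs.Mount{
	// Read-only bind mount: exposed to the utility VM as a Plan 9 share.
	{Type: "bind", Source: `C:\data\config`, Destination: "/etc/app", Options: []string{"ro"}},
	// A VHD(X): hot-added to the utility VM over SCSI.
	{Type: "virtual-disk", Source: `C:\disks\data.vhdx`, Destination: "/data"},
	// A physical disk: also hot-added over SCSI.
	{Type: "physical-disk", Source: `\\.\PhysicalDrive2`, Destination: "/bulk"},
}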

View File

@@ -0,0 +1,127 @@
// +build windows
package hcsoci
// Contains functions relating to a WCOW container, as opposed to a utility VM
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/Microsoft/hcsshim/internal/guestrequest"
"github.com/Microsoft/hcsshim/internal/schema2"
"github.com/Microsoft/hcsshim/internal/schemaversion"
"github.com/Microsoft/hcsshim/internal/wclayer"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
)
func allocateWindowsResources(coi *createOptionsInternal, resources *Resources) error {
if coi.Spec == nil || coi.Spec.Windows == nil || coi.Spec.Windows.LayerFolders == nil {
return fmt.Errorf("field 'Spec.Windows.Layerfolders' is not populated")
}
scratchFolder := coi.Spec.Windows.LayerFolders[len(coi.Spec.Windows.LayerFolders)-1]
logrus.Debugf("hcsshim::allocateWindowsResources scratch folder: %s", scratchFolder)
// TODO: Remove this code for auto-creation. Make the caller responsible.
// Create the directory for the RW scratch layer if it doesn't exist
if _, err := os.Stat(scratchFolder); os.IsNotExist(err) {
logrus.Debugf("hcsshim::allocateWindowsResources container scratch folder does not exist so creating: %s ", scratchFolder)
if err := os.MkdirAll(scratchFolder, 0777); err != nil {
return fmt.Errorf("failed to auto-create container scratch folder %s: %s", scratchFolder, err)
}
}
// Create sandbox.vhdx in the scratch folder if it doesn't exist. It's called sandbox.vhdx
// rather than scratch.vhdx because, as in the v1 schema, the name is hard-coded in HCS.
if _, err := os.Stat(filepath.Join(scratchFolder, "sandbox.vhdx")); os.IsNotExist(err) {
logrus.Debugf("hcsshim::allocateWindowsResources container sandbox.vhdx does not exist so creating in %s ", scratchFolder)
if err := wclayer.CreateScratchLayer(scratchFolder, coi.Spec.Windows.LayerFolders[:len(coi.Spec.Windows.LayerFolders)-1]); err != nil {
return fmt.Errorf("failed to CreateSandboxLayer %s", err)
}
}
if coi.Spec.Root == nil {
coi.Spec.Root = &specs.Root{}
}
if coi.Spec.Root.Path == "" && (coi.HostingSystem != nil || coi.Spec.Windows.HyperV == nil) {
logrus.Debugln("hcsshim::allocateWindowsResources mounting storage")
mcl, err := MountContainerLayers(coi.Spec.Windows.LayerFolders, resources.containerRootInUVM, coi.HostingSystem)
if err != nil {
return fmt.Errorf("failed to mount container storage: %s", err)
}
if coi.HostingSystem == nil {
coi.Spec.Root.Path = mcl.(string) // Argon v1 or v2
} else {
coi.Spec.Root.Path = mcl.(guestrequest.CombinedLayers).ContainerRootPath // v2 Xenon WCOW
}
resources.layers = coi.Spec.Windows.LayerFolders
}
// Validate each of the mounts. If this is a V2 Xenon, we have to add them as
// VSMB shares to the utility VM. For V1 Xenon and Argons, there's nothing for
// us to do as it's done by HCS.
for i, mount := range coi.Spec.Mounts {
if mount.Destination == "" || mount.Source == "" {
return fmt.Errorf("invalid OCI spec - a mount must have both source and a destination: %+v", mount)
}
switch mount.Type {
case "":
case "physical-disk":
case "virtual-disk":
default:
return fmt.Errorf("invalid OCI spec - Type '%s' not supported", mount.Type)
}
if coi.HostingSystem != nil && schemaversion.IsV21(coi.actualSchemaVersion) {
uvmPath := fmt.Sprintf("C:\\%s\\%d", coi.actualID, i)
readOnly := false
for _, o := range mount.Options {
if strings.ToLower(o) == "ro" {
readOnly = true
break
}
}
if mount.Type == "physical-disk" {
logrus.Debugf("hcsshim::allocateWindowsResources Hot-adding SCSI physical disk for OCI mount %+v", mount)
_, _, err := coi.HostingSystem.AddSCSIPhysicalDisk(mount.Source, uvmPath, readOnly)
if err != nil {
return fmt.Errorf("adding SCSI physical disk mount %+v: %s", mount, err)
}
coi.Spec.Mounts[i].Type = ""
resources.scsiMounts = append(resources.scsiMounts, mount.Source)
} else if mount.Type == "virtual-disk" {
logrus.Debugf("hcsshim::allocateWindowsResources Hot-adding SCSI virtual disk for OCI mount %+v", mount)
_, _, err := coi.HostingSystem.AddSCSI(mount.Source, uvmPath, readOnly)
if err != nil {
return fmt.Errorf("adding SCSI virtual disk mount %+v: %s", mount, err)
}
coi.Spec.Mounts[i].Type = ""
resources.scsiMounts = append(resources.scsiMounts, mount.Source)
} else {
logrus.Debugf("hcsshim::allocateWindowsResources Hot-adding VSMB share for OCI mount %+v", mount)
options := &hcsschema.VirtualSmbShareOptions{}
if readOnly {
options.ReadOnly = true
options.CacheIo = true
options.ShareRead = true
options.ForceLevelIIOplocks = true
}
err := coi.HostingSystem.AddVSMB(mount.Source, "", options)
if err != nil {
return fmt.Errorf("failed to add VSMB share to utility VM for mount %+v: %s", mount, err)
}
resources.vsmbMounts = append(resources.vsmbMounts, mount.Source)
}
}
}
return nil
}
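Similarly for WCOW, a hypothetical set of OCI mounts (all paths invented). On a v2 Xenon the plain directory mount becomes a (read-only) VSMB share in the utility VM and the virtual disk is hot-added over SCSI; for a v1 Xenon or an Argon, HCS handles the mounts itself.

package example

import (
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// Illustrative only; all paths are hypothetical.
var exampleWCOWMounts = []specs.Mount{
	// A directory mount (empty Type); "ro" selects a read-only VSMB share on a v2 Xenon.
	{Source: `C:\shared\config`, Destination: `C:\config`, Options: []string{"ro"}},
	// A VHD(X) hot-added over SCSI.
	{Type: "virtual-disk", Source: `C:\disks\data.vhdx`, Destination: `C:\data`},
}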

View File

@@ -0,0 +1,260 @@
// +build windows,functional
package hcsoci
//import (
// "os"
// "path/filepath"
// "testing"
// "github.com/Microsoft/hcsshim/internal/schemaversion"
// specs "github.com/opencontainers/runtime-spec/specs-go"
//)
//// --------------------------------
//// W C O W A R G O N V 1
//// --------------------------------
//// A v1 Argon with a single base layer. It also validates hostname functionality is propagated.
//func TestV1Argon(t *testing.T) {
// t.Skip("fornow")
// tempDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(tempDir)
// layers := append(layersNanoserver, tempDir)
// mountPath, err := mountContainerLayers(layers, nil)
// if err != nil {
// t.Fatalf("failed to mount container storage: %s", err)
// }
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
// c, err := CreateContainer(&CreateOptions{
// SchemaVersion: schemaversion.SchemaV10(),
// Id: "TestV1Argon",
// Owner: "unit-test",
// Spec: &specs.Spec{
// Hostname: "goofy",
// Windows: &specs.Windows{LayerFolders: layers},
// Root: &specs.Root{Path: mountPath.(string)},
// },
// })
// if err != nil {
// t.Fatalf("Failed create: %s", err)
// }
// startContainer(t, c)
// runCommand(t, c, "cmd /s /c echo Hello", `c:\`, "Hello")
// runCommand(t, c, "cmd /s /c hostname", `c:\`, "goofy")
// stopContainer(t, c)
// c.Terminate()
//}
//// A v1 Argon with a single base layer which uses the auto-mount capability
//func TestV1ArgonAutoMount(t *testing.T) {
// t.Skip("fornow")
// tempDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(tempDir)
// layers := append(layersBusybox, tempDir)
// c, err := CreateContainer(&CreateOptions{
// Id: "TestV1ArgonAutoMount",
// SchemaVersion: schemaversion.SchemaV10(),
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layers}},
// })
// if err != nil {
// t.Fatalf("Failed create: %s", err)
// }
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
// startContainer(t, c)
// runCommand(t, c, "cmd /s /c echo Hello", `c:\`, "Hello")
// stopContainer(t, c)
// c.Terminate()
//}
//// A v1 Argon with multiple layers which uses the auto-mount capability
//func TestV1ArgonMultipleBaseLayersAutoMount(t *testing.T) {
// t.Skip("fornow")
// // This is the important bit for this test. It's deleted here. We call the helper only to allocate a temporary directory
// containerScratchDir := createTempDir(t)
// os.RemoveAll(containerScratchDir)
// defer os.RemoveAll(containerScratchDir) // As auto-created
// layers := append(layersBusybox, containerScratchDir)
// c, err := CreateContainer(&CreateOptions{
// Id: "TestV1ArgonMultipleBaseLayersAutoMount",
// SchemaVersion: schemaversion.SchemaV10(),
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layers}},
// })
// if err != nil {
// t.Fatalf("Failed create: %s", err)
// }
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
// startContainer(t, c)
// runCommand(t, c, "cmd /s /c echo Hello", `c:\`, "Hello")
// stopContainer(t, c)
// c.Terminate()
//}
//// A v1 Argon with a single mapped directory.
//func TestV1ArgonSingleMappedDirectory(t *testing.T) {
// t.Skip("fornow")
// tempDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(tempDir)
// layers := append(layersNanoserver, tempDir)
// // Create a temp folder containing foo.txt which will be used for the bind-mount test.
// source := createTempDir(t)
// defer os.RemoveAll(source)
// mount := specs.Mount{
// Source: source,
// Destination: `c:\foo`,
// }
// f, err := os.OpenFile(filepath.Join(source, "foo.txt"), os.O_RDWR|os.O_CREATE, 0755)
// f.Close()
// c, err := CreateContainer(&CreateOptions{
// SchemaVersion: schemaversion.SchemaV10(),
// Spec: &specs.Spec{
// Windows: &specs.Windows{LayerFolders: layers},
// Mounts: []specs.Mount{mount},
// },
// })
// if err != nil {
// t.Fatalf("Failed create: %s", err)
// }
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
// startContainer(t, c)
// runCommand(t, c, `cmd /s /c dir /b c:\foo`, `c:\`, "foo.txt")
// stopContainer(t, c)
// c.Terminate()
//}
//// --------------------------------
//// W C O W A R G O N V 2
//// --------------------------------
//// A v2 Argon with a single base layer. It also validates hostname functionality is propagated.
//// It also uses an auto-generated ID.
//func TestV2Argon(t *testing.T) {
// t.Skip("fornow")
// tempDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(tempDir)
// layers := append(layersNanoserver, tempDir)
// mountPath, err := mountContainerLayers(layers, nil)
// if err != nil {
// t.Fatalf("failed to mount container storage: %s", err)
// }
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
// c, err := CreateContainer(&CreateOptions{
// SchemaVersion: schemaversion.SchemaV21(),
// Spec: &specs.Spec{
// Hostname: "mickey",
// Windows: &specs.Windows{LayerFolders: layers},
// Root: &specs.Root{Path: mountPath.(string)},
// },
// })
// if err != nil {
// t.Fatalf("Failed create: %s", err)
// }
// startContainer(t, c)
// runCommand(t, c, "cmd /s /c echo Hello", `c:\`, "Hello")
// runCommand(t, c, "cmd /s /c hostname", `c:\`, "mickey")
// stopContainer(t, c)
// c.Terminate()
//}
//// A v2 Argon with multiple layers
//func TestV2ArgonMultipleBaseLayers(t *testing.T) {
// t.Skip("fornow")
// tempDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(tempDir)
// layers := append(layersBusybox, tempDir)
// mountPath, err := mountContainerLayers(layers, nil)
// if err != nil {
// t.Fatalf("failed to mount container storage: %s", err)
// }
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
// c, err := CreateContainer(&CreateOptions{
// SchemaVersion: schemaversion.SchemaV21(),
// Id: "TestV2ArgonMultipleBaseLayers",
// Spec: &specs.Spec{
// Windows: &specs.Windows{LayerFolders: layers},
// Root: &specs.Root{Path: mountPath.(string)},
// },
// })
// if err != nil {
// t.Fatalf("Failed create: %s", err)
// }
// startContainer(t, c)
// runCommand(t, c, "cmd /s /c echo Hello", `c:\`, "Hello")
// stopContainer(t, c)
// c.Terminate()
//}
//// A v2 Argon with multiple layers which uses the auto-mount capability and auto-create
//func TestV2ArgonAutoMountMultipleBaseLayers(t *testing.T) {
// t.Skip("fornow")
// // This is the important bit for this test. It's deleted here. We call the helper only to allocate a temporary directory
// containerScratchDir := createTempDir(t)
// os.RemoveAll(containerScratchDir)
// defer os.RemoveAll(containerScratchDir) // As auto-created
// layers := append(layersBusybox, containerScratchDir)
// c, err := CreateContainer(&CreateOptions{
// SchemaVersion: schemaversion.SchemaV21(),
// Id: "TestV2ArgonAutoMountMultipleBaseLayers",
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layers}},
// })
// if err != nil {
// t.Fatalf("Failed create: %s", err)
// }
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
// startContainer(t, c)
// runCommand(t, c, "cmd /s /c echo Hello", `c:\`, "Hello")
// stopContainer(t, c)
// c.Terminate()
//}
//// A v2 Argon with a single mapped directory.
//func TestV2ArgonSingleMappedDirectory(t *testing.T) {
// t.Skip("fornow")
// tempDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(tempDir)
// layers := append(layersNanoserver, tempDir)
// // Create a temp folder containing foo.txt which will be used for the bind-mount test.
// source := createTempDir(t)
// defer os.RemoveAll(source)
// mount := specs.Mount{
// Source: source,
// Destination: `c:\foo`,
// }
// f, err := os.OpenFile(filepath.Join(source, "foo.txt"), os.O_RDWR|os.O_CREATE, 0755)
// f.Close()
// c, err := CreateContainer(&CreateOptions{
// SchemaVersion: schemaversion.SchemaV21(),
// Spec: &specs.Spec{
// Windows: &specs.Windows{LayerFolders: layers},
// Mounts: []specs.Mount{mount},
// },
// })
// if err != nil {
// t.Fatalf("Failed create: %s", err)
// }
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
// startContainer(t, c)
// runCommand(t, c, `cmd /s /c dir /b c:\foo`, `c:\`, "foo.txt")
// stopContainer(t, c)
// c.Terminate()
//}

View File

@@ -0,0 +1,365 @@
// +build windows,functional
package hcsoci
//import (
// "fmt"
// "os"
// "path/filepath"
// "testing"
// "github.com/Microsoft/hcsshim/internal/schemaversion"
// specs "github.com/opencontainers/runtime-spec/specs-go"
//)
//// --------------------------------
//// W C O W X E N O N V 2
//// --------------------------------
//// A single WCOW xenon. Note in this test, neither the UVM nor the
//// containers are supplied IDs - they will be autogenerated for us.
//// This is the minimum set of parameters needed to create a V2 WCOW xenon.
//func TestV2XenonWCOW(t *testing.T) {
// t.Skip("Skipping for now")
// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "", nil)
// defer os.RemoveAll(uvmScratchDir)
// defer uvm.Terminate()
// // Create the container hosted inside the utility VM
// containerScratchDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(containerScratchDir)
// layerFolders := append(layersNanoserver, containerScratchDir)
// hostedContainer, err := CreateContainer(&CreateOptions{
// HostingSystem: uvm,
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layerFolders}},
// })
// if err != nil {
// t.Fatalf("CreateContainer failed: %s", err)
// }
// defer unmountContainerLayers(layerFolders, uvm, unmountOperationAll)
// // Start/stop the container
// startContainer(t, hostedContainer)
// runCommand(t, hostedContainer, "cmd /s /c echo TestV2XenonWCOW", `c:\`, "TestV2XenonWCOW")
// stopContainer(t, hostedContainer)
// hostedContainer.Terminate()
//}
//// TODO: Have a similar test where the UVM scratch folder does not exist.
//// A single WCOW xenon but where the container sandbox folder is not pre-created by the client
//func TestV2XenonWCOWContainerSandboxFolderDoesNotExist(t *testing.T) {
// t.Skip("Skipping for now")
// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "TestV2XenonWCOWContainerSandboxFolderDoesNotExist_UVM", nil)
// defer os.RemoveAll(uvmScratchDir)
// defer uvm.Terminate()
// // This is the important bit for this test. It's deleted here. We call the helper only to allocate a temporary directory
// containerScratchDir := createTempDir(t)
// os.RemoveAll(containerScratchDir)
// defer os.RemoveAll(containerScratchDir) // As auto-created
// layerFolders := append(layersBusybox, containerScratchDir)
// hostedContainer, err := CreateContainer(&CreateOptions{
// Id: "container",
// HostingSystem: uvm,
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layerFolders}},
// })
// if err != nil {
// t.Fatalf("CreateContainer failed: %s", err)
// }
// defer unmountContainerLayers(layerFolders, uvm, unmountOperationAll)
// // Start/stop the container
// startContainer(t, hostedContainer)
// runCommand(t, hostedContainer, "cmd /s /c echo TestV2XenonWCOW", `c:\`, "TestV2XenonWCOW")
// stopContainer(t, hostedContainer)
// hostedContainer.Terminate()
//}
//// TODO: What about mounts? Add a test where the client performs the mount itself.
//// TODO: Test as above, but where the sandbox for the UVM is created entirely by the client, to show how it's done.
//// Two v2 WCOW containers in the same UVM, each with a single base layer
//func TestV2XenonWCOWTwoContainers(t *testing.T) {
// t.Skip("Skipping for now")
// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "TestV2XenonWCOWTwoContainers_UVM", nil)
// defer os.RemoveAll(uvmScratchDir)
// defer uvm.Terminate()
// // First hosted container
// firstContainerScratchDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(firstContainerScratchDir)
// firstLayerFolders := append(layersNanoserver, firstContainerScratchDir)
//	firstHostedContainer, _, err := CreateContainer(&CreateOptions{
//		ID:            "FirstContainer",
// HostingSystem: uvm,
// SchemaVersion: schemaversion.SchemaV21(),
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: firstLayerFolders}},
// })
// if err != nil {
// t.Fatalf("CreateContainer failed: %s", err)
// }
// defer unmountContainerLayers(firstLayerFolders, uvm, unmountOperationAll)
// // Second hosted container
// secondContainerScratchDir := createWCOWTempDirWithSandbox(t)
//	defer os.RemoveAll(secondContainerScratchDir)
//	secondLayerFolders := append(layersNanoserver, secondContainerScratchDir)
//	secondHostedContainer, _, err := CreateContainer(&CreateOptions{
//		ID:            "SecondContainer",
// HostingSystem: uvm,
// SchemaVersion: schemaversion.SchemaV21(),
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: secondLayerFolders}},
// })
// if err != nil {
// t.Fatalf("CreateContainer failed: %s", err)
// }
// defer unmountContainerLayers(secondLayerFolders, uvm, unmountOperationAll)
// startContainer(t, firstHostedContainer)
// runCommand(t, firstHostedContainer, "cmd /s /c echo FirstContainer", `c:\`, "FirstContainer")
// startContainer(t, secondHostedContainer)
// runCommand(t, secondHostedContainer, "cmd /s /c echo SecondContainer", `c:\`, "SecondContainer")
// stopContainer(t, firstHostedContainer)
// stopContainer(t, secondHostedContainer)
// firstHostedContainer.Terminate()
// secondHostedContainer.Terminate()
//}
////// This verifies the container storage is unmounted correctly so that a second
////// container can be started from the same storage.
////func TestV2XenonWCOWWithRemount(t *testing.T) {
////// //t.Skip("Skipping for now")
//// uvmID := "Testv2XenonWCOWWithRestart_UVM"
//// uvmScratchDir, err := ioutil.TempDir("", "uvmScratch")
//// if err != nil {
//// t.Fatalf("Failed create temporary directory: %s", err)
//// }
//// if err := CreateWCOWSandbox(layersNanoserver[0], uvmScratchDir, uvmID); err != nil {
//// t.Fatalf("Failed create Windows UVM Sandbox: %s", err)
//// }
//// defer os.RemoveAll(uvmScratchDir)
//// uvm, err := CreateContainer(&CreateOptions{
//// Id: uvmID,
//// Owner: "unit-test",
//// SchemaVersion: SchemaV21(),
//// IsHostingSystem: true,
//// Spec: &specs.Spec{
//// Windows: &specs.Windows{
//// LayerFolders: []string{uvmScratchDir},
//// HyperV: &specs.WindowsHyperV{UtilityVMPath: filepath.Join(layersNanoserver[0], `UtilityVM\Files`)},
//// },
//// },
//// })
//// if err != nil {
//// t.Fatalf("Failed create UVM: %s", err)
//// }
//// defer uvm.Terminate()
//// if err := uvm.Start(); err != nil {
//// t.Fatalf("Failed start utility VM: %s", err)
//// }
//// // Mount the containers storage in the utility VM
//// containerScratchDir := createWCOWTempDirWithSandbox(t)
//// layerFolders := append(layersNanoserver, containerScratchDir)
//// cls, err := Mount(layerFolders, uvm, SchemaV21())
//// if err != nil {
//// t.Fatalf("failed to mount container storage: %s", err)
//// }
//// combinedLayers := cls.(CombinedLayersV2)
//// mountedLayers := &ContainersResourcesStorageV2{
//// Layers: combinedLayers.Layers,
//// Path: combinedLayers.ContainerRootPath,
//// }
//// defer func() {
//// if err := Unmount(layerFolders, uvm, SchemaV21(), unmountOperationAll); err != nil {
//// t.Fatalf("failed to unmount container storage: %s", err)
//// }
//// }()
//// // Create the first container
//// defer os.RemoveAll(containerScratchDir)
//// xenon, err := CreateContainer(&CreateOptions{
//// Id: "container",
//// Owner: "unit-test",
//// HostingSystem: uvm,
//// SchemaVersion: SchemaV21(),
//// Spec: &specs.Spec{Windows: &specs.Windows{}}, // No layerfolders as we mounted them ourself.
//// })
//// if err != nil {
//// t.Fatalf("CreateContainer failed: %s", err)
//// }
//// // Start/stop the first container
//// startContainer(t, xenon)
//// runCommand(t, xenon, "cmd /s /c echo TestV2XenonWCOWFirstStart", `c:\`, "TestV2XenonWCOWFirstStart")
//// stopContainer(t, xenon)
//// xenon.Terminate()
//// // Now unmount and remount to exactly the same places
//// if err := Unmount(layerFolders, uvm, SchemaV21(), unmountOperationAll); err != nil {
//// t.Fatalf("failed to unmount container storage: %s", err)
//// }
//// if _, err = Mount(layerFolders, uvm, SchemaV21()); err != nil {
//// t.Fatalf("failed to mount container storage: %s", err)
//// }
//// // Create an identical second container and verify it works too.
//// xenon2, err := CreateContainer(&CreateOptions{
//// Id: "container",
//// Owner: "unit-test",
//// HostingSystem: uvm,
//// SchemaVersion: SchemaV21(),
//// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layerFolders}},
//// MountedLayers: mountedLayers,
//// })
//// if err != nil {
//// t.Fatalf("CreateContainer failed: %s", err)
//// }
//// startContainer(t, xenon2)
//// runCommand(t, xenon2, "cmd /s /c echo TestV2XenonWCOWAfterRemount", `c:\`, "TestV2XenonWCOWAfterRemount")
//// stopContainer(t, xenon2)
//// xenon2.Terminate()
////}
//// Lots of v2 WCOW containers in the same UVM, each with a single base layer. Containers aren't
//// actually started, but it stresses the SCSI controller hot-add logic.
//func TestV2XenonWCOWCreateLots(t *testing.T) {
// t.Skip("Skipping for now")
// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "TestV2XenonWCOWCreateLots", nil)
// defer os.RemoveAll(uvmScratchDir)
// defer uvm.Terminate()
//	// 63, as 0:0 is already taken by the UVM's scratch, which leaves 64-1 attachment points for container scratches on SCSI.
// for i := 0; i < 63; i++ {
// containerScratchDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(containerScratchDir)
// layerFolders := append(layersNanoserver, containerScratchDir)
//		hostedContainer, _, err := CreateContainer(&CreateOptions{
//			ID:            fmt.Sprintf("container%d", i),
// HostingSystem: uvm,
// SchemaVersion: schemaversion.SchemaV21(),
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layerFolders}},
// })
// if err != nil {
// t.Fatalf("CreateContainer failed: %s", err)
// }
// defer hostedContainer.Terminate()
// defer unmountContainerLayers(layerFolders, uvm, unmountOperationAll)
// }
// // TODO: Should check the internal structures here for VSMB and SCSI
//	// TODO: Push it over 63 and verify that it fails (see the sketch after this test).
//}
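//// The TODO above could be exercised with a sketch along these lines. This is
//// an untested assumption: it presumes CreateContainer surfaces an error once
//// every SCSI attachment point is already in use, and that a single controller
//// exposes 64 attachment points including the UVM scratch at 0:0.
//func TestV2XenonWCOWCreateTooMany(t *testing.T) {
//	t.Skip("Skipping for now")
//	uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "TestV2XenonWCOWCreateTooMany_UVM", nil)
//	defer os.RemoveAll(uvmScratchDir)
//	defer uvm.Terminate()
//	for i := 0; i < 64; i++ {
//		containerScratchDir := createWCOWTempDirWithSandbox(t)
//		defer os.RemoveAll(containerScratchDir)
//		layerFolders := append(layersNanoserver, containerScratchDir)
//		hostedContainer, _, err := CreateContainer(&CreateOptions{
//			ID:            fmt.Sprintf("container%d", i),
//			HostingSystem: uvm,
//			SchemaVersion: schemaversion.SchemaV21(),
//			Spec:          &specs.Spec{Windows: &specs.Windows{LayerFolders: layerFolders}},
//		})
//		if i < 63 {
//			if err != nil {
//				t.Fatalf("CreateContainer failed for container %d: %s", i, err)
//			}
//			defer hostedContainer.Terminate()
//			defer unmountContainerLayers(layerFolders, uvm, unmountOperationAll)
//			continue
//		}
//		// The 64th container scratch should not fit on the controller.
//		if err == nil {
//			t.Fatalf("expected CreateContainer to fail once all SCSI attachment points are in use")
//		}
//	}
//}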
//// Helper for the v2 Xenon tests to create a utility VM. Returns the UtilityVM
//// object and the folder used as its scratch.
//func createv2WCOWUVM(t *testing.T, uvmLayers []string, uvmId string, resources *specs.WindowsResources) (*UtilityVM, string) {
// scratchDir := createTempDir(t)
// uvm := UtilityVM{
// OperatingSystem: "windows",
// LayerFolders: append(uvmLayers, scratchDir),
// Resources: resources,
// }
// if uvmId != "" {
// uvm.Id = uvmId
// }
// if err := uvm.Create(); err != nil {
// t.Fatalf("Failed create WCOW v2 UVM: %s", err)
// }
// if err := uvm.Start(); err != nil {
// t.Fatalf("Failed start WCOW v2UVM: %s", err)
// }
// return &uvm, scratchDir
//}
//// TestV2XenonWCOWMultiLayer creates a V2 Xenon having multiple image layers
//func TestV2XenonWCOWMultiLayer(t *testing.T) {
// t.Skip("for now")
// uvmMemory := uint64(1 * 1024 * 1024 * 1024)
// uvmCPUCount := uint64(2)
// resources := &specs.WindowsResources{
// Memory: &specs.WindowsMemoryResources{
// Limit: &uvmMemory,
// },
// CPU: &specs.WindowsCPUResources{
// Count: &uvmCPUCount,
// },
// }
// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "TestV2XenonWCOWMultiLayer_UVM", resources)
// defer os.RemoveAll(uvmScratchDir)
// defer uvm.Terminate()
// // Create a sandbox for the hosted container
// containerScratchDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(containerScratchDir)
// // Create the container. Note that this will auto-mount for us.
// containerLayers := append(layersBusybox, containerScratchDir)
//	xenon, _, err := CreateContainer(&CreateOptions{
//		ID:            "container",
// HostingSystem: uvm,
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: containerLayers}},
// })
// if err != nil {
// t.Fatalf("CreateContainer failed: %s", err)
// }
// // Start/stop the container
// startContainer(t, xenon)
// runCommand(t, xenon, "echo Container", `c:\`, "Container")
// stopContainer(t, xenon)
// xenon.Terminate()
//	// TODO: Move this to a defer so the test fails if the unmount fails.
// if err := unmountContainerLayers(containerLayers, uvm, unmountOperationAll); err != nil {
// t.Fatalf("unmount failed: %s", err)
// }
//}
//// TestV2XenonWCOWSingleMappedDirectory tests a V2 Xenon WCOW with a single mapped directory
//func TestV2XenonWCOWSingleMappedDirectory(t *testing.T) {
// t.Skip("Skipping for now")
// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "", nil)
// defer os.RemoveAll(uvmScratchDir)
// defer uvm.Terminate()
// // Create the container hosted inside the utility VM
// containerScratchDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(containerScratchDir)
// layerFolders := append(layersNanoserver, containerScratchDir)
// // Create a temp folder containing foo.txt which will be used for the bind-mount test.
// source := createTempDir(t)
// defer os.RemoveAll(source)
// mount := specs.Mount{
// Source: source,
// Destination: `c:\foo`,
// }
//	f, err := os.OpenFile(filepath.Join(source, "foo.txt"), os.O_RDWR|os.O_CREATE, 0755)
//	if err != nil {
//		t.Fatalf("Failed to create foo.txt: %s", err)
//	}
//	f.Close()
//	hostedContainer, _, err := CreateContainer(&CreateOptions{
// HostingSystem: uvm,
// Spec: &specs.Spec{
// Windows: &specs.Windows{LayerFolders: layerFolders},
// Mounts: []specs.Mount{mount},
// },
// })
// if err != nil {
// t.Fatalf("CreateContainer failed: %s", err)
// }
// defer unmountContainerLayers(layerFolders, uvm, unmountOperationAll)
//	// TODO BUGBUG NEED TO UNMOUNT THE VSMB SHARE FOR THE CONTAINER (see the note after this test)
// // Start/stop the container
// startContainer(t, hostedContainer)
// runCommand(t, hostedContainer, `cmd /s /c dir /b c:\foo`, `c:\`, "foo.txt")
// stopContainer(t, hostedContainer)
// hostedContainer.Terminate()
//}
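//// A possible shape for the BUGBUG cleanup above. RemoveVSMB is used here for
//// illustration only and is an assumption - the exact API for tearing down the
//// share backing the mapped directory is not shown in this file.
////
////	defer func() {
////		if err := uvm.RemoveVSMB(source); err != nil {
////			t.Fatalf("failed to remove VSMB share: %s", err)
////		}
////	}()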