Windows: Updates Windows Vendoring

Updates Windows-dependent libraries for vendoring.
Nathan Gieseker
2019-01-23 18:43:18 -08:00
parent a686cc4bd8
commit 9a429d8d25
839 changed files with 282895 additions and 774 deletions

View File

@@ -0,0 +1,93 @@
// Package appargs provides argument validation routines for use with
// github.com/urfave/cli.
package appargs
import (
"errors"
"strconv"
"github.com/urfave/cli"
)
// Validator is an argument validator function. It returns the number of
// arguments consumed or -1 on error.
type Validator = func([]string) int
// String is a validator for strings.
func String(args []string) int {
if len(args) == 0 {
return -1
}
return 1
}
// NonEmptyString is a validator for non-empty strings.
func NonEmptyString(args []string) int {
if len(args) == 0 || args[0] == "" {
return -1
}
return 1
}
// Int returns a validator for integers.
func Int(base int, min int, max int) Validator {
return func(args []string) int {
if len(args) == 0 {
return -1
}
i, err := strconv.ParseInt(args[0], base, 0)
if err != nil || int(i) < min || int(i) > max {
return -1
}
return 1
}
}
// Optional returns a validator that treats an argument as optional.
func Optional(v Validator) Validator {
return func(args []string) int {
if len(args) == 0 {
return 0
}
return v(args)
}
}
// Rest returns a validator that validates each of the remaining arguments.
func Rest(v Validator) Validator {
return func(args []string) int {
count := len(args)
for len(args) != 0 {
n := v(args)
if n < 0 {
return n
}
args = args[n:]
}
return count
}
}
// ErrInvalidUsage is returned when there is a validation error.
var ErrInvalidUsage = errors.New("invalid command usage")
// Validate can be used as a command's Before function to validate the arguments
// to the command.
func Validate(vs ...Validator) cli.BeforeFunc {
return func(context *cli.Context) error {
remaining := context.Args()
for _, v := range vs {
consumed := v(remaining)
if consumed < 0 {
return ErrInvalidUsage
}
remaining = remaining[consumed:]
}
if len(remaining) > 0 {
return ErrInvalidUsage
}
return nil
}
}
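
For context, a minimal sketch of how these validators compose at a urfave/cli call site; the command name, argument shape, and integer bounds below are hypothetical:

package main

import (
	"os"

	"github.com/Microsoft/hcsshim/internal/appargs"
	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Commands = []cli.Command{
		{
			Name:      "resize",
			ArgsUsage: "<id> [size]",
			// Consume one non-empty string, then an optional base-10
			// integer in [0, 65535]; any leftover argument makes
			// Validate return ErrInvalidUsage.
			Before: appargs.Validate(
				appargs.NonEmptyString,
				appargs.Optional(appargs.Int(10, 0, 65535)),
			),
			Action: func(ctx *cli.Context) error { return nil },
		},
	}
	_ = app.Run(os.Args)
}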

View File

@@ -0,0 +1,110 @@
package cni
import (
"errors"
"github.com/Microsoft/hcsshim/internal/guid"
"github.com/Microsoft/hcsshim/internal/regstate"
)
const (
cniRoot = "cni"
cniKey = "cfg"
)
// PersistedNamespaceConfig is the registry version of the `NamespaceID` to UVM
// map.
type PersistedNamespaceConfig struct {
namespaceID string
stored bool
ContainerID string
HostUniqueID guid.GUID
}
// NewPersistedNamespaceConfig creates an in-memory namespace config that can be
// persisted to the registry.
func NewPersistedNamespaceConfig(namespaceID, containerID string, containerHostUniqueID guid.GUID) *PersistedNamespaceConfig {
return &PersistedNamespaceConfig{
namespaceID: namespaceID,
ContainerID: containerID,
HostUniqueID: containerHostUniqueID,
}
}
// LoadPersistedNamespaceConfig loads a persisted config from the registry that matches
// `namespaceID`. If not found, it returns `regstate.NotFoundError`.
func LoadPersistedNamespaceConfig(namespaceID string) (*PersistedNamespaceConfig, error) {
sk, err := regstate.Open(cniRoot, false)
if err != nil {
return nil, err
}
defer sk.Close()
pnc := PersistedNamespaceConfig{
namespaceID: namespaceID,
stored: true,
}
if err := sk.Get(namespaceID, cniKey, &pnc); err != nil {
return nil, err
}
return &pnc, nil
}
// Store stores or updates the in-memory config to its registry state. If the
// store fails, it returns the store error.
func (pnc *PersistedNamespaceConfig) Store() error {
if pnc.namespaceID == "" {
return errors.New("invalid namespaceID ''")
}
if pnc.ContainerID == "" {
return errors.New("invalid containerID ''")
}
empty := guid.GUID{}
if pnc.HostUniqueID == empty {
return errors.New("invalid containerHostUniqueID 'empty'")
}
sk, err := regstate.Open(cniRoot, false)
if err != nil {
return err
}
defer sk.Close()
if pnc.stored {
if err := sk.Set(pnc.namespaceID, cniKey, pnc); err != nil {
return err
}
} else {
if err := sk.Create(pnc.namespaceID, cniKey, pnc); err != nil {
return err
}
}
pnc.stored = true
return nil
}
// Remove removes any persisted state associated with this config. If the config
// is not found in the registry, `Remove` returns no error.
func (pnc *PersistedNamespaceConfig) Remove() error {
if pnc.stored {
sk, err := regstate.Open(cniRoot, false)
if err != nil {
if regstate.IsNotFoundError(err) {
pnc.stored = false
return nil
}
return err
}
defer sk.Close()
if err := sk.Remove(pnc.namespaceID); err != nil {
if regstate.IsNotFoundError(err) {
pnc.stored = false
return nil
}
return err
}
}
pnc.stored = false
return nil
}
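
A sketch of the intended lifecycle, mirroring what the tests below exercise; the namespace and container IDs are placeholders:

package main

import (
	"log"

	"github.com/Microsoft/hcsshim/internal/cni"
	"github.com/Microsoft/hcsshim/internal/guid"
	"github.com/Microsoft/hcsshim/internal/regstate"
)

func main() {
	// Create an in-memory config and persist it to the registry.
	pnc := cni.NewPersistedNamespaceConfig("ns-id", "container-id", guid.New())
	if err := pnc.Store(); err != nil {
		log.Fatal(err)
	}
	// Reload by namespace ID; a missing entry surfaces as a NotFoundError.
	if _, err := cni.LoadPersistedNamespaceConfig("ns-id"); err != nil {
		if regstate.IsNotFoundError(err) {
			log.Print("no persisted mapping for this namespace")
		}
		log.Fatal(err)
	}
	// Remove tolerates registry state that was already deleted.
	if err := pnc.Remove(); err != nil {
		log.Fatal(err)
	}
}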

View File

@@ -0,0 +1,137 @@
package cni
import (
"testing"
"github.com/Microsoft/hcsshim/internal/guid"
"github.com/Microsoft/hcsshim/internal/regstate"
)
func Test_LoadPersistedNamespaceConfig_NoConfig(t *testing.T) {
pnc, err := LoadPersistedNamespaceConfig(t.Name())
if pnc != nil {
t.Fatal("config should be nil")
}
if err == nil {
t.Fatal("err should be set")
} else {
if !regstate.IsNotFoundError(err) {
t.Fatal("err should be NotFoundError")
}
}
}
func Test_LoadPersistedNamespaceConfig_WithConfig(t *testing.T) {
pnc := NewPersistedNamespaceConfig(t.Name(), "test-container", guid.New())
err := pnc.Store()
if err != nil {
pnc.Remove()
t.Fatalf("store failed with: %v", err)
}
defer pnc.Remove()
pnc2, err := LoadPersistedNamespaceConfig(t.Name())
if err != nil {
t.Fatal("should have no error on stored config")
}
if pnc2 == nil {
t.Fatal("stored config should have been returned")
} else {
if pnc.namespaceID != pnc2.namespaceID {
t.Fatal("actual/stored namespaceID not equal")
}
if pnc.ContainerID != pnc2.ContainerID {
t.Fatal("actual/stored ContainerID not equal")
}
if pnc.HostUniqueID != pnc2.HostUniqueID {
t.Fatal("actual/stored HostUniqueID not equal")
}
if !pnc2.stored {
t.Fatal("stored should be true for registry load")
}
}
}
func Test_PersistedNamespaceConfig_StoreNew(t *testing.T) {
pnc := NewPersistedNamespaceConfig(t.Name(), "test-container", guid.New())
err := pnc.Store()
if err != nil {
pnc.Remove()
t.Fatalf("store failed with: %v", err)
}
defer pnc.Remove()
}
func Test_PersistedNamespaceConfig_StoreUpdate(t *testing.T) {
pnc := NewPersistedNamespaceConfig(t.Name(), "test-container", guid.New())
err := pnc.Store()
if err != nil {
pnc.Remove()
t.Fatalf("store failed with: %v", err)
}
defer pnc.Remove()
pnc.ContainerID = "test-container2"
pnc.HostUniqueID = guid.New()
err = pnc.Store()
if err != nil {
pnc.Remove()
t.Fatalf("store update failed with: %v", err)
}
// Verify the update
pnc2, err := LoadPersistedNamespaceConfig(t.Name())
if err != nil {
t.Fatal("stored config should have been returned")
}
if pnc.ContainerID != pnc2.ContainerID {
t.Fatal("actual/stored ContainerID not equal")
}
if pnc.HostUniqueID != pnc2.HostUniqueID {
t.Fatal("actual/stored HostUniqueID not equal")
}
}
func Test_PersistedNamespaceConfig_RemoveNotStored(t *testing.T) {
pnc := NewPersistedNamespaceConfig(t.Name(), "test-container", guid.New())
err := pnc.Remove()
if err != nil {
t.Fatalf("remove on not stored should not fail: %v", err)
}
}
func Test_PersistedNamespaceConfig_RemoveStoredKey(t *testing.T) {
pnc := NewPersistedNamespaceConfig(t.Name(), "test-container", guid.New())
err := pnc.Store()
if err != nil {
t.Fatalf("store failed with: %v", err)
}
err = pnc.Remove()
if err != nil {
t.Fatalf("remove on stored key should not fail: %v", err)
}
}
func Test_PersistedNamespaceConfig_RemovedOtherKey(t *testing.T) {
pnc := NewPersistedNamespaceConfig(t.Name(), "test-container", guid.New())
err := pnc.Store()
if err != nil {
t.Fatalf("store failed with: %v", err)
}
pnc2, err := LoadPersistedNamespaceConfig(t.Name())
if err != nil {
t.Fatal("should have found stored config")
}
err = pnc.Remove()
if err != nil {
t.Fatalf("remove on stored key should not fail: %v", err)
}
// Now remove the other key, whose in-memory state is now stale
err = pnc2.Remove()
if err != nil {
t.Fatalf("remove on in-memory already removed should not fail: %v", err)
}
}

View File

@@ -0,0 +1,40 @@
package copyfile
import (
"fmt"
"syscall"
"unsafe"
)
var (
modkernel32 = syscall.NewLazyDLL("kernel32.dll")
procCopyFileW = modkernel32.NewProc("CopyFileW")
)
// CopyFile is a utility for copying a file, used for the LCOW scratch cache.
// It uses the CopyFileW Win32 API for performance.
func CopyFile(srcFile, destFile string, overwrite bool) error {
var bFailIfExists uint32 = 1
if overwrite {
bFailIfExists = 0
}
lpExistingFileName, err := syscall.UTF16PtrFromString(srcFile)
if err != nil {
return err
}
lpNewFileName, err := syscall.UTF16PtrFromString(destFile)
if err != nil {
return err
}
r1, _, err := syscall.Syscall(
procCopyFileW.Addr(),
3,
uintptr(unsafe.Pointer(lpExistingFileName)),
uintptr(unsafe.Pointer(lpNewFileName)),
uintptr(bFailIfExists))
if r1 == 0 {
return fmt.Errorf("failed CopyFileW Win32 call from '%s' to '%s': %s", srcFile, destFile, err)
}
return nil
}
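
A usage sketch; the paths are placeholders, and with overwrite set to false the copy fails if the destination already exists:

package main

import (
	"log"

	"github.com/Microsoft/hcsshim/internal/copyfile"
)

func main() {
	// Copy a cached scratch disk without clobbering an existing destination.
	err := copyfile.CopyFile(`C:\cache\scratch.vhdx`, `C:\vm\scratch.vhdx`, false)
	if err != nil {
		log.Fatal(err)
	}
}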

View File

@@ -0,0 +1,103 @@
package copywithtimeout
import (
"bytes"
"encoding/hex"
"fmt"
"io"
"os"
"strconv"
"syscall"
"time"
"github.com/sirupsen/logrus"
)
// logDataByteCount is for an advanced debugging technique that allows
// data read from or written to a process's stdio channels to be hex-dumped
// to the log when running at debug level or higher. It is controlled through
// the environment variable HCSSHIM_LOG_DATA_BYTE_COUNT.
var logDataByteCount int64
func init() {
env := os.Getenv("HCSSHIM_LOG_DATA_BYTE_COUNT")
if len(env) > 0 {
u, err := strconv.ParseUint(env, 10, 32)
if err == nil {
logDataByteCount = int64(u)
}
}
}
// Copy is a wrapper around io.Copy that enforces a timeout duration.
func Copy(dst io.Writer, src io.Reader, size int64, context string, timeout time.Duration) (int64, error) {
logrus.WithFields(logrus.Fields{
"stdval": context,
"size": size,
"timeout": timeout,
}).Debug("hcsshim::copywithtimeout - Begin")
type resultType struct {
err error
bytes int64
}
done := make(chan resultType, 1)
go func() {
result := resultType{}
if logrus.GetLevel() < logrus.DebugLevel || logDataByteCount == 0 {
result.bytes, result.err = io.Copy(dst, src)
} else {
// In advanced debug mode where we log (hexdump format) what is copied
// up to the number of bytes defined by environment variable
// HCSSHIM_LOG_DATA_BYTE_COUNT
var buf bytes.Buffer
tee := io.TeeReader(src, &buf)
result.bytes, result.err = io.Copy(dst, tee)
if result.err == nil {
size := result.bytes
if size > logDataByteCount {
size = logDataByteCount
}
if size > 0 {
bytes := make([]byte, size)
if _, err := buf.Read(bytes); err == nil {
logrus.Debugf("hcsshim::copyWithTimeout - Read bytes\n%s", hex.Dump(bytes))
}
}
}
}
done <- result
}()
var result resultType
timedout := time.After(timeout)
select {
case <-timedout:
return 0, fmt.Errorf("hcsshim::copyWithTimeout: timed out (%s)", context)
case result = <-done:
if result.err != nil && result.err != io.EOF {
// See https://github.com/golang/go/blob/f3f29d1dea525f48995c1693c609f5e67c046893/src/os/exec/exec_windows.go for a clue as to why we are doing this :)
if se, ok := result.err.(syscall.Errno); ok {
const (
errNoData = syscall.Errno(232)
errBrokenPipe = syscall.Errno(109)
)
if se == errNoData || se == errBrokenPipe {
logrus.WithFields(logrus.Fields{
"stdval": context,
logrus.ErrorKey: se,
}).Debug("hcsshim::copywithtimeout - End")
return result.bytes, nil
}
}
return 0, fmt.Errorf("hcsshim::copyWithTimeout: error reading: '%s' after %d bytes (%s)", result.err, result.bytes, context)
}
}
logrus.WithFields(logrus.Fields{
"stdval": context,
"copied-bytes": result.bytes,
}).Debug("hcsshim::copywithtimeout - Completed Successfully")
return result.bytes, nil
}
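
A usage sketch that bounds a stream copy, with an in-memory reader standing in for a process pipe; `size` is advisory (only logged) and `context` labels the stream in log output:

package main

import (
	"bytes"
	"log"
	"strings"
	"time"

	"github.com/Microsoft/hcsshim/internal/copywithtimeout"
)

func main() {
	src := strings.NewReader("hello from the guest\n")
	var dst bytes.Buffer
	n, err := copywithtimeout.Copy(&dst, src, int64(src.Len()), "stdout", 30*time.Second)
	if err != nil {
		log.Fatal(err) // includes the timed-out case
	}
	log.Printf("copied %d bytes", n)
}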

View File

@@ -0,0 +1,100 @@
package guestrequest
import (
"github.com/Microsoft/hcsshim/internal/schema2"
)
// Arguably, many of these (at least CombinedLayers) should have been generated
// by swagger.
//
// The package name will also change due to an inbound breaking change.
// CombinedLayers is used by a modify request to add or remove a combined layers
// structure in the guest. For Windows, the GCS applies a filter at ContainerRootPath
// using the specified layers as the parent content. It ignores the ScratchPath
// property since the container path is already the scratch path. For Linux, the GCS
// unions the specified layers and ScratchPath together, placing the resulting union
// filesystem at ContainerRootPath.
type CombinedLayers struct {
ContainerRootPath string `json:"ContainerRootPath,omitempty"`
Layers []hcsschema.Layer `json:"Layers,omitempty"`
ScratchPath string `json:"ScratchPath,omitempty"`
}
// LCOWMappedVirtualDisk defines the schema for hosted SCSI settings passed to GCS
// and/or OpenGCS: scratch space for remote file-system commands, or R/W layer for containers
type LCOWMappedVirtualDisk struct {
MountPath string `json:"MountPath,omitempty"` // /tmp/scratch for an LCOW utility VM being used as a service VM
Lun uint8 `json:"Lun,omitempty"`
Controller uint8 `json:"Controller,omitempty"`
ReadOnly bool `json:"ReadOnly,omitempty"`
}
type WCOWMappedVirtualDisk struct {
ContainerPath string `json:"ContainerPath,omitempty"`
Lun int32 `json:"Lun,omitempty"`
}
type LCOWMappedDirectory struct {
MountPath string `json:"MountPath,omitempty"`
Port int32 `json:"Port,omitempty"`
ShareName string `json:"ShareName,omitempty"` // If empty not using ANames (not currently supported)
ReadOnly bool `json:"ReadOnly,omitempty"`
}
// Read-only layers over VPMem
type LCOWMappedVPMemDevice struct {
DeviceNumber uint32 `json:"DeviceNumber,omitempty"`
MountPath string `json:"MountPath,omitempty"` // /tmp/pN
}
type LCOWNetworkAdapter struct {
NamespaceID string `json:",omitempty"`
ID string `json:",omitempty"`
MacAddress string `json:",omitempty"`
IPAddress string `json:",omitempty"`
PrefixLength uint8 `json:",omitempty"`
GatewayAddress string `json:",omitempty"`
DNSSuffix string `json:",omitempty"`
DNSServerList string `json:",omitempty"`
EnableLowMetric bool `json:",omitempty"`
EncapOverhead uint16 `json:",omitempty"`
}
type ResourceType string
const (
// These are constants for v2 schema modify guest requests.
ResourceTypeMappedDirectory ResourceType = "MappedDirectory"
ResourceTypeMappedVirtualDisk ResourceType = "MappedVirtualDisk"
ResourceTypeNetwork ResourceType = "Network"
ResourceTypeNetworkNamespace ResourceType = "NetworkNamespace"
ResourceTypeCombinedLayers ResourceType = "CombinedLayers"
ResourceTypeVPMemDevice ResourceType = "VPMemDevice"
)
// GuestRequest is for modify commands passed to the guest.
type GuestRequest struct {
RequestType string `json:"RequestType,omitempty"`
ResourceType ResourceType `json:"ResourceType,omitempty"`
Settings interface{} `json:"Settings,omitempty"`
}
type NetworkModifyRequest struct {
AdapterId string `json:"AdapterId,omitempty"`
RequestType string `json:"RequestType,omitempty"`
Settings interface{} `json:"Settings,omitempty"`
}
type RS4NetworkModifyRequest struct {
AdapterInstanceId string `json:"AdapterInstanceId,omitempty"`
RequestType string `json:"RequestType,omitempty"`
Settings interface{} `json:"Settings,omitempty"`
}
// SignalProcessOptions are the options passed to either WCOW or LCOW
// to signal a given process.
type SignalProcessOptions struct {
Signal int `json:",omitempty"`
}
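
As an illustration, a guest modify request that unions layers at a container root might be assembled as below. The paths and layer ID are placeholders, and the "Add" request type is an assumption following the v2 schema's Add/Remove convention:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Microsoft/hcsshim/internal/guestrequest"
	hcsschema "github.com/Microsoft/hcsshim/internal/schema2"
)

func main() {
	req := guestrequest.GuestRequest{
		RequestType:  "Add", // assumed v2 schema request type
		ResourceType: guestrequest.ResourceTypeCombinedLayers,
		Settings: guestrequest.CombinedLayers{
			ContainerRootPath: `C:\c\1\rootfs`,
			Layers:            []hcsschema.Layer{{Id: "layer-guid", Path: `C:\l\base`}},
		},
	}
	b, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}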

View File

@@ -0,0 +1,136 @@
package guid
import (
"encoding/json"
"fmt"
"testing"
)
func Test_New(t *testing.T) {
g := New()
g2 := New()
if g == g2 {
t.Fatal("GUIDs should not be equal when generated")
}
}
func Test_FromString(t *testing.T) {
g := New()
g2 := FromString(g.String())
if g != g2 {
t.Fatalf("GUIDs not equal %v, %v", g, g2)
}
}
func Test_MarshalJSON(t *testing.T) {
g := New()
gs := g.String()
js, err := json.Marshal(g)
if err != nil {
t.Fatalf("failed to marshal with %v", err)
}
gsJSON := fmt.Sprintf("\"%s\"", gs)
if gsJSON != string(js) {
t.Fatalf("failed to marshal %s != %s", gsJSON, string(js))
}
}
func Test_MarshalJSON_Ptr(t *testing.T) {
g := New()
gs := g.String()
js, err := json.Marshal(&g)
if err != nil {
t.Fatalf("failed to marshal with %v", err)
}
gsJSON := fmt.Sprintf("\"%s\"", gs)
if gsJSON != string(js) {
t.Fatalf("failed to marshal %s != %s", gsJSON, string(js))
}
}
func Test_MarshalJSON_Nested(t *testing.T) {
type test struct {
G GUID
}
t1 := test{
G: New(),
}
gs := t1.G.String()
js, err := json.Marshal(t1)
if err != nil {
t.Fatalf("failed to marshal with %v", err)
}
gsJSON := fmt.Sprintf("{\"G\":\"%s\"}", gs)
if gsJSON != string(js) {
t.Fatalf("failed to marshal %s != %s", gsJSON, string(js))
}
}
func Test_MarshalJSON_Nested_Ptr(t *testing.T) {
type test struct {
G *GUID
}
v := New()
t1 := test{
G: &v,
}
gs := t1.G.String()
js, err := json.Marshal(t1)
if err != nil {
t.Fatalf("failed to marshal with %v", err)
}
gsJSON := fmt.Sprintf("{\"G\":\"%s\"}", gs)
if gsJSON != string(js) {
t.Fatalf("failed to marshal %s != %s", gsJSON, string(js))
}
}
func Test_UnmarshalJSON(t *testing.T) {
g := New()
js, _ := json.Marshal(g)
var g2 GUID
err := json.Unmarshal(js, &g2)
if err != nil {
t.Fatalf("failed to unmarshal with: %v", err)
}
if g != g2 {
t.Fatalf("failed to unmarshal %s != %s", g, g2)
}
}
func Test_UnmarshalJSON_Nested(t *testing.T) {
type test struct {
G GUID
}
t1 := test{
G: New(),
}
js, _ := json.Marshal(t1)
var t2 test
err := json.Unmarshal(js, &t2)
if err != nil {
t.Fatalf("failed to unmarshal with: %v", err)
}
if t1.G != t2.G {
t.Fatalf("failed to unmarshal %v != %v", t1.G, t2.G)
}
}
func Test_UnmarshalJSON_Nested_Ptr(t *testing.T) {
type test struct {
G *GUID
}
v := New()
t1 := test{
G: &v,
}
js, _ := json.Marshal(t1)
var t2 test
err := json.Unmarshal(js, &t2)
if err != nil {
t.Fatalf("failed to unmarshal with: %v", err)
}
if *t1.G != *t2.G {
t.Fatalf("failed to unmarshal %v != %v", t1.G, t2.G)
}
}

View File

@@ -5,6 +5,7 @@ import (
"syscall"
"github.com/Microsoft/hcsshim/internal/interop"
"github.com/sirupsen/logrus"
)
var (
@@ -15,11 +16,20 @@ var (
notificationWatcherCallback = syscall.NewCallback(notificationWatcher)
// Notifications for HCS_SYSTEM handles
hcsNotificationSystemExited hcsNotification = 0x00000001
hcsNotificationSystemCreateCompleted hcsNotification = 0x00000002
hcsNotificationSystemStartCompleted hcsNotification = 0x00000003
hcsNotificationSystemPauseCompleted hcsNotification = 0x00000004
hcsNotificationSystemResumeCompleted hcsNotification = 0x00000005
hcsNotificationSystemExited hcsNotification = 0x00000001
hcsNotificationSystemCreateCompleted hcsNotification = 0x00000002
hcsNotificationSystemStartCompleted hcsNotification = 0x00000003
hcsNotificationSystemPauseCompleted hcsNotification = 0x00000004
hcsNotificationSystemResumeCompleted hcsNotification = 0x00000005
hcsNotificationSystemCrashReport hcsNotification = 0x00000006
hcsNotificationSystemSiloJobCreated hcsNotification = 0x00000007
hcsNotificationSystemSaveCompleted hcsNotification = 0x00000008
hcsNotificationSystemRdpEnhancedModeStateChanged hcsNotification = 0x00000009
hcsNotificationSystemShutdownFailed hcsNotification = 0x0000000A
hcsNotificationSystemGetPropertiesCompleted hcsNotification = 0x0000000B
hcsNotificationSystemModifyCompleted hcsNotification = 0x0000000C
hcsNotificationSystemCrashInitiated hcsNotification = 0x0000000D
hcsNotificationSystemGuestConnectionClosed hcsNotification = 0x0000000E
// Notifications for HCS_PROCESS handles
hcsNotificationProcessExited hcsNotification = 0x00010000
@@ -49,16 +59,23 @@ func newChannels() notificationChannels {
channels[hcsNotificationSystemResumeCompleted] = make(notificationChannel, 1)
channels[hcsNotificationProcessExited] = make(notificationChannel, 1)
channels[hcsNotificationServiceDisconnect] = make(notificationChannel, 1)
channels[hcsNotificationSystemCrashReport] = make(notificationChannel, 1)
channels[hcsNotificationSystemSiloJobCreated] = make(notificationChannel, 1)
channels[hcsNotificationSystemSaveCompleted] = make(notificationChannel, 1)
channels[hcsNotificationSystemRdpEnhancedModeStateChanged] = make(notificationChannel, 1)
channels[hcsNotificationSystemShutdownFailed] = make(notificationChannel, 1)
channels[hcsNotificationSystemGetPropertiesCompleted] = make(notificationChannel, 1)
channels[hcsNotificationSystemModifyCompleted] = make(notificationChannel, 1)
channels[hcsNotificationSystemCrashInitiated] = make(notificationChannel, 1)
channels[hcsNotificationSystemGuestConnectionClosed] = make(notificationChannel, 1)
return channels
}
func closeChannels(channels notificationChannels) {
close(channels[hcsNotificationSystemExited])
close(channels[hcsNotificationSystemCreateCompleted])
close(channels[hcsNotificationSystemStartCompleted])
close(channels[hcsNotificationSystemPauseCompleted])
close(channels[hcsNotificationSystemResumeCompleted])
close(channels[hcsNotificationProcessExited])
close(channels[hcsNotificationServiceDisconnect])
for _, c := range channels {
close(c)
}
}
func notificationWatcher(notificationType hcsNotification, callbackNumber uintptr, notificationStatus uintptr, notificationData *uint16) uintptr {
@@ -75,7 +92,13 @@ func notificationWatcher(notificationType hcsNotification, callbackNumber uintpt
return 0
}
context.channels[notificationType] <- result
if channel, ok := context.channels[notificationType]; ok {
channel <- result
} else {
logrus.WithFields(logrus.Fields{
"notification-type": notificationType,
}).Warn("Received a callback of an unsupported type")
}
return 0
}
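
The guarded lookup above matters because indexing a Go map with a missing key returns the zero value, which for a channel is nil, and a send on a nil channel blocks forever. A standalone sketch of the idiom:

package main

import "fmt"

func main() {
	channels := map[int]chan string{1: make(chan string, 1)}
	// channels[2] yields a nil channel; an unguarded send would block forever.
	if ch, ok := channels[2]; ok {
		ch <- "result"
	} else {
		fmt.Println("dropped notification of unsupported type")
	}
}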

View File

@@ -7,6 +7,7 @@ import (
"syscall"
"github.com/Microsoft/hcsshim/internal/interop"
"github.com/Microsoft/hcsshim/internal/logfields"
"github.com/sirupsen/logrus"
)
@@ -72,6 +73,9 @@ var (
// ErrVmcomputeUnknownMessage is an error encountered when the guest compute system doesn't support the message
ErrVmcomputeUnknownMessage = syscall.Errno(0xc037010b)
// ErrVmcomputeUnexpectedExit is an error encountered when the compute system terminates unexpectedly
ErrVmcomputeUnexpectedExit = syscall.Errno(0xC0370106)
// ErrPlatformNotSupported is an error encountered when hcs doesn't support the request
ErrPlatformNotSupported = errors.New("unsupported platform request")
)
@@ -116,10 +120,14 @@ func (ev *ErrorEvent) String() string {
func processHcsResult(resultp *uint16) []ErrorEvent {
if resultp != nil {
resultj := interop.ConvertAndFreeCoTaskMemString(resultp)
logrus.Debugf("Result: %s", resultj)
logrus.WithField(logfields.JSON, resultj).
Debug("HCS Result")
result := &hcsResult{}
if err := json.Unmarshal([]byte(resultj), result); err != nil {
logrus.Warnf("Could not unmarshal HCS result %s: %s", resultj, err)
logrus.WithFields(logrus.Fields{
logfields.JSON: resultj,
logrus.ErrorKey: err,
}).Warning("Could not unmarshal HCS result")
return nil
}
return result.ErrorEvents

View File

@@ -27,6 +27,7 @@ import (
//sys hcsOpenProcess(computeSystem hcsSystem, pid uint32, process *hcsProcess, result **uint16) (hr error) = vmcompute.HcsOpenProcess?
//sys hcsCloseProcess(process hcsProcess) (hr error) = vmcompute.HcsCloseProcess?
//sys hcsTerminateProcess(process hcsProcess, result **uint16) (hr error) = vmcompute.HcsTerminateProcess?
//sys hcsSignalProcess(process hcsProcess, options string, result **uint16) (hr error) = vmcompute.HcsTerminateProcess?
//sys hcsGetProcessInfo(process hcsProcess, processInformation *hcsProcessInformation, result **uint16) (hr error) = vmcompute.HcsGetProcessInfo?
//sys hcsGetProcessProperties(process hcsProcess, processProperties **uint16, result **uint16) (hr error) = vmcompute.HcsGetProcessProperties?
//sys hcsModifyProcess(process hcsProcess, settings string, result **uint16) (hr error) = vmcompute.HcsModifyProcess?

View File

@@ -0,0 +1,15 @@
package hcs
import "github.com/sirupsen/logrus"
func logOperationBegin(ctx logrus.Fields, msg string) {
logrus.WithFields(ctx).Debug(msg)
}
func logOperationEnd(ctx logrus.Fields, msg string, err error) {
if err == nil {
logrus.WithFields(ctx).Debug(msg)
} else {
logrus.WithFields(ctx).WithError(err).Error(msg)
}
}
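
These helpers pair a Begin entry with a deferred End entry driven by a named error return, so the final error value selects the log level. A sketch of the calling pattern, with illustrative fields:

package hcs

import "github.com/sirupsen/logrus"

func exampleOperation(id string) (err error) {
	ctx := logrus.Fields{"cid": id}
	logOperationBegin(ctx, "hcsshim::Example - Begin Operation")
	// The deferred closure observes the final value of err and logs at
	// Error level, with the error attached, when the operation failed.
	defer func() { logOperationEnd(ctx, "hcsshim::Example - End Operation", err) }()

	// ... operation body that may assign err ...
	return nil
}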

View File

@@ -2,13 +2,14 @@ package hcs
import (
"encoding/json"
"fmt"
"io"
"sync"
"syscall"
"time"
"github.com/Microsoft/hcsshim/internal/guestrequest"
"github.com/Microsoft/hcsshim/internal/interop"
"github.com/Microsoft/hcsshim/internal/logfields"
"github.com/sirupsen/logrus"
)
@@ -20,6 +21,21 @@ type Process struct {
system *System
cachedPipes *cachedPipes
callbackNumber uintptr
logctx logrus.Fields
}
func newProcess(process hcsProcess, processID int, computeSystem *System) *Process {
return &Process{
handle: process,
processID: processID,
system: computeSystem,
logctx: logrus.Fields{
logfields.HCSOperation: "",
logfields.ContainerID: computeSystem.ID(),
logfields.ProcessID: processID,
},
}
}
type cachedPipes struct {
@@ -71,70 +87,122 @@ func (process *Process) SystemID() string {
return process.system.ID()
}
// Kill signals the process to terminate but does not wait for it to finish terminating.
func (process *Process) Kill() error {
func (process *Process) logOperationBegin(operation string) {
process.logctx[logfields.HCSOperation] = operation
logOperationBegin(
process.logctx,
"hcsshim::Process - Begin Operation")
}
func (process *Process) logOperationEnd(err error) {
var result string
if err == nil {
result = "Success"
} else {
result = "Error"
}
logOperationEnd(
process.logctx,
"hcsshim::Process - End Operation - "+result,
err)
process.logctx[logfields.HCSOperation] = ""
}
// Signal signals the process with `options`.
func (process *Process) Signal(options guestrequest.SignalProcessOptions) (err error) {
process.handleLock.RLock()
defer process.handleLock.RUnlock()
operation := "Kill"
title := "hcsshim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
operation := "hcsshim::Process::Signal"
process.logOperationBegin(operation)
defer func() { process.logOperationEnd(err) }()
if process.handle == 0 {
return makeProcessError(process, operation, ErrAlreadyClosed, nil)
}
optionsb, err := json.Marshal(options)
if err != nil {
return err
}
optionsStr := string(optionsb)
var resultp *uint16
syscallWatcher(process.logctx, func() {
err = hcsSignalProcess(process.handle, optionsStr, &resultp)
})
events := processHcsResult(resultp)
if err != nil {
return makeProcessError(process, operation, err, events)
}
return nil
}
// Kill signals the process to terminate but does not wait for it to finish terminating.
func (process *Process) Kill() (err error) {
process.handleLock.RLock()
defer process.handleLock.RUnlock()
operation := "hcsshim::Process::Kill"
process.logOperationBegin(operation)
defer func() { process.logOperationEnd(err) }()
if process.handle == 0 {
return makeProcessError(process, operation, ErrAlreadyClosed, nil)
}
var resultp *uint16
completed := false
go syscallWatcher(fmt.Sprintf("TerminateProcess %s: %d", process.SystemID(), process.Pid()), &completed)
err := hcsTerminateProcess(process.handle, &resultp)
completed = true
syscallWatcher(process.logctx, func() {
err = hcsTerminateProcess(process.handle, &resultp)
})
events := processHcsResult(resultp)
if err != nil {
return makeProcessError(process, operation, err, events)
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return nil
}
// Wait waits for the process to exit.
func (process *Process) Wait() error {
operation := "Wait"
title := "hcsshim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
func (process *Process) Wait() (err error) {
operation := "hcsshim::Process::Wait"
process.logOperationBegin(operation)
defer func() { process.logOperationEnd(err) }()
err := waitForNotification(process.callbackNumber, hcsNotificationProcessExited, nil)
err = waitForNotification(process.callbackNumber, hcsNotificationProcessExited, nil)
if err != nil {
return makeProcessError(process, operation, err, nil)
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return nil
}
// WaitTimeout waits for the process to exit or the duration to elapse. It returns
// false if timeout occurs.
func (process *Process) WaitTimeout(timeout time.Duration) error {
operation := "WaitTimeout"
title := "hcsshim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
func (process *Process) WaitTimeout(timeout time.Duration) (err error) {
operation := "hcsshim::Process::WaitTimeout"
process.logOperationBegin(operation)
defer func() { process.logOperationEnd(err) }()
err := waitForNotification(process.callbackNumber, hcsNotificationProcessExited, &timeout)
err = waitForNotification(process.callbackNumber, hcsNotificationProcessExited, &timeout)
if err != nil {
return makeProcessError(process, operation, err, nil)
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return nil
}
// ResizeConsole resizes the console of the process.
func (process *Process) ResizeConsole(width, height uint16) error {
func (process *Process) ResizeConsole(width, height uint16) (err error) {
process.handleLock.RLock()
defer process.handleLock.RUnlock()
operation := "ResizeConsole"
title := "hcsshim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
operation := "hcsshim::Process::ResizeConsole"
process.logOperationBegin(operation)
defer func() { process.logOperationEnd(err) }()
if process.handle == 0 {
return makeProcessError(process, operation, ErrAlreadyClosed, nil)
@@ -162,16 +230,16 @@ func (process *Process) ResizeConsole(width, height uint16) error {
return makeProcessError(process, operation, err, events)
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return nil
}
func (process *Process) Properties() (*ProcessStatus, error) {
func (process *Process) Properties() (_ *ProcessStatus, err error) {
process.handleLock.RLock()
defer process.handleLock.RUnlock()
operation := "Properties"
title := "hcsshim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
operation := "hcsshim::Process::Properties"
process.logOperationBegin(operation)
defer func() { process.logOperationEnd(err) }()
if process.handle == 0 {
return nil, makeProcessError(process, operation, ErrAlreadyClosed, nil)
@@ -181,10 +249,9 @@ func (process *Process) Properties() (*ProcessStatus, error) {
resultp *uint16
propertiesp *uint16
)
completed := false
go syscallWatcher(fmt.Sprintf("GetProcessProperties %s: %d", process.SystemID(), process.Pid()), &completed)
err := hcsGetProcessProperties(process.handle, &propertiesp, &resultp)
completed = true
syscallWatcher(process.logctx, func() {
err = hcsGetProcessProperties(process.handle, &propertiesp, &resultp)
})
events := processHcsResult(resultp)
if err != nil {
return nil, makeProcessError(process, operation, err, events)
@@ -200,14 +267,16 @@ func (process *Process) Properties() (*ProcessStatus, error) {
return nil, makeProcessError(process, operation, err, nil)
}
logrus.Debugf(title+" succeeded processid=%d, properties=%s", process.processID, propertiesRaw)
return properties, nil
}
// ExitCode returns the exit code of the process. The process must have
// already terminated.
func (process *Process) ExitCode() (int, error) {
operation := "ExitCode"
func (process *Process) ExitCode() (_ int, err error) {
operation := "hcsshim::Process::ExitCode"
process.logOperationBegin(operation)
defer func() { process.logOperationEnd(err) }()
properties, err := process.Properties()
if err != nil {
return 0, makeProcessError(process, operation, err, nil)
@@ -227,12 +296,13 @@ func (process *Process) ExitCode() (int, error) {
// Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing
// these pipes does not close the underlying pipes; it should be possible to
// call this multiple times to get multiple interfaces.
func (process *Process) Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error) {
func (process *Process) Stdio() (_ io.WriteCloser, _ io.ReadCloser, _ io.ReadCloser, err error) {
process.handleLock.RLock()
defer process.handleLock.RUnlock()
operation := "Stdio"
title := "hcsshim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
operation := "hcsshim::Process::Stdio"
process.logOperationBegin(operation)
defer func() { process.logOperationEnd(err) }()
if process.handle == 0 {
return nil, nil, nil, makeProcessError(process, operation, ErrAlreadyClosed, nil)
@@ -245,7 +315,7 @@ func (process *Process) Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, e
processInfo hcsProcessInformation
resultp *uint16
)
err := hcsGetProcessInfo(process.handle, &processInfo, &resultp)
err = hcsGetProcessInfo(process.handle, &processInfo, &resultp)
events := processHcsResult(resultp)
if err != nil {
return nil, nil, nil, makeProcessError(process, operation, err, events)
@@ -265,18 +335,18 @@ func (process *Process) Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, e
return nil, nil, nil, makeProcessError(process, operation, err, nil)
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return pipes[0], pipes[1], pipes[2], nil
}
// CloseStdin closes the write side of the stdin pipe so that the process is
// notified on the read side that there is no more data in stdin.
func (process *Process) CloseStdin() error {
func (process *Process) CloseStdin() (err error) {
process.handleLock.RLock()
defer process.handleLock.RUnlock()
operation := "CloseStdin"
title := "hcsshim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
operation := "hcsshim::Process::CloseStdin"
process.logOperationBegin(operation)
defer func() { process.logOperationEnd(err) }()
if process.handle == 0 {
return makeProcessError(process, operation, ErrAlreadyClosed, nil)
@@ -303,35 +373,34 @@ func (process *Process) CloseStdin() error {
return makeProcessError(process, operation, err, events)
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return nil
}
// Close cleans up any state associated with the process but does not kill
// or wait on it.
func (process *Process) Close() error {
func (process *Process) Close() (err error) {
process.handleLock.Lock()
defer process.handleLock.Unlock()
operation := "Close"
title := "hcsshim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
operation := "hcsshim::Process::Close"
process.logOperationBegin(operation)
defer func() { process.logOperationEnd(err) }()
// Don't double free this
if process.handle == 0 {
return nil
}
if err := process.unregisterCallback(); err != nil {
if err = process.unregisterCallback(); err != nil {
return makeProcessError(process, operation, err, nil)
}
if err := hcsCloseProcess(process.handle); err != nil {
if err = hcsCloseProcess(process.handle); err != nil {
return makeProcessError(process, operation, err, nil)
}
process.handle = 0
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return nil
}
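
A caller-side sketch of the new Signal path, using only functions shown above; the signal number is illustrative (15 corresponds to SIGTERM on LCOW):

package example

import (
	"github.com/Microsoft/hcsshim/internal/guestrequest"
	"github.com/Microsoft/hcsshim/internal/hcs"
)

// signalThenWait signals a process in an existing compute system and waits
// for it to exit.
func signalThenWait(cs *hcs.System, pid int) error {
	p, err := cs.OpenProcess(pid)
	if err != nil {
		return err
	}
	defer p.Close()
	if err := p.Signal(guestrequest.SignalProcessOptions{Signal: 15}); err != nil {
		return err
	}
	return p.Wait()
}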

View File

@@ -2,7 +2,6 @@ package hcs
import (
"encoding/json"
"fmt"
"os"
"strconv"
"sync"
@@ -10,6 +9,7 @@ import (
"time"
"github.com/Microsoft/hcsshim/internal/interop"
"github.com/Microsoft/hcsshim/internal/logfields"
"github.com/Microsoft/hcsshim/internal/schema1"
"github.com/Microsoft/hcsshim/internal/timeout"
"github.com/sirupsen/logrus"
@@ -41,16 +41,49 @@ type System struct {
handle hcsSystem
id string
callbackNumber uintptr
logctx logrus.Fields
}
func newSystem(id string) *System {
return &System{
id: id,
logctx: logrus.Fields{
logfields.HCSOperation: "",
logfields.ContainerID: id,
},
}
}
func (computeSystem *System) logOperationBegin(operation string) {
computeSystem.logctx[logfields.HCSOperation] = operation
logOperationBegin(
computeSystem.logctx,
"hcsshim::ComputeSystem - Begin Operation")
}
func (computeSystem *System) logOperationEnd(err error) {
var result string
if err == nil {
result = "Success"
} else {
result = "Error"
}
logOperationEnd(
computeSystem.logctx,
"hcsshim::ComputeSystem - End Operation - "+result,
err)
computeSystem.logctx[logfields.HCSOperation] = ""
}
// CreateComputeSystem creates a new compute system with the given configuration but does not start it.
func CreateComputeSystem(id string, hcsDocumentInterface interface{}) (*System, error) {
operation := "CreateComputeSystem"
title := "hcsshim::" + operation
func CreateComputeSystem(id string, hcsDocumentInterface interface{}) (_ *System, err error) {
operation := "hcsshim::CreateComputeSystem"
computeSystem := &System{
id: id,
}
computeSystem := newSystem(id)
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(err) }()
hcsDocumentB, err := json.Marshal(hcsDocumentInterface)
if err != nil {
@@ -58,19 +91,22 @@ func CreateComputeSystem(id string, hcsDocumentInterface interface{}) (*System,
}
hcsDocument := string(hcsDocumentB)
logrus.Debugf(title+" ID=%s config=%s", id, hcsDocument)
logrus.WithFields(computeSystem.logctx).
WithField(logfields.JSON, hcsDocument).
Debug("HCS ComputeSystem Document")
var (
resultp *uint16
identity syscall.Handle
resultp *uint16
identity syscall.Handle
createError error
)
completed := false
go syscallWatcher(fmt.Sprintf("CreateCompleteSystem %s: %s", id, hcsDocument), &completed)
createError := hcsCreateComputeSystem(id, hcsDocument, identity, &computeSystem.handle, &resultp)
completed = true
syscallWatcher(computeSystem.logctx, func() {
createError = hcsCreateComputeSystem(id, hcsDocument, identity, &computeSystem.handle, &resultp)
})
if createError == nil || IsPending(createError) {
if err := computeSystem.registerCallback(); err != nil {
if err = computeSystem.registerCallback(); err != nil {
// Terminate the compute system if it still exists. We're okay to
// ignore a failure here.
computeSystem.Terminate()
@@ -88,25 +124,28 @@ func CreateComputeSystem(id string, hcsDocumentInterface interface{}) (*System,
return nil, makeSystemError(computeSystem, operation, hcsDocument, err, events)
}
logrus.Debugf(title+" succeeded id=%s handle=%d", id, computeSystem.handle)
return computeSystem, nil
}
// OpenComputeSystem opens an existing compute system by ID.
func OpenComputeSystem(id string) (*System, error) {
operation := "OpenComputeSystem"
title := "hcsshim::" + operation
logrus.Debugf(title+" ID=%s", id)
func OpenComputeSystem(id string) (_ *System, err error) {
operation := "hcsshim::OpenComputeSystem"
computeSystem := &System{
id: id,
}
computeSystem := newSystem(id)
computeSystem.logOperationBegin(operation)
defer func() {
if IsNotExist(err) {
computeSystem.logOperationEnd(nil)
} else {
computeSystem.logOperationEnd(err)
}
}()
var (
handle hcsSystem
resultp *uint16
)
err := hcsOpenComputeSystem(id, &handle, &resultp)
err = hcsOpenComputeSystem(id, &handle, &resultp)
events := processHcsResult(resultp)
if err != nil {
return nil, makeSystemError(computeSystem, operation, "", err, events)
@@ -114,18 +153,36 @@ func OpenComputeSystem(id string) (*System, error) {
computeSystem.handle = handle
if err := computeSystem.registerCallback(); err != nil {
if err = computeSystem.registerCallback(); err != nil {
return nil, makeSystemError(computeSystem, operation, "", err, nil)
}
logrus.Debugf(title+" succeeded id=%s handle=%d", id, handle)
return computeSystem, nil
}
// GetComputeSystems gets a list of the compute systems on the system that match the query
func GetComputeSystems(q schema1.ComputeSystemQuery) ([]schema1.ContainerProperties, error) {
operation := "GetComputeSystems"
title := "hcsshim::" + operation
func GetComputeSystems(q schema1.ComputeSystemQuery) (_ []schema1.ContainerProperties, err error) {
operation := "hcsshim::GetComputeSystems"
fields := logrus.Fields{
logfields.HCSOperation: operation,
}
logOperationBegin(
fields,
"hcsshim::ComputeSystem - Begin Operation")
defer func() {
var result string
if err == nil {
result = "Success"
} else {
result = "Error"
}
logOperationEnd(
fields,
"hcsshim::ComputeSystem - End Operation - "+result,
err)
}()
queryb, err := json.Marshal(q)
if err != nil {
@@ -133,16 +190,19 @@ func GetComputeSystems(q schema1.ComputeSystemQuery) ([]schema1.ContainerPropert
}
query := string(queryb)
logrus.Debugf(title+" query=%s", query)
logrus.WithFields(fields).
WithField(logfields.JSON, query).
Debug("HCS ComputeSystem Query")
var (
resultp *uint16
computeSystemsp *uint16
)
completed := false
go syscallWatcher(fmt.Sprintf("GetComputeSystems %s:", query), &completed)
err = hcsEnumerateComputeSystems(query, &computeSystemsp, &resultp)
completed = true
syscallWatcher(fields, func() {
err = hcsEnumerateComputeSystems(query, &computeSystemsp, &resultp)
})
events := processHcsResult(resultp)
if err != nil {
return nil, &HcsError{Op: operation, Err: err, Events: events}
@@ -153,20 +213,21 @@ func GetComputeSystems(q schema1.ComputeSystemQuery) ([]schema1.ContainerPropert
}
computeSystemsRaw := interop.ConvertAndFreeCoTaskMemBytes(computeSystemsp)
computeSystems := []schema1.ContainerProperties{}
if err := json.Unmarshal(computeSystemsRaw, &computeSystems); err != nil {
if err = json.Unmarshal(computeSystemsRaw, &computeSystems); err != nil {
return nil, err
}
logrus.Debugf(title + " succeeded")
return computeSystems, nil
}
// Start synchronously starts the computeSystem.
func (computeSystem *System) Start() error {
func (computeSystem *System) Start() (err error) {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
title := "hcsshim::ComputeSystem::Start ID=" + computeSystem.ID()
logrus.Debugf(title)
operation := "hcsshim::ComputeSystem::Start"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(err) }()
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, "Start", "", ErrAlreadyClosed, nil)
@@ -199,16 +260,14 @@ func (computeSystem *System) Start() error {
}
var resultp *uint16
completed := false
go syscallWatcher(fmt.Sprintf("StartComputeSystem %s:", computeSystem.ID()), &completed)
err := hcsStartComputeSystem(computeSystem.handle, "", &resultp)
completed = true
syscallWatcher(computeSystem.logctx, func() {
err = hcsStartComputeSystem(computeSystem.handle, "", &resultp)
})
events, err := processAsyncHcsResult(err, resultp, computeSystem.callbackNumber, hcsNotificationSystemStartCompleted, &timeout.SystemStart)
if err != nil {
return makeSystemError(computeSystem, "Start", "", err, events)
}
logrus.Debugf(title + " succeeded")
return nil
}
@@ -219,98 +278,133 @@ func (computeSystem *System) ID() string {
// Shutdown requests a compute system shutdown. If IsPending() on the returned error
// is true, it may not actually be shut down until Wait() succeeds.
func (computeSystem *System) Shutdown() error {
func (computeSystem *System) Shutdown() (err error) {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
title := "hcsshim::ComputeSystem::Shutdown"
logrus.Debugf(title)
operation := "hcsshim::ComputeSystem::Shutdown"
computeSystem.logOperationBegin(operation)
defer func() {
if IsAlreadyStopped(err) {
computeSystem.logOperationEnd(nil)
} else {
computeSystem.logOperationEnd(err)
}
}()
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, "Shutdown", "", ErrAlreadyClosed, nil)
}
var resultp *uint16
completed := false
go syscallWatcher(fmt.Sprintf("ShutdownComputeSystem %s:", computeSystem.ID()), &completed)
err := hcsShutdownComputeSystem(computeSystem.handle, "", &resultp)
completed = true
syscallWatcher(computeSystem.logctx, func() {
err = hcsShutdownComputeSystem(computeSystem.handle, "", &resultp)
})
events := processHcsResult(resultp)
if err != nil {
return makeSystemError(computeSystem, "Shutdown", "", err, events)
}
logrus.Debugf(title + " succeeded")
return nil
}
// Terminate requests that a compute system be terminated. If IsPending() on the
// returned error is true, it may not actually be terminated until Wait() succeeds.
func (computeSystem *System) Terminate() error {
func (computeSystem *System) Terminate() (err error) {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
title := "hcsshim::ComputeSystem::Terminate ID=" + computeSystem.ID()
logrus.Debugf(title)
operation := "hcsshim::ComputeSystem::Terminate"
computeSystem.logOperationBegin(operation)
defer func() {
if IsPending(err) {
computeSystem.logOperationEnd(nil)
} else {
computeSystem.logOperationEnd(err)
}
}()
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, "Terminate", "", ErrAlreadyClosed, nil)
}
var resultp *uint16
completed := false
go syscallWatcher(fmt.Sprintf("TerminateComputeSystem %s:", computeSystem.ID()), &completed)
err := hcsTerminateComputeSystem(computeSystem.handle, "", &resultp)
completed = true
syscallWatcher(computeSystem.logctx, func() {
err = hcsTerminateComputeSystem(computeSystem.handle, "", &resultp)
})
events := processHcsResult(resultp)
if err != nil {
if err != nil && err != ErrVmcomputeAlreadyStopped {
return makeSystemError(computeSystem, "Terminate", "", err, events)
}
logrus.Debugf(title + " succeeded")
return nil
}
// Wait synchronously waits for the compute system to shutdown or terminate.
func (computeSystem *System) Wait() error {
title := "hcsshim::ComputeSystem::Wait ID=" + computeSystem.ID()
logrus.Debugf(title)
func (computeSystem *System) Wait() (err error) {
operation := "hcsshim::ComputeSystem::Wait"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(err) }()
err := waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, nil)
err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, nil)
if err != nil {
return makeSystemError(computeSystem, "Wait", "", err, nil)
}
logrus.Debugf(title + " succeeded")
return nil
}
// WaitExpectedError synchronously waits for the compute system to shutdown or
// terminate, and ignores the passed error if it occurs.
func (computeSystem *System) WaitExpectedError(expected error) (err error) {
operation := "hcsshim::ComputeSystem::WaitExpectedError"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(err) }()
err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, nil)
if err != nil && err != expected {
return makeSystemError(computeSystem, "WaitExpectedError", "", err, nil)
}
return nil
}
// WaitTimeout synchronously waits for the compute system to terminate or the duration to elapse.
// If the timeout expires, IsTimeout(err) == true
func (computeSystem *System) WaitTimeout(timeout time.Duration) error {
title := "hcsshim::ComputeSystem::WaitTimeout ID=" + computeSystem.ID()
logrus.Debugf(title)
func (computeSystem *System) WaitTimeout(timeout time.Duration) (err error) {
operation := "hcsshim::ComputeSystem::WaitTimeout"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(err) }()
err := waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, &timeout)
err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, &timeout)
if err != nil {
return makeSystemError(computeSystem, "WaitTimeout", "", err, nil)
}
logrus.Debugf(title + " succeeded")
return nil
}
func (computeSystem *System) Properties(types ...schema1.PropertyType) (*schema1.ContainerProperties, error) {
func (computeSystem *System) Properties(types ...schema1.PropertyType) (_ *schema1.ContainerProperties, err error) {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
operation := "hcsshim::ComputeSystem::Properties"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(err) }()
queryj, err := json.Marshal(schema1.PropertyQuery{types})
if err != nil {
return nil, makeSystemError(computeSystem, "Properties", "", err, nil)
}
logrus.WithFields(computeSystem.logctx).
WithField(logfields.JSON, queryj).
Debug("HCS ComputeSystem Properties Query")
var resultp, propertiesp *uint16
completed := false
go syscallWatcher(fmt.Sprintf("GetComputeSystemProperties %s:", computeSystem.ID()), &completed)
err = hcsGetComputeSystemProperties(computeSystem.handle, string(queryj), &propertiesp, &resultp)
completed = true
syscallWatcher(computeSystem.logctx, func() {
err = hcsGetComputeSystemProperties(computeSystem.handle, string(queryj), &propertiesp, &resultp)
})
events := processHcsResult(resultp)
if err != nil {
return nil, makeSystemError(computeSystem, "Properties", "", err, events)
@@ -324,64 +418,69 @@ func (computeSystem *System) Properties(types ...schema1.PropertyType) (*schema1
if err := json.Unmarshal(propertiesRaw, properties); err != nil {
return nil, makeSystemError(computeSystem, "Properties", "", err, nil)
}
return properties, nil
}
// Pause pauses the execution of the computeSystem. This feature is not enabled in TP5.
func (computeSystem *System) Pause() error {
func (computeSystem *System) Pause() (err error) {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
title := "hcsshim::ComputeSystem::Pause ID=" + computeSystem.ID()
logrus.Debugf(title)
operation := "hcsshim::ComputeSystem::Pause"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(err) }()
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, "Pause", "", ErrAlreadyClosed, nil)
}
var resultp *uint16
completed := false
go syscallWatcher(fmt.Sprintf("PauseComputeSystem %s:", computeSystem.ID()), &completed)
err := hcsPauseComputeSystem(computeSystem.handle, "", &resultp)
completed = true
syscallWatcher(computeSystem.logctx, func() {
err = hcsPauseComputeSystem(computeSystem.handle, "", &resultp)
})
events, err := processAsyncHcsResult(err, resultp, computeSystem.callbackNumber, hcsNotificationSystemPauseCompleted, &timeout.SystemPause)
if err != nil {
return makeSystemError(computeSystem, "Pause", "", err, events)
}
logrus.Debugf(title + " succeeded")
return nil
}
// Resume resumes the execution of the computeSystem. This feature is not enabled in TP5.
func (computeSystem *System) Resume() error {
func (computeSystem *System) Resume() (err error) {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
title := "hcsshim::ComputeSystem::Resume ID=" + computeSystem.ID()
logrus.Debugf(title)
operation := "hcsshim::ComputeSystem::Resume"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(err) }()
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, "Resume", "", ErrAlreadyClosed, nil)
}
var resultp *uint16
completed := false
go syscallWatcher(fmt.Sprintf("ResumeComputeSystem %s:", computeSystem.ID()), &completed)
err := hcsResumeComputeSystem(computeSystem.handle, "", &resultp)
completed = true
syscallWatcher(computeSystem.logctx, func() {
err = hcsResumeComputeSystem(computeSystem.handle, "", &resultp)
})
events, err := processAsyncHcsResult(err, resultp, computeSystem.callbackNumber, hcsNotificationSystemResumeCompleted, &timeout.SystemResume)
if err != nil {
return makeSystemError(computeSystem, "Resume", "", err, events)
}
logrus.Debugf(title + " succeeded")
return nil
}
// CreateProcess launches a new process within the computeSystem.
func (computeSystem *System) CreateProcess(c interface{}) (*Process, error) {
func (computeSystem *System) CreateProcess(c interface{}) (_ *Process, err error) {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
title := "hcsshim::ComputeSystem::CreateProcess ID=" + computeSystem.ID()
operation := "hcsshim::ComputeSystem::CreateProcess"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(err) }()
var (
processInfo hcsProcessInformation
processHandle hcsProcess
@@ -398,42 +497,50 @@ func (computeSystem *System) CreateProcess(c interface{}) (*Process, error) {
}
configuration := string(configurationb)
logrus.Debugf(title+" config=%s", configuration)
completed := false
go syscallWatcher(fmt.Sprintf("CreateProcess %s: %s", computeSystem.ID(), configuration), &completed)
err = hcsCreateProcess(computeSystem.handle, configuration, &processInfo, &processHandle, &resultp)
completed = true
logrus.WithFields(computeSystem.logctx).
WithField(logfields.JSON, configuration).
Debug("HCS ComputeSystem Process Document")
syscallWatcher(computeSystem.logctx, func() {
err = hcsCreateProcess(computeSystem.handle, configuration, &processInfo, &processHandle, &resultp)
})
events := processHcsResult(resultp)
if err != nil {
return nil, makeSystemError(computeSystem, "CreateProcess", configuration, err, events)
}
process := &Process{
handle: processHandle,
processID: int(processInfo.ProcessId),
system: computeSystem,
cachedPipes: &cachedPipes{
stdIn: processInfo.StdInput,
stdOut: processInfo.StdOutput,
stdErr: processInfo.StdError,
},
logrus.WithFields(computeSystem.logctx).
WithField(logfields.ProcessID, processInfo.ProcessId).
Debug("HCS ComputeSystem CreateProcess PID")
process := newProcess(processHandle, int(processInfo.ProcessId), computeSystem)
process.cachedPipes = &cachedPipes{
stdIn: processInfo.StdInput,
stdOut: processInfo.StdOutput,
stdErr: processInfo.StdError,
}
if err := process.registerCallback(); err != nil {
if err = process.registerCallback(); err != nil {
return nil, makeSystemError(computeSystem, "CreateProcess", "", err, nil)
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return process, nil
}
// OpenProcess gets an interface to an existing process within the computeSystem.
func (computeSystem *System) OpenProcess(pid int) (*Process, error) {
func (computeSystem *System) OpenProcess(pid int) (_ *Process, err error) {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
title := "hcsshim::ComputeSystem::OpenProcess ID=" + computeSystem.ID()
logrus.Debugf(title+" processid=%d", pid)
// Add PID for the context of this operation
computeSystem.logctx[logfields.ProcessID] = pid
defer delete(computeSystem.logctx, logfields.ProcessID)
operation := "hcsshim::ComputeSystem::OpenProcess"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(err) }()
var (
processHandle hcsProcess
resultp *uint16
@@ -443,56 +550,49 @@ func (computeSystem *System) OpenProcess(pid int) (*Process, error) {
return nil, makeSystemError(computeSystem, "OpenProcess", "", ErrAlreadyClosed, nil)
}
completed := false
go syscallWatcher(fmt.Sprintf("OpenProcess %s: %d", computeSystem.ID(), pid), &completed)
err := hcsOpenProcess(computeSystem.handle, uint32(pid), &processHandle, &resultp)
completed = true
syscallWatcher(computeSystem.logctx, func() {
err = hcsOpenProcess(computeSystem.handle, uint32(pid), &processHandle, &resultp)
})
events := processHcsResult(resultp)
if err != nil {
return nil, makeSystemError(computeSystem, "OpenProcess", "", err, events)
}
process := &Process{
handle: processHandle,
processID: pid,
system: computeSystem,
}
if err := process.registerCallback(); err != nil {
process := newProcess(processHandle, pid, computeSystem)
if err = process.registerCallback(); err != nil {
return nil, makeSystemError(computeSystem, "OpenProcess", "", err, nil)
}
logrus.Debugf(title+" succeeded processid=%s", process.processID)
return process, nil
}
// Close cleans up any state associated with the compute system but does not terminate or wait for it.
func (computeSystem *System) Close() error {
func (computeSystem *System) Close() (err error) {
computeSystem.handleLock.Lock()
defer computeSystem.handleLock.Unlock()
title := "hcsshim::ComputeSystem::Close ID=" + computeSystem.ID()
logrus.Debugf(title)
operation := "hcsshim::ComputeSystem::Close"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(err) }()
// Don't double free this
if computeSystem.handle == 0 {
return nil
}
if err := computeSystem.unregisterCallback(); err != nil {
if err = computeSystem.unregisterCallback(); err != nil {
return makeSystemError(computeSystem, "Close", "", err, nil)
}
completed := false
go syscallWatcher(fmt.Sprintf("CloseComputeSystem %s:", computeSystem.ID()), &completed)
err := hcsCloseComputeSystem(computeSystem.handle)
completed = true
syscallWatcher(computeSystem.logctx, func() {
err = hcsCloseComputeSystem(computeSystem.handle)
})
if err != nil {
return makeSystemError(computeSystem, "Close", "", err, nil)
}
computeSystem.handle = 0
logrus.Debugf(title + " succeeded")
return nil
}
@@ -553,11 +653,14 @@ func (computeSystem *System) unregisterCallback() error {
return nil
}
// Modifies the System by sending a request to HCS
func (computeSystem *System) Modify(config interface{}) error {
// Modify the System by sending a request to HCS
func (computeSystem *System) Modify(config interface{}) (err error) {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
title := "hcsshim::Modify ID=" + computeSystem.id
operation := "hcsshim::ComputeSystem::Modify"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(err) }()
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, "Modify", "", ErrAlreadyClosed, nil)
@@ -569,17 +672,19 @@ func (computeSystem *System) Modify(config interface{}) error {
}
requestString := string(requestJSON)
logrus.Debugf(title + " " + requestString)
logrus.WithFields(computeSystem.logctx).
WithField(logfields.JSON, requestString).
Debug("HCS ComputeSystem Modify Document")
var resultp *uint16
completed := false
go syscallWatcher(fmt.Sprintf("ModifyComputeSystem %s: %s", computeSystem.ID(), requestString), &completed)
err = hcsModifyComputeSystem(computeSystem.handle, requestString, &resultp)
completed = true
syscallWatcher(computeSystem.logctx, func() {
err = hcsModifyComputeSystem(computeSystem.handle, requestString, &resultp)
})
events := processHcsResult(resultp)
if err != nil {
return makeSystemError(computeSystem, "Modify", requestString, err, events)
}
logrus.Debugf(title + " succeeded ")
return nil
}

View File

@@ -1,8 +1,9 @@
package hcs
import (
"time"
"context"
"github.com/Microsoft/hcsshim/internal/logfields"
"github.com/Microsoft/hcsshim/internal/timeout"
"github.com/sirupsen/logrus"
)
@@ -16,15 +17,25 @@ import (
//
// Usage is:
//
// completed := false
// go syscallWatcher("some description", &completed)
// <syscall>
// completed = true
// syscallWatcher(logContext, func() {
// err = <syscall>(args...)
// })
//
func syscallWatcher(description string, syscallCompleted *bool) {
time.Sleep(timeout.SyscallWatcher)
if *syscallCompleted {
return
}
logrus.Warnf("%s: Did not complete within %s. This may indicate a platform issue. If it appears to be making no forward progress, obtain the stacks and see is there is a syscall stuck in the platform API for a significant length of time.", description, timeout.SyscallWatcher)
func syscallWatcher(logContext logrus.Fields, syscallLambda func()) {
ctx, cancel := context.WithTimeout(context.Background(), timeout.SyscallWatcher)
defer cancel()
go watchFunc(ctx, logContext)
syscallLambda()
}
func watchFunc(ctx context.Context, logContext logrus.Fields) {
select {
case <-ctx.Done():
if ctx.Err() != context.Canceled {
logrus.WithFields(logContext).
WithField(logfields.Timeout, timeout.SyscallWatcher).
Warning("Syscall did not complete within operation timeout. This may indicate a platform issue. If it appears to be making no forward progress, obtain the stacks and see if there is a syscall stuck in the platform API for a significant length of time.")
}
}
}
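The rewritten watcher above swaps the old polled boolean for a context deadline, so the warning fires at most once per timed-out call and is suppressed (via cancellation) when the wrapped syscall returns in time. For reference, a minimal, self-contained sketch of the same pattern; all names below are illustrative, not part of the vendored package:

package main

import (
	"context"
	"log"
	"time"
)

// watch runs fn and logs a warning if fn has not returned before the
// timeout elapses, mirroring syscallWatcher/watchFunc above.
func watch(timeout time.Duration, fn func()) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel() // a timely return cancels the context, silencing the watcher
	go func() {
		<-ctx.Done()
		if ctx.Err() != context.Canceled {
			log.Printf("call did not complete within %s", timeout)
		}
	}()
	fn()
}

func main() {
	watch(50*time.Millisecond, func() { time.Sleep(100 * time.Millisecond) })
	time.Sleep(20 * time.Millisecond) // give the watcher goroutine time to log
}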

View File

@@ -1,4 +1,4 @@
// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
// Code generated mksyscall_windows.exe DO NOT EDIT
package hcs
@@ -6,7 +6,6 @@ import (
"syscall"
"unsafe"
"github.com/Microsoft/hcsshim/internal/interop"
"golang.org/x/sys/windows"
)
@@ -57,12 +56,13 @@ var (
procHcsOpenProcess = modvmcompute.NewProc("HcsOpenProcess")
procHcsCloseProcess = modvmcompute.NewProc("HcsCloseProcess")
procHcsTerminateProcess = modvmcompute.NewProc("HcsTerminateProcess")
procHcsGetProcessInfo = modvmcompute.NewProc("HcsGetProcessInfo")
procHcsGetProcessProperties = modvmcompute.NewProc("HcsGetProcessProperties")
procHcsModifyProcess = modvmcompute.NewProc("HcsModifyProcess")
procHcsGetServiceProperties = modvmcompute.NewProc("HcsGetServiceProperties")
procHcsRegisterProcessCallback = modvmcompute.NewProc("HcsRegisterProcessCallback")
procHcsUnregisterProcessCallback = modvmcompute.NewProc("HcsUnregisterProcessCallback")
procHcsGetProcessInfo = modvmcompute.NewProc("HcsGetProcessInfo")
procHcsGetProcessProperties = modvmcompute.NewProc("HcsGetProcessProperties")
procHcsModifyProcess = modvmcompute.NewProc("HcsModifyProcess")
procHcsGetServiceProperties = modvmcompute.NewProc("HcsGetServiceProperties")
procHcsRegisterProcessCallback = modvmcompute.NewProc("HcsRegisterProcessCallback")
procHcsUnregisterProcessCallback = modvmcompute.NewProc("HcsUnregisterProcessCallback")
)
func hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) {
@@ -80,7 +80,10 @@ func _hcsEnumerateComputeSystems(query *uint16, computeSystems **uint16, result
}
r0, _, _ := syscall.Syscall(procHcsEnumerateComputeSystems.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(computeSystems)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
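Each generated stub now performs the Win32-from-HRESULT conversion inline rather than calling interop.Win32FromHresult: an HRESULT whose facility field is FACILITY_WIN32 (7) packages a Win32 error code in its low 16 bits, so stripping the packaging yields a plain syscall.Errno. A standalone sketch of the conversion; the constant is chosen for illustration:

package main

import "fmt"

// win32FromHresult strips the FACILITY_WIN32 packaging from an HRESULT,
// leaving the original Win32 error code in the low 16 bits.
func win32FromHresult(r0 uintptr) uintptr {
	if r0&0x1fff0000 == 0x00070000 {
		r0 &= 0xffff
	}
	return r0
}

func main() {
	const accessDeniedHR uintptr = 0x80070005 // HRESULT_FROM_WIN32(ERROR_ACCESS_DENIED)
	fmt.Printf("%#x -> %d\n", accessDeniedHR, win32FromHresult(accessDeniedHR)) // prints 0x80070005 -> 5
}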
@@ -105,7 +108,10 @@ func _hcsCreateComputeSystem(id *uint16, configuration *uint16, identity syscall
}
r0, _, _ := syscall.Syscall6(procHcsCreateComputeSystem.Addr(), 5, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(configuration)), uintptr(identity), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@@ -125,7 +131,10 @@ func _hcsOpenComputeSystem(id *uint16, computeSystem *hcsSystem, result **uint16
}
r0, _, _ := syscall.Syscall(procHcsOpenComputeSystem.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@@ -136,7 +145,10 @@ func hcsCloseComputeSystem(computeSystem hcsSystem) (hr error) {
}
r0, _, _ := syscall.Syscall(procHcsCloseComputeSystem.Addr(), 1, uintptr(computeSystem), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@@ -156,7 +168,10 @@ func _hcsStartComputeSystem(computeSystem hcsSystem, options *uint16, result **u
}
r0, _, _ := syscall.Syscall(procHcsStartComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@@ -176,7 +191,10 @@ func _hcsShutdownComputeSystem(computeSystem hcsSystem, options *uint16, result
}
r0, _, _ := syscall.Syscall(procHcsShutdownComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@@ -196,7 +214,10 @@ func _hcsTerminateComputeSystem(computeSystem hcsSystem, options *uint16, result
}
r0, _, _ := syscall.Syscall(procHcsTerminateComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@@ -216,7 +237,10 @@ func _hcsPauseComputeSystem(computeSystem hcsSystem, options *uint16, result **u
}
r0, _, _ := syscall.Syscall(procHcsPauseComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@@ -236,7 +260,10 @@ func _hcsResumeComputeSystem(computeSystem hcsSystem, options *uint16, result **
}
r0, _, _ := syscall.Syscall(procHcsResumeComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@@ -256,7 +283,10 @@ func _hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery *uint
}
r0, _, _ := syscall.Syscall6(procHcsGetComputeSystemProperties.Addr(), 4, uintptr(computeSystem), uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@@ -276,7 +306,10 @@ func _hcsModifyComputeSystem(computeSystem hcsSystem, configuration *uint16, res
}
r0, _, _ := syscall.Syscall(procHcsModifyComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(configuration)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@@ -287,7 +320,10 @@ func hcsRegisterComputeSystemCallback(computeSystem hcsSystem, callback uintptr,
}
r0, _, _ := syscall.Syscall6(procHcsRegisterComputeSystemCallback.Addr(), 4, uintptr(computeSystem), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@@ -298,7 +334,10 @@ func hcsUnregisterComputeSystemCallback(callbackHandle hcsCallback) (hr error) {
}
r0, _, _ := syscall.Syscall(procHcsUnregisterComputeSystemCallback.Addr(), 1, uintptr(callbackHandle), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@@ -318,7 +357,10 @@ func _hcsCreateProcess(computeSystem hcsSystem, processParameters *uint16, proce
}
r0, _, _ := syscall.Syscall6(procHcsCreateProcess.Addr(), 5, uintptr(computeSystem), uintptr(unsafe.Pointer(processParameters)), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@@ -329,7 +371,10 @@ func hcsOpenProcess(computeSystem hcsSystem, pid uint32, process *hcsProcess, re
}
r0, _, _ := syscall.Syscall6(procHcsOpenProcess.Addr(), 4, uintptr(computeSystem), uintptr(pid), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@@ -340,7 +385,10 @@ func hcsCloseProcess(process hcsProcess) (hr error) {
}
r0, _, _ := syscall.Syscall(procHcsCloseProcess.Addr(), 1, uintptr(process), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@@ -351,7 +399,33 @@ func hcsTerminateProcess(process hcsProcess, result **uint16) (hr error) {
}
r0, _, _ := syscall.Syscall(procHcsTerminateProcess.Addr(), 2, uintptr(process), uintptr(unsafe.Pointer(result)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func hcsSignalProcess(process hcsProcess, options string, result **uint16) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(options)
if hr != nil {
return
}
return _hcsSignalProcess(process, _p0, result)
}
func _hcsSignalProcess(process hcsProcess, options *uint16, result **uint16) (hr error) {
if hr = procHcsTerminateProcess.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsTerminateProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@@ -362,7 +436,10 @@ func hcsGetProcessInfo(process hcsProcess, processInformation *hcsProcessInforma
}
r0, _, _ := syscall.Syscall(procHcsGetProcessInfo.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@@ -373,7 +450,10 @@ func hcsGetProcessProperties(process hcsProcess, processProperties **uint16, res
}
r0, _, _ := syscall.Syscall(procHcsGetProcessProperties.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processProperties)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@@ -393,7 +473,10 @@ func _hcsModifyProcess(process hcsProcess, settings *uint16, result **uint16) (h
}
r0, _, _ := syscall.Syscall(procHcsModifyProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@@ -413,7 +496,10 @@ func _hcsGetServiceProperties(propertyQuery *uint16, properties **uint16, result
}
r0, _, _ := syscall.Syscall(procHcsGetServiceProperties.Addr(), 3, uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@@ -424,7 +510,10 @@ func hcsRegisterProcessCallback(process hcsProcess, callback uintptr, context ui
}
r0, _, _ := syscall.Syscall6(procHcsRegisterProcessCallback.Addr(), 4, uintptr(process), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
@@ -435,7 +524,10 @@ func hcsUnregisterProcessCallback(callbackHandle hcsCallback) (hr error) {
}
r0, _, _ := syscall.Syscall(procHcsUnregisterProcessCallback.Addr(), 1, uintptr(callbackHandle), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}

View File

@@ -36,10 +36,6 @@ func New(err error, title, rest string) error {
return &HcsError{title, rest, err}
}
func Errorf(err error, title, format string, a ...interface{}) error {
return New(err, title, fmt.Sprintf(format, a...))
}
func Win32FromError(err error) uint32 {
if herr, ok := err.(*HcsError); ok {
return Win32FromError(herr.Err)

View File

@@ -0,0 +1,173 @@
// +build windows
package hcsoci
import (
"errors"
"fmt"
"os"
"path/filepath"
"strconv"
"github.com/Microsoft/hcsshim/internal/guid"
"github.com/Microsoft/hcsshim/internal/hcs"
"github.com/Microsoft/hcsshim/internal/schema2"
"github.com/Microsoft/hcsshim/internal/schemaversion"
"github.com/Microsoft/hcsshim/internal/uvm"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
)
// CreateOptions are the set of fields used to call CreateContainer().
// Note: In the spec, the LayerFolders must be arranged in the same way in which
// moby configures them: layern, layern-1,...,layer2,layer1,scratch
// where layer1 is the base read-only layer, layern is the top-most read-only
// layer, and scratch is the RW layer. This is for historical reasons only.
type CreateOptions struct {
// Common parameters
ID string // Identifier for the container
Owner string // Specifies the owner. Defaults to executable name.
Spec *specs.Spec // Definition of the container or utility VM being created
SchemaVersion *hcsschema.Version // Requested Schema Version. Defaults to v2 for RS5, v1 for RS1..RS4
HostingSystem *uvm.UtilityVM // Utility or service VM in which the container is to be created.
NetworkNamespace string // Host network namespace to use (overrides anything in the spec)
// This is an advanced debugging parameter. It allows for diagnosability by leaving a container's
// resources allocated in case of a failure. Thus you would be able to use tools such as hcsdiag
// to look at the state of a utility VM to see what resources were allocated. Obviously the caller
// must a) not tear down the utility VM on failure (or pause it in some way) and b) perform the
// ReleaseResources() call themselves.
DoNotReleaseResourcesOnFailure bool
}
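Because the LayerFolders ordering described above regularly trips up callers, here is a small self-contained sketch (hypothetical paths) of the expected arrangement and the end-slicing this package applies to it:

package main

import "fmt"

func main() {
	// Read-only layers come first, top-most layer leading, with the
	// RW scratch layer always last - the moby ordering noted above.
	layerFolders := []string{
		`C:\layers\layer2`,  // layern: top-most read-only layer
		`C:\layers\layer1`,  // layer1: base read-only layer
		`C:\layers\scratch`, // RW scratch layer
	}
	scratch := layerFolders[len(layerFolders)-1]
	readOnly := layerFolders[:len(layerFolders)-1]
	fmt.Println("scratch:", scratch)
	fmt.Println("read-only:", readOnly)
}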
// createOptionsInternal is the set of user-supplied create options, but includes internal
// fields for processing the request once user-supplied stuff has been validated.
type createOptionsInternal struct {
*CreateOptions
actualSchemaVersion *hcsschema.Version // Calculated based on Windows build and optional caller-supplied override
actualID string // Identifier for the container
actualOwner string // Owner for the container
actualNetworkNamespace string
}
// CreateContainer creates a container. It can cope with a wide variety of
// scenarios, including v1 HCS schema calls, as well as more complex v2 HCS schema
// calls. Note we always return the resources that have been allocated, even in the
// case of an error. This provides support for the debugging option not to
// release the resources on failure, so that the client can make the necessary
// call to release resources that have been allocated as part of calling this function.
func CreateContainer(createOptions *CreateOptions) (_ *hcs.System, _ *Resources, err error) {
logrus.Debugf("hcsshim::CreateContainer options: %+v", createOptions)
coi := &createOptionsInternal{
CreateOptions: createOptions,
actualID: createOptions.ID,
actualOwner: createOptions.Owner,
}
// Defaults if omitted by caller.
if coi.actualID == "" {
coi.actualID = guid.New().String()
}
if coi.actualOwner == "" {
coi.actualOwner = filepath.Base(os.Args[0])
}
if coi.Spec == nil {
return nil, nil, fmt.Errorf("Spec must be supplied")
}
if coi.HostingSystem != nil {
// By definition, a hosting system can only be supplied for a v2 Xenon.
coi.actualSchemaVersion = schemaversion.SchemaV21()
} else {
coi.actualSchemaVersion = schemaversion.DetermineSchemaVersion(coi.SchemaVersion)
logrus.Debugf("hcsshim::CreateContainer using schema %s", schemaversion.String(coi.actualSchemaVersion))
}
resources := &Resources{}
defer func() {
if err != nil {
if !coi.DoNotReleaseResourcesOnFailure {
ReleaseResources(resources, coi.HostingSystem, true)
}
}
}()
if coi.HostingSystem != nil {
n := coi.HostingSystem.ContainerCounter()
if coi.Spec.Linux != nil {
resources.containerRootInUVM = "/run/gcs/c/" + strconv.FormatUint(n, 16)
} else {
resources.containerRootInUVM = `C:\c\` + strconv.FormatUint(n, 16)
}
}
// Create a network namespace if necessary.
if coi.Spec.Windows != nil &&
coi.Spec.Windows.Network != nil &&
schemaversion.IsV21(coi.actualSchemaVersion) {
if coi.NetworkNamespace != "" {
resources.netNS = coi.NetworkNamespace
} else {
err := createNetworkNamespace(coi, resources)
if err != nil {
return nil, resources, err
}
}
coi.actualNetworkNamespace = resources.netNS
if coi.HostingSystem != nil {
endpoints, err := getNamespaceEndpoints(coi.actualNetworkNamespace)
if err != nil {
return nil, resources, err
}
err = coi.HostingSystem.AddNetNS(coi.actualNetworkNamespace, endpoints)
if err != nil {
return nil, resources, err
}
resources.addedNetNSToVM = true
}
}
var hcsDocument interface{}
logrus.Debugf("hcsshim::CreateContainer allocating resources")
if coi.Spec.Linux != nil {
if schemaversion.IsV10(coi.actualSchemaVersion) {
return nil, resources, errors.New("LCOW v1 not supported")
}
logrus.Debugf("hcsshim::CreateContainer allocateLinuxResources")
err = allocateLinuxResources(coi, resources)
if err != nil {
logrus.Debugf("failed to allocateLinuxResources %s", err)
return nil, resources, err
}
hcsDocument, err = createLinuxContainerDocument(coi, resources.containerRootInUVM)
if err != nil {
logrus.Debugf("failed createHCSContainerDocument %s", err)
return nil, resources, err
}
} else {
err = allocateWindowsResources(coi, resources)
if err != nil {
logrus.Debugf("failed to allocateWindowsResources %s", err)
return nil, resources, err
}
logrus.Debugf("hcsshim::CreateContainer creating container document")
hcsDocument, err = createWindowsContainerDocument(coi)
if err != nil {
logrus.Debugf("failed createHCSContainerDocument %s", err)
return nil, resources, err
}
}
logrus.Debugf("hcsshim::CreateContainer creating compute system")
system, err := hcs.CreateComputeSystem(coi.actualID, hcsDocument)
if err != nil {
logrus.Debugf("failed to CreateComputeSystem %s", err)
return nil, resources, err
}
return system, resources, err
}
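CreateContainer's contract of returning the allocated resources even on error is what makes the debugging option above workable. A self-contained sketch of the same named-return/deferred-release shape; all names are illustrative:

package main

import (
	"errors"
	"fmt"
)

// build tracks allocations as they happen; a named error return drives the
// deferred cleanup, which the caller may disable for debugging.
func build(releaseOnFailure bool) (allocated []string, err error) {
	defer func() {
		if err != nil && releaseOnFailure {
			fmt.Println("releasing on failure:", allocated)
		}
	}()
	allocated = append(allocated, "netns", "scratch")
	return allocated, errors.New("simulated create failure")
}

func main() {
	// With releaseOnFailure=false the caller still receives the allocation
	// list and must release it itself - as with DoNotReleaseResourcesOnFailure.
	allocated, err := build(false)
	fmt.Println(allocated, err)
}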

View File

@@ -0,0 +1,78 @@
// +build windows,functional
//
// These unit tests must run on a system set up to run both Argons and Xenons,
// have docker installed, and have the nanoserver (WCOW) and alpine (LCOW)
// base images installed. The nanoserver image MUST match the build of the
// host.
//
// We rely on docker as the tools to extract a container image aren't
// open source. We use it to find the location of the base image on disk.
//
package hcsoci
//import (
// "bytes"
// "encoding/json"
// "io/ioutil"
// "os"
// "os/exec"
// "path/filepath"
// "strings"
// "testing"
// "github.com/Microsoft/hcsshim/internal/schemaversion"
// _ "github.com/Microsoft/hcsshim/test/assets"
// specs "github.com/opencontainers/runtime-spec/specs-go"
// "github.com/sirupsen/logrus"
//)
//func startUVM(t *testing.T, uvm *UtilityVM) {
// if err := uvm.Start(); err != nil {
// t.Fatalf("UVM %s Failed start: %s", uvm.Id, err)
// }
//}
//// Helper to shoot a utility VM
//func terminateUtilityVM(t *testing.T, uvm *UtilityVM) {
// if err := uvm.Terminate(); err != nil {
// t.Fatalf("Failed terminate utility VM %s", err)
// }
//}
//// TODO: Test UVMResourcesFromContainerSpec
//func TestUVMSizing(t *testing.T) {
// t.Skip("for now - not implemented at all")
//}
//// TestID validates that the requested ID is retrieved
//func TestID(t *testing.T) {
// t.Skip("fornow")
// tempDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(tempDir)
// layers := append(layersNanoserver, tempDir)
// mountPath, err := mountContainerLayers(layers, nil)
// if err != nil {
// t.Fatalf("failed to mount container storage: %s", err)
// }
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
// c, err := CreateContainer(&CreateOptions{
// Id: "gruntbuggly",
// SchemaVersion: schemaversion.SchemaV21(),
// Spec: &specs.Spec{
// Windows: &specs.Windows{LayerFolders: layers},
// Root: &specs.Root{Path: mountPath.(string)},
// },
// })
// if err != nil {
// t.Fatalf("Failed create: %s", err)
// }
// if c.ID() != "gruntbuggly" {
// t.Fatalf("id not set correctly: %s", c.ID())
// }
// c.Terminate()
//}

View File

@@ -0,0 +1,115 @@
// +build windows
package hcsoci
import (
"encoding/json"
"github.com/Microsoft/hcsshim/internal/schema2"
"github.com/Microsoft/hcsshim/internal/schemaversion"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
)
func createLCOWSpec(coi *createOptionsInternal) (*specs.Spec, error) {
// Remarshal the spec to perform a deep copy.
j, err := json.Marshal(coi.Spec)
if err != nil {
return nil, err
}
spec := &specs.Spec{}
err = json.Unmarshal(j, spec)
if err != nil {
return nil, err
}
// TODO
// Translate the mounts. The root has already been translated in
// allocateLinuxResources.
/*
for i := range spec.Mounts {
spec.Mounts[i].Source = "???"
spec.Mounts[i].Destination = "???"
}
*/
// Linux containers don't care about Windows aspects of the spec except the
// network namespace
spec.Windows = nil
if coi.Spec.Windows != nil &&
coi.Spec.Windows.Network != nil &&
coi.Spec.Windows.Network.NetworkNamespace != "" {
spec.Windows = &specs.Windows{
Network: &specs.WindowsNetwork{
NetworkNamespace: coi.Spec.Windows.Network.NetworkNamespace,
},
}
}
// Hooks are not supported (they should be run in the host)
spec.Hooks = nil
// Clear unsupported features
if spec.Linux.Resources != nil {
spec.Linux.Resources.Devices = nil
spec.Linux.Resources.Memory = nil
spec.Linux.Resources.Pids = nil
spec.Linux.Resources.BlockIO = nil
spec.Linux.Resources.HugepageLimits = nil
spec.Linux.Resources.Network = nil
}
spec.Linux.Seccomp = nil
// Clear any specified namespaces
var namespaces []specs.LinuxNamespace
for _, ns := range spec.Linux.Namespaces {
switch ns.Type {
case specs.NetworkNamespace:
default:
ns.Path = ""
namespaces = append(namespaces, ns)
}
}
spec.Linux.Namespaces = namespaces
return spec, nil
}
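The marshal/unmarshal round trip at the top of createLCOWSpec is the standard trick for deep-copying a JSON-serializable struct so the copy can be mutated freely. A tiny self-contained demonstration; the types are illustrative:

package main

import (
	"encoding/json"
	"fmt"
)

type inner struct{ Vals []int }
type outer struct{ In *inner }

// deepCopy round-trips src through JSON so nested pointers and slices are
// freshly allocated rather than shared with the original.
func deepCopy(src *outer) (*outer, error) {
	j, err := json.Marshal(src)
	if err != nil {
		return nil, err
	}
	dst := &outer{}
	if err := json.Unmarshal(j, dst); err != nil {
		return nil, err
	}
	return dst, nil
}

func main() {
	a := &outer{In: &inner{Vals: []int{1}}}
	b, _ := deepCopy(a)
	b.In.Vals[0] = 2
	fmt.Println(a.In.Vals[0], b.In.Vals[0]) // 1 2: the copy is independent
}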
// This is identical to hcsschema.ComputeSystem but HostedSystem is an LCOW specific type - the schema docs only include WCOW.
type linuxComputeSystem struct {
Owner string `json:"Owner,omitempty"`
SchemaVersion *hcsschema.Version `json:"SchemaVersion,omitempty"`
HostingSystemId string `json:"HostingSystemId,omitempty"`
HostedSystem *linuxHostedSystem `json:"HostedSystem,omitempty"`
Container *hcsschema.Container `json:"Container,omitempty"`
VirtualMachine *hcsschema.VirtualMachine `json:"VirtualMachine,omitempty"`
ShouldTerminateOnLastHandleClosed bool `json:"ShouldTerminateOnLastHandleClosed,omitempty"`
}
type linuxHostedSystem struct {
SchemaVersion *hcsschema.Version
OciBundlePath string
OciSpecification *specs.Spec
}
func createLinuxContainerDocument(coi *createOptionsInternal, guestRoot string) (interface{}, error) {
spec, err := createLCOWSpec(coi)
if err != nil {
return nil, err
}
logrus.Debugf("hcsshim::createLinuxContainerDoc: guestRoot:%s", guestRoot)
v2 := &linuxComputeSystem{
Owner: coi.actualOwner,
SchemaVersion: schemaversion.SchemaV21(),
ShouldTerminateOnLastHandleClosed: true,
HostingSystemId: coi.HostingSystem.ID(),
HostedSystem: &linuxHostedSystem{
SchemaVersion: schemaversion.SchemaV21(),
OciBundlePath: guestRoot,
OciSpecification: spec,
},
}
return v2, nil
}

View File

@@ -0,0 +1,273 @@
// +build windows
package hcsoci
import (
"fmt"
"path/filepath"
"regexp"
"runtime"
"strings"
"github.com/Microsoft/hcsshim/internal/schema1"
"github.com/Microsoft/hcsshim/internal/schema2"
"github.com/Microsoft/hcsshim/internal/schemaversion"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/Microsoft/hcsshim/internal/uvmfolder"
"github.com/Microsoft/hcsshim/internal/wclayer"
"github.com/Microsoft/hcsshim/osversion"
"github.com/sirupsen/logrus"
)
// createWindowsContainerDocument creates a document suitable for calling HCS to create
// a container, both hosted and process isolated. It can create both v1 and v2
// schema, WCOW only. The container's storage should have been mounted already.
func createWindowsContainerDocument(coi *createOptionsInternal) (interface{}, error) {
logrus.Debugf("hcsshim: CreateHCSContainerDocument")
// TODO: Make this safe if exported so no null pointer dereferences.
if coi.Spec == nil {
return nil, fmt.Errorf("cannot create HCS container document - OCI spec is missing")
}
if coi.Spec.Windows == nil {
return nil, fmt.Errorf("cannot create HCS container document - OCI spec Windows section is missing ")
}
v1 := &schema1.ContainerConfig{
SystemType: "Container",
Name: coi.actualID,
Owner: coi.actualOwner,
HvPartition: false,
IgnoreFlushesDuringBoot: coi.Spec.Windows.IgnoreFlushesDuringBoot,
}
// IgnoreFlushesDuringBoot is a property of the SCSI attachment for the scratch. Set when it's hot-added to the utility VM
// ID is a property on the create call in V2 rather than part of the schema.
v2 := &hcsschema.ComputeSystem{
Owner: coi.actualOwner,
SchemaVersion: schemaversion.SchemaV21(),
ShouldTerminateOnLastHandleClosed: true,
}
v2Container := &hcsschema.Container{Storage: &hcsschema.Storage{}}
// TODO: Still want to revisit this.
if coi.Spec.Windows.LayerFolders == nil || len(coi.Spec.Windows.LayerFolders) < 2 {
return nil, fmt.Errorf("invalid spec - not enough layer folders supplied")
}
if coi.Spec.Hostname != "" {
v1.HostName = coi.Spec.Hostname
v2Container.GuestOs = &hcsschema.GuestOs{HostName: coi.Spec.Hostname}
}
if coi.Spec.Windows.Resources != nil {
if coi.Spec.Windows.Resources.CPU != nil {
if coi.Spec.Windows.Resources.CPU.Count != nil ||
coi.Spec.Windows.Resources.CPU.Shares != nil ||
coi.Spec.Windows.Resources.CPU.Maximum != nil {
v2Container.Processor = &hcsschema.Processor{}
}
if coi.Spec.Windows.Resources.CPU.Count != nil {
cpuCount := *coi.Spec.Windows.Resources.CPU.Count
hostCPUCount := uint64(runtime.NumCPU())
if cpuCount > hostCPUCount {
logrus.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount)
cpuCount = hostCPUCount
}
v1.ProcessorCount = uint32(cpuCount)
v2Container.Processor.Count = int32(cpuCount)
}
if coi.Spec.Windows.Resources.CPU.Shares != nil {
v1.ProcessorWeight = uint64(*coi.Spec.Windows.Resources.CPU.Shares)
v2Container.Processor.Weight = int32(v1.ProcessorWeight)
}
if coi.Spec.Windows.Resources.CPU.Maximum != nil {
v1.ProcessorMaximum = int64(*coi.Spec.Windows.Resources.CPU.Maximum)
v2Container.Processor.Maximum = int32(v1.ProcessorMaximum)
}
}
if coi.Spec.Windows.Resources.Memory != nil {
if coi.Spec.Windows.Resources.Memory.Limit != nil {
v1.MemoryMaximumInMB = int64(*coi.Spec.Windows.Resources.Memory.Limit) / 1024 / 1024
v2Container.Memory = &hcsschema.Memory{SizeInMB: int32(v1.MemoryMaximumInMB)}
}
}
if coi.Spec.Windows.Resources.Storage != nil {
if coi.Spec.Windows.Resources.Storage.Bps != nil || coi.Spec.Windows.Resources.Storage.Iops != nil {
v2Container.Storage.QoS = &hcsschema.StorageQoS{}
}
if coi.Spec.Windows.Resources.Storage.Bps != nil {
v1.StorageBandwidthMaximum = *coi.Spec.Windows.Resources.Storage.Bps
v2Container.Storage.QoS.BandwidthMaximum = int32(v1.StorageBandwidthMaximum)
}
if coi.Spec.Windows.Resources.Storage.Iops != nil {
v1.StorageIOPSMaximum = *coi.Spec.Windows.Resources.Storage.Iops
v2Container.Storage.QoS.IopsMaximum = int32(*coi.Spec.Windows.Resources.Storage.Iops)
}
}
}
// TODO V2 networking. Only partial at the moment. v2.Container.Networking.Namespace specifically
if coi.Spec.Windows.Network != nil {
v2Container.Networking = &hcsschema.Networking{}
v1.EndpointList = coi.Spec.Windows.Network.EndpointList
v2Container.Networking.Namespace = coi.actualNetworkNamespace
v1.AllowUnqualifiedDNSQuery = coi.Spec.Windows.Network.AllowUnqualifiedDNSQuery
v2Container.Networking.AllowUnqualifiedDnsQuery = v1.AllowUnqualifiedDNSQuery
if coi.Spec.Windows.Network.DNSSearchList != nil {
v1.DNSSearchList = strings.Join(coi.Spec.Windows.Network.DNSSearchList, ",")
v2Container.Networking.DnsSearchList = v1.DNSSearchList
}
v1.NetworkSharedContainerName = coi.Spec.Windows.Network.NetworkSharedContainerName
v2Container.Networking.NetworkSharedContainerName = v1.NetworkSharedContainerName
}
// TODO: V2 Credentials not in the schema yet.
if cs, ok := coi.Spec.Windows.CredentialSpec.(string); ok {
v1.Credentials = cs
}
if coi.Spec.Root == nil {
return nil, fmt.Errorf("spec is invalid - root isn't populated")
}
if coi.Spec.Root.Readonly {
return nil, fmt.Errorf(`invalid container spec - readonly is not supported for Windows containers`)
}
// Strip off the top-most RW/scratch layer as that's passed in separately to HCS for v1
v1.LayerFolderPath = coi.Spec.Windows.LayerFolders[len(coi.Spec.Windows.LayerFolders)-1]
if (schemaversion.IsV21(coi.actualSchemaVersion) && coi.HostingSystem == nil) ||
(schemaversion.IsV10(coi.actualSchemaVersion) && coi.Spec.Windows.HyperV == nil) {
// Argon v1 or v2.
const volumeGUIDRegex = `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}(|\\)$`
if matched, err := regexp.MatchString(volumeGUIDRegex, coi.Spec.Root.Path); !matched || err != nil {
return nil, fmt.Errorf(`invalid container spec - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, coi.Spec.Root.Path)
}
if coi.Spec.Root.Path[len(coi.Spec.Root.Path)-1] != '\\' {
coi.Spec.Root.Path += `\` // Be nice to clients and make sure well-formed for back-compat
}
v1.VolumePath = coi.Spec.Root.Path[:len(coi.Spec.Root.Path)-1] // Strip the trailing backslash. Required for v1.
v2Container.Storage.Path = coi.Spec.Root.Path
} else {
// A hosting system was supplied, implying v2 Xenon; OR a v1 Xenon.
if schemaversion.IsV10(coi.actualSchemaVersion) {
// V1 Xenon
v1.HvPartition = true
if coi.Spec == nil || coi.Spec.Windows == nil || coi.Spec.Windows.HyperV == nil { // Be resilient to nil de-reference
return nil, fmt.Errorf(`invalid container spec - Spec.Windows.HyperV is nil`)
}
if coi.Spec.Windows.HyperV.UtilityVMPath != "" {
// Client-supplied utility VM path
v1.HvRuntime = &schema1.HvRuntime{ImagePath: coi.Spec.Windows.HyperV.UtilityVMPath}
} else {
// Client was lazy. Let's locate it from the layer folders instead.
uvmImagePath, err := uvmfolder.LocateUVMFolder(coi.Spec.Windows.LayerFolders)
if err != nil {
return nil, err
}
v1.HvRuntime = &schema1.HvRuntime{ImagePath: filepath.Join(uvmImagePath, `UtilityVM`)}
}
} else {
// Hosting system was supplied, so is v2 Xenon.
v2Container.Storage.Path = coi.Spec.Root.Path
if coi.HostingSystem.OS() == "windows" {
layers, err := computeV2Layers(coi.HostingSystem, coi.Spec.Windows.LayerFolders[:len(coi.Spec.Windows.LayerFolders)-1])
if err != nil {
return nil, err
}
v2Container.Storage.Layers = layers
}
}
}
if coi.HostingSystem == nil { // Argon v1 or v2
for _, layerPath := range coi.Spec.Windows.LayerFolders[:len(coi.Spec.Windows.LayerFolders)-1] {
layerID, err := wclayer.LayerID(layerPath)
if err != nil {
return nil, err
}
v1.Layers = append(v1.Layers, schema1.Layer{ID: layerID.String(), Path: layerPath})
v2Container.Storage.Layers = append(v2Container.Storage.Layers, hcsschema.Layer{Id: layerID.String(), Path: layerPath})
}
}
// Add the mounts as mapped directories or mapped pipes
// TODO: Mapped pipes to add in v2 schema.
var (
mdsv1 []schema1.MappedDir
mpsv1 []schema1.MappedPipe
mdsv2 []hcsschema.MappedDirectory
mpsv2 []hcsschema.MappedPipe
)
for _, mount := range coi.Spec.Mounts {
const pipePrefix = `\\.\pipe\`
if mount.Type != "" {
return nil, fmt.Errorf("invalid container spec - Mount.Type '%s' must not be set", mount.Type)
}
if strings.HasPrefix(strings.ToLower(mount.Destination), pipePrefix) {
mpsv1 = append(mpsv1, schema1.MappedPipe{HostPath: mount.Source, ContainerPipeName: mount.Destination[len(pipePrefix):]})
mpsv2 = append(mpsv2, hcsschema.MappedPipe{HostPath: mount.Source, ContainerPipeName: mount.Destination[len(pipePrefix):]})
} else {
readOnly := false
for _, o := range mount.Options {
if strings.ToLower(o) == "ro" {
readOnly = true
}
}
mdv1 := schema1.MappedDir{HostPath: mount.Source, ContainerPath: mount.Destination, ReadOnly: readOnly}
mdv2 := hcsschema.MappedDirectory{ContainerPath: mount.Destination, ReadOnly: readOnly}
if coi.HostingSystem == nil {
mdv2.HostPath = mount.Source
} else {
uvmPath, err := coi.HostingSystem.GetVSMBUvmPath(mount.Source)
if err != nil {
if err == uvm.ErrNotAttached {
// It could also be a scsi mount.
uvmPath, err = coi.HostingSystem.GetScsiUvmPath(mount.Source)
if err != nil {
return nil, err
}
} else {
return nil, err
}
}
mdv2.HostPath = uvmPath
}
mdsv1 = append(mdsv1, mdv1)
mdsv2 = append(mdsv2, mdv2)
}
}
v1.MappedDirectories = mdsv1
v2Container.MappedDirectories = mdsv2
if len(mpsv1) > 0 && osversion.Get().Build < osversion.RS3 {
return nil, fmt.Errorf("named pipe mounts are not supported on this version of Windows")
}
v1.MappedPipes = mpsv1
v2Container.MappedPipes = mpsv2
// Put the v2Container object as a HostedSystem for a Xenon, or directly in the schema for an Argon.
if coi.HostingSystem == nil {
v2.Container = v2Container
} else {
v2.HostingSystemId = coi.HostingSystem.ID()
v2.HostedSystem = &hcsschema.HostedSystem{
SchemaVersion: schemaversion.SchemaV21(),
Container: v2Container,
}
}
if schemaversion.IsV10(coi.actualSchemaVersion) {
return v1, nil
}
return v2, nil
}
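The volume GUID validation above is easy to get wrong by hand; the check can be exercised standalone as below (regex copied from the function; the sample GUID is made up):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	const volumeGUIDRegex = `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}(|\\)$`
	good := `\\?\Volume{4c1b02c1-d990-4a8c-b04d-d82b3bc9f80b}\`
	bad := `C:\layers\scratch`
	fmt.Println(regexp.MustCompile(volumeGUIDRegex).MatchString(good)) // true
	fmt.Println(regexp.MustCompile(volumeGUIDRegex).MatchString(bad))  // false
}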

View File

@@ -0,0 +1,373 @@
// +build windows
package hcsoci
import (
"fmt"
"os"
"path"
"path/filepath"
"github.com/Microsoft/hcsshim/internal/guestrequest"
"github.com/Microsoft/hcsshim/internal/ospath"
"github.com/Microsoft/hcsshim/internal/requesttype"
"github.com/Microsoft/hcsshim/internal/schema2"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/Microsoft/hcsshim/internal/wclayer"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
type lcowLayerEntry struct {
hostPath string
uvmPath string
scsi bool
}
const scratchPath = "scratch"
// mountContainerLayers is a helper for clients to hide all the complexity of layer mounting
// Layer folders are in order: base, [rolayer1..rolayern,] scratch
//
// v1/v2: Argon WCOW: Returns the mount path on the host as a volume GUID.
// v1: Xenon WCOW: Done internally in HCS, so there is no point doing anything here.
// v2: Xenon WCOW: Returns a CombinedLayersV2 structure where ContainerRootPath is a folder
// inside the utility VM which is a GUID mapping of the scratch folder. Each
// of the layers are the VSMB locations where the read-only layers are mounted.
//
func MountContainerLayers(layerFolders []string, guestRoot string, uvm *uvm.UtilityVM) (interface{}, error) {
logrus.Debugln("hcsshim::mountContainerLayers", layerFolders)
if uvm == nil {
if len(layerFolders) < 2 {
return nil, fmt.Errorf("need at least two layers - base and scratch")
}
path := layerFolders[len(layerFolders)-1]
rest := layerFolders[:len(layerFolders)-1]
logrus.Debugln("hcsshim::mountContainerLayers ActivateLayer", path)
if err := wclayer.ActivateLayer(path); err != nil {
return nil, err
}
logrus.Debugln("hcsshim::mountContainerLayers Preparelayer", path, rest)
if err := wclayer.PrepareLayer(path, rest); err != nil {
if err2 := wclayer.DeactivateLayer(path); err2 != nil {
logrus.Warnf("Failed to Deactivate %s: %s", path, err)
}
return nil, err
}
mountPath, err := wclayer.GetLayerMountPath(path)
if err != nil {
if err := wclayer.UnprepareLayer(path); err != nil {
logrus.Warnf("Failed to Unprepare %s: %s", path, err)
}
if err2 := wclayer.DeactivateLayer(path); err2 != nil {
logrus.Warnf("Failed to Deactivate %s: %s", path, err)
}
return nil, err
}
return mountPath, nil
}
// V2 UVM
logrus.Debugf("hcsshim::mountContainerLayers Is a %s V2 UVM", uvm.OS())
// Add each read-only layer. For Windows, this is a VSMB share with the ResourceUri ending in
// a GUID based on the folder path. For Linux, this is a VPMEM device, except where the layer is
// over the maximum supported size, in which case we put it on SCSI instead.
//
// Each layer is ref-counted so that multiple containers in the same utility VM can share them.
var wcowLayersAdded []string
var lcowlayersAdded []lcowLayerEntry
attachedSCSIHostPath := ""
for _, layerPath := range layerFolders[:len(layerFolders)-1] {
var err error
if uvm.OS() == "windows" {
options := &hcsschema.VirtualSmbShareOptions{
ReadOnly: true,
PseudoOplocks: true,
TakeBackupPrivilege: true,
CacheIo: true,
ShareRead: true,
}
err = uvm.AddVSMB(layerPath, "", options)
if err == nil {
wcowLayersAdded = append(wcowLayersAdded, layerPath)
}
} else {
uvmPath := ""
hostPath := filepath.Join(layerPath, "layer.vhd")
var fi os.FileInfo
fi, err = os.Stat(hostPath)
if err == nil && uint64(fi.Size()) > uvm.PMemMaxSizeBytes() {
// Too big for PMEM. Add on SCSI instead (at /tmp/S<C>/<L>).
var (
controller int
lun int32
)
controller, lun, err = uvm.AddSCSILayer(hostPath)
if err == nil {
lcowlayersAdded = append(lcowlayersAdded,
lcowLayerEntry{
hostPath: hostPath,
uvmPath: fmt.Sprintf("/tmp/S%d/%d", controller, lun),
scsi: true,
})
}
} else {
_, uvmPath, err = uvm.AddVPMEM(hostPath, true) // UVM path is calculated. Will be /tmp/vN/
if err == nil {
lcowlayersAdded = append(lcowlayersAdded,
lcowLayerEntry{
hostPath: hostPath,
uvmPath: uvmPath,
})
}
}
}
if err != nil {
cleanupOnMountFailure(uvm, wcowLayersAdded, lcowlayersAdded, attachedSCSIHostPath)
return nil, err
}
}
// Add the scratch at an unused SCSI location. The container path inside the
// utility VM will be C:\<ID>.
hostPath := filepath.Join(layerFolders[len(layerFolders)-1], "sandbox.vhdx")
// BUGBUG Rename guestRoot better.
containerScratchPathInUVM := ospath.Join(uvm.OS(), guestRoot, scratchPath)
_, _, err := uvm.AddSCSI(hostPath, containerScratchPathInUVM, false)
if err != nil {
cleanupOnMountFailure(uvm, wcowLayersAdded, lcowlayersAdded, attachedSCSIHostPath)
return nil, err
}
attachedSCSIHostPath = hostPath
if uvm.OS() == "windows" {
// Load the filter at the container's scratch location calculated above. We pass into this request each of the
// read-only layer folders.
layers, err := computeV2Layers(uvm, wcowLayersAdded)
if err != nil {
cleanupOnMountFailure(uvm, wcowLayersAdded, lcowlayersAdded, attachedSCSIHostPath)
return nil, err
}
guestRequest := guestrequest.CombinedLayers{
ContainerRootPath: containerScratchPathInUVM,
Layers: layers,
}
combinedLayersModification := &hcsschema.ModifySettingRequest{
GuestRequest: guestrequest.GuestRequest{
Settings: guestRequest,
ResourceType: guestrequest.ResourceTypeCombinedLayers,
RequestType: requesttype.Add,
},
}
if err := uvm.Modify(combinedLayersModification); err != nil {
cleanupOnMountFailure(uvm, wcowLayersAdded, lcowlayersAdded, attachedSCSIHostPath)
return nil, err
}
logrus.Debugln("hcsshim::mountContainerLayers Succeeded")
return guestRequest, nil
}
// This is the LCOW layout inside the utilityVM. NNN is the container "number"
// which increments for each container created in a utility VM.
//
// /run/gcs/c/NNN/config.json
// /run/gcs/c/NNN/rootfs
// /run/gcs/c/NNN/scratch/upper
// /run/gcs/c/NNN/scratch/work
//
// /dev/sda on /tmp/scratch type ext4 (rw,relatime,block_validity,delalloc,barrier,user_xattr,acl)
// /dev/pmem0 on /tmp/v0 type ext4 (ro,relatime,block_validity,delalloc,norecovery,barrier,dax,user_xattr,acl)
// /dev/sdb on /run/gcs/c/NNN/scratch type ext4 (rw,relatime,block_validity,delalloc,barrier,user_xattr,acl)
// overlay on /run/gcs/c/NNN/rootfs type overlay (rw,relatime,lowerdir=/tmp/v0,upperdir=/run/gcs/c/NNN/scratch/upper,workdir=/run/gcs/c/NNN/scratch/work)
//
// Where /dev/sda is the scratch for utility VM itself
// /dev/pmemX are read-only layers for containers
// /dev/sd(b...) are scratch spaces for each container
layers := []hcsschema.Layer{}
for _, l := range lcowlayersAdded {
layers = append(layers, hcsschema.Layer{Path: l.uvmPath})
}
guestRequest := guestrequest.CombinedLayers{
ContainerRootPath: path.Join(guestRoot, rootfsPath),
Layers: layers,
ScratchPath: containerScratchPathInUVM,
}
combinedLayersModification := &hcsschema.ModifySettingRequest{
GuestRequest: guestrequest.GuestRequest{
ResourceType: guestrequest.ResourceTypeCombinedLayers,
RequestType: requesttype.Add,
Settings: guestRequest,
},
}
if err := uvm.Modify(combinedLayersModification); err != nil {
cleanupOnMountFailure(uvm, wcowLayersAdded, lcowlayersAdded, attachedSCSIHostPath)
return nil, err
}
logrus.Debugln("hcsshim::mountContainerLayers Succeeded")
return guestRequest, nil
}
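Since MountContainerLayers returns an interface{}, callers have to type-switch on the scenarios listed in the doc comment above. A self-contained sketch of that interpretation; combinedLayers stands in for guestrequest.CombinedLayers and the values are made up:

package main

import "fmt"

type combinedLayers struct{ ContainerRootPath string }

// describeMount interprets a mount result: a volume GUID path string for
// host-mounted (Argon) storage, or a combined-layers value for a v2 Xenon.
func describeMount(mounted interface{}) string {
	switch m := mounted.(type) {
	case string:
		return "host volume path: " + m
	case combinedLayers:
		return "in-UVM container root: " + m.ContainerRootPath
	default:
		return "unexpected mount result"
	}
}

func main() {
	fmt.Println(describeMount(`\\?\Volume{4c1b02c1-d990-4a8c-b04d-d82b3bc9f80b}\`))
	fmt.Println(describeMount(combinedLayers{ContainerRootPath: `C:\c\1\scratch`}))
}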
// UnmountOperation is used when calling Unmount() to determine what type of unmount is
// required. In the V1 schema, this must be UnmountOperationAll. In V2, clients can
// be more optimal and unmount only what they need, which can be a minor performance
// improvement (e.g. if you know only one container is running in a utility VM, and
// the UVM is about to be torn down, there is no need to unmount the VSMB shares;
// unmounting just the SCSI scratch keeps the file system consistent).
type UnmountOperation uint
const (
UnmountOperationSCSI UnmountOperation = 0x01
UnmountOperationVSMB = 0x02
UnmountOperationVPMEM = 0x04
UnmountOperationAll = UnmountOperationSCSI | UnmountOperationVSMB | UnmountOperationVPMEM
)
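The operations are bit flags: a caller composes them with OR and the function below tests them with AND. A local, self-contained illustration, with the constants mirrored here purely for the sketch:

package main

import "fmt"

type unmountOperation uint

const (
	opSCSI  unmountOperation = 0x01
	opVSMB  unmountOperation = 0x02
	opVPMEM unmountOperation = 0x04
	opAll                    = opSCSI | opVSMB | opVPMEM
)

func main() {
	// A UVM about to be torn down: unmount only the SCSI scratch.
	op := opSCSI
	fmt.Println("scsi:", op&opSCSI == opSCSI) // true
	fmt.Println("vsmb:", op&opVSMB == opVSMB) // false
	fmt.Println("all: ", op == opAll)         // false
}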
// UnmountContainerLayers is a helper for clients to hide all the complexity of layer unmounting
func UnmountContainerLayers(layerFolders []string, guestRoot string, uvm *uvm.UtilityVM, op UnmountOperation) error {
logrus.Debugln("hcsshim::unmountContainerLayers", layerFolders)
if uvm == nil {
// Must be an argon - folders are mounted on the host
if op != UnmountOperationAll {
return fmt.Errorf("only operation supported for host-mounted folders is unmountOperationAll")
}
if len(layerFolders) < 1 {
return fmt.Errorf("need at least one layer for Unmount")
}
path := layerFolders[len(layerFolders)-1]
logrus.Debugln("hcsshim::Unmount UnprepareLayer", path)
if err := wclayer.UnprepareLayer(path); err != nil {
return err
}
// TODO Should we try this anyway?
logrus.Debugln("hcsshim::unmountContainerLayers DeactivateLayer", path)
return wclayer.DeactivateLayer(path)
}
// V2 Xenon
// Base+Scratch as a minimum. This differs from v1, which only requires the scratch
if len(layerFolders) < 2 {
return fmt.Errorf("at least two layers are required for unmount")
}
var retError error
// Unload the storage filter followed by the SCSI scratch
if (op & UnmountOperationSCSI) == UnmountOperationSCSI {
containerScratchPathInUVM := ospath.Join(uvm.OS(), guestRoot, scratchPath)
logrus.Debugf("hcsshim::unmountContainerLayers CombinedLayers %s", containerScratchPathInUVM)
combinedLayersModification := &hcsschema.ModifySettingRequest{
GuestRequest: guestrequest.GuestRequest{
ResourceType: guestrequest.ResourceTypeCombinedLayers,
RequestType: requesttype.Remove,
Settings: guestrequest.CombinedLayers{ContainerRootPath: containerScratchPathInUVM},
},
}
if err := uvm.Modify(combinedLayersModification); err != nil {
logrus.Errorf(err.Error())
}
// Hot remove the scratch from the SCSI controller
hostScratchFile := filepath.Join(layerFolders[len(layerFolders)-1], "sandbox.vhdx")
logrus.Debugf("hcsshim::unmountContainerLayers SCSI %s %s", containerScratchPathInUVM, hostScratchFile)
if err := uvm.RemoveSCSI(hostScratchFile); err != nil {
e := fmt.Errorf("failed to remove SCSI %s: %s", hostScratchFile, err)
logrus.Debugln(e)
if retError == nil {
retError = e
} else {
retError = errors.Wrapf(retError, e.Error())
}
}
}
// Remove each of the read-only layers from VSMB. These are ref-counted and
// only removed once the count drops to zero. This allows multiple containers
// to share layers.
if uvm.OS() == "windows" && len(layerFolders) > 1 && (op&UnmountOperationVSMB) == UnmountOperationVSMB {
for _, layerPath := range layerFolders[:len(layerFolders)-1] {
if e := uvm.RemoveVSMB(layerPath); e != nil {
logrus.Debugln(e)
if retError == nil {
retError = e
} else {
retError = errors.Wrapf(retError, e.Error())
}
}
}
}
// Remove each of the read-only layers from VPMEM (or SCSI). These are ref-counted
// and only removed once the count drops to zero. This allows multiple containers to
// share layers. Note that SCSI is used on large layers.
if uvm.OS() == "linux" && len(layerFolders) > 1 && (op&UnmountOperationVPMEM) == UnmountOperationVPMEM {
for _, layerPath := range layerFolders[:len(layerFolders)-1] {
hostPath := filepath.Join(layerPath, "layer.vhd")
if fi, err := os.Stat(hostPath); err == nil {
var e error
if uint64(fi.Size()) > uvm.PMemMaxSizeBytes() {
e = uvm.RemoveSCSI(hostPath)
} else {
e = uvm.RemoveVPMEM(hostPath)
}
if e != nil {
logrus.Debugln(e)
if retError == nil {
retError = e
} else {
retError = errors.Wrapf(retError, e.Error())
}
}
}
}
}
// TODO (possibly) Consider deleting the container directory in the utility VM
return retError
}
func cleanupOnMountFailure(uvm *uvm.UtilityVM, wcowLayers []string, lcowLayers []lcowLayerEntry, scratchHostPath string) {
for _, wl := range wcowLayers {
if err := uvm.RemoveVSMB(wl); err != nil {
logrus.Warnf("Possibly leaked vsmbshare on error removal path: %s", err)
}
}
for _, ll := range lcowLayers {
if ll.scsi {
if err := uvm.RemoveSCSI(ll.hostPath); err != nil {
logrus.Warnf("Possibly leaked SCSI on error removal path: %s", err)
}
} else if err := uvm.RemoveVPMEM(ll.hostPath); err != nil {
logrus.Warnf("Possibly leaked vpmemdevice on error removal path: %s", err)
}
}
if scratchHostPath != "" {
if err := uvm.RemoveSCSI(scratchHostPath); err != nil {
logrus.Warnf("Possibly leaked SCSI disk on error removal path: %s", err)
}
}
}
func computeV2Layers(vm *uvm.UtilityVM, paths []string) (layers []hcsschema.Layer, err error) {
for _, path := range paths {
uvmPath, err := vm.GetVSMBUvmPath(path)
if err != nil {
return nil, err
}
layerID, err := wclayer.LayerID(path)
if err != nil {
return nil, err
}
layers = append(layers, hcsschema.Layer{Id: layerID.String(), Path: uvmPath})
}
return layers, nil
}

View File

@@ -0,0 +1,41 @@
package hcsoci
import (
"github.com/Microsoft/hcsshim/internal/hns"
"github.com/sirupsen/logrus"
)
func createNetworkNamespace(coi *createOptionsInternal, resources *Resources) error {
netID, err := hns.CreateNamespace()
if err != nil {
return err
}
logrus.Infof("created network namespace %s for %s", netID, coi.ID)
resources.netNS = netID
resources.createdNetNS = true
for _, endpointID := range coi.Spec.Windows.Network.EndpointList {
err = hns.AddNamespaceEndpoint(netID, endpointID)
if err != nil {
return err
}
logrus.Infof("added network endpoint %s to namespace %s", endpointID, netID)
resources.networkEndpoints = append(resources.networkEndpoints, endpointID)
}
return nil
}
func getNamespaceEndpoints(netNS string) ([]*hns.HNSEndpoint, error) {
ids, err := hns.GetNamespaceEndpoints(netNS)
if err != nil {
return nil, err
}
var endpoints []*hns.HNSEndpoint
for _, id := range ids {
endpoint, err := hns.GetHNSEndpointByID(id)
if err != nil {
return nil, err
}
endpoints = append(endpoints, endpoint)
}
return endpoints, nil
}

View File

@@ -0,0 +1,127 @@
package hcsoci
import (
"os"
"github.com/Microsoft/hcsshim/internal/hns"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/sirupsen/logrus"
)
// NetNS returns the network namespace for the container
func (r *Resources) NetNS() string {
return r.netNS
}
// Resources is the structure returned as part of creating a container. It holds
// nothing useful to clients, hence everything is lowercased. A client would use
// it in a call to ReleaseResource to ensure everything is cleaned up when a
// container exits.
type Resources struct {
// containerRootInUVM is the base path in a utility VM where elements relating
// to a container are exposed. For example, the mounted filesystem; the runtime
// spec (in the case of LCOW); overlay and scratch (in the case of LCOW).
//
// For WCOW, this will be under C:\c\N, and for LCOW this will
// be under /run/gcs/c/N. N is an atomic counter for each container created
// in that utility VM. For LCOW this is also the "OCI Bundle Path".
containerRootInUVM string
// layers is an array of the layer folder paths which have been mounted either on
// the host in the case of a WCOW Argon, or in a utility VM for WCOW Xenon and LCOW.
layers []string
// vsmbMounts is an array of the host-paths mounted into a utility VM to support
// (bind-)mounts into a WCOW v2 Xenon.
vsmbMounts []string
// plan9Mounts is an array of all the host paths which have been added to
// an LCOW utility VM
plan9Mounts []string
// netNS is the network namespace
netNS string
// networkEndpoints is the list of network endpoints used by the container
networkEndpoints []string
// createdNetNS indicates if the network namespace has been created
createdNetNS bool
// addedNetNSToVM indicates if the network namespace has been added to the container's utility VM
addedNetNSToVM bool
// scsiMounts is an array of the host-paths mounted into a utility VM to
// support scsi device passthrough.
scsiMounts []string
}
// TODO: Method on the resources?
func ReleaseResources(r *Resources, vm *uvm.UtilityVM, all bool) error {
if vm != nil && r.addedNetNSToVM {
err := vm.RemoveNetNS(r.netNS)
if err != nil {
logrus.Warn(err)
}
r.addedNetNSToVM = false
}
if r.createdNetNS {
for len(r.networkEndpoints) != 0 {
endpoint := r.networkEndpoints[len(r.networkEndpoints)-1]
err := hns.RemoveNamespaceEndpoint(r.netNS, endpoint)
if err != nil {
if !os.IsNotExist(err) {
return err
}
logrus.Warnf("removing endpoint %s from namespace %s: does not exist", endpoint, r.NetNS())
}
r.networkEndpoints = r.networkEndpoints[:len(r.networkEndpoints)-1]
}
r.networkEndpoints = nil
err := hns.RemoveNamespace(r.netNS)
if err != nil && !os.IsNotExist(err) {
return err
}
r.createdNetNS = false
}
if len(r.layers) != 0 {
op := UnmountOperationSCSI
if vm == nil || all {
op = UnmountOperationAll
}
err := UnmountContainerLayers(r.layers, r.containerRootInUVM, vm, op)
if err != nil {
return err
}
r.layers = nil
}
if all {
for len(r.vsmbMounts) != 0 {
mount := r.vsmbMounts[len(r.vsmbMounts)-1]
if err := vm.RemoveVSMB(mount); err != nil {
return err
}
r.vsmbMounts = r.vsmbMounts[:len(r.vsmbMounts)-1]
}
for len(r.plan9Mounts) != 0 {
mount := r.plan9Mounts[len(r.plan9Mounts)-1]
if err := vm.RemovePlan9(mount); err != nil {
return err
}
r.plan9Mounts = r.plan9Mounts[:len(r.plan9Mounts)-1]
}
for _, path := range r.scsiMounts {
if err := vm.RemoveSCSI(path); err != nil {
return err
}
}
r.scsiMounts = nil
}
return nil
}
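ReleaseResources tears mounts down in reverse order of acquisition, popping from the end of each slice so a mid-loop failure leaves only the still-held entries recorded. A self-contained sketch of that pattern, with illustrative names and paths:

package main

import "fmt"

// releaseAll removes held resources last-in-first-out, shrinking the slice
// as it goes; on failure the remaining entries stay recorded for a retry.
func releaseAll(held []string, remove func(string) error) ([]string, error) {
	for len(held) != 0 {
		last := held[len(held)-1]
		if err := remove(last); err != nil {
			return held, err
		}
		held = held[:len(held)-1]
	}
	return nil, nil
}

func main() {
	held := []string{`C:\layers\base`, `C:\layers\top`}
	held, err := releaseAll(held, func(m string) error {
		fmt.Println("removed", m)
		return nil
	})
	fmt.Println("remaining:", len(held), "err:", err)
}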

View File

@@ -0,0 +1,104 @@
// +build windows
package hcsoci
// Contains functions relating to a LCOW container, as opposed to a utility VM
import (
"fmt"
"path"
"strconv"
"strings"
"github.com/Microsoft/hcsshim/internal/guestrequest"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
)
const rootfsPath = "rootfs"
const mountPathPrefix = "m"
func allocateLinuxResources(coi *createOptionsInternal, resources *Resources) error {
if coi.Spec.Root == nil {
coi.Spec.Root = &specs.Root{}
}
if coi.Spec.Root.Path == "" {
logrus.Debugln("hcsshim::allocateLinuxResources mounting storage")
mcl, err := MountContainerLayers(coi.Spec.Windows.LayerFolders, resources.containerRootInUVM, coi.HostingSystem)
if err != nil {
return fmt.Errorf("failed to mount container storage: %s", err)
}
if coi.HostingSystem == nil {
coi.Spec.Root.Path = mcl.(string) // Argon v1 or v2
} else {
coi.Spec.Root.Path = mcl.(guestrequest.CombinedLayers).ContainerRootPath // v2 Xenon LCOW
}
resources.layers = coi.Spec.Windows.LayerFolders
} else {
// This is the "Plan 9" root filesystem.
// TODO: We need a test for this. Ask @jstarks how you can even lay this out on Windows.
hostPath := coi.Spec.Root.Path
uvmPathForContainersFileSystem := path.Join(resources.containerRootInUVM, rootfsPath)
err := coi.HostingSystem.AddPlan9(hostPath, uvmPathForContainersFileSystem, coi.Spec.Root.Readonly)
if err != nil {
return fmt.Errorf("adding plan9 root: %s", err)
}
coi.Spec.Root.Path = uvmPathForContainersFileSystem
resources.plan9Mounts = append(resources.plan9Mounts, hostPath)
}
for i, mount := range coi.Spec.Mounts {
switch mount.Type {
case "bind":
case "physical-disk":
case "virtual-disk":
default:
// Unknown mount type
continue
}
if mount.Destination == "" || mount.Source == "" {
return fmt.Errorf("invalid OCI spec - a mount must have both source and a destination: %+v", mount)
}
if coi.HostingSystem != nil {
hostPath := mount.Source
uvmPathForShare := path.Join(resources.containerRootInUVM, mountPathPrefix+strconv.Itoa(i))
readOnly := false
for _, o := range mount.Options {
if strings.ToLower(o) == "ro" {
readOnly = true
break
}
}
if mount.Type == "physical-disk" {
logrus.Debugf("hcsshim::allocateLinuxResources Hot-adding SCSI physical disk for OCI mount %+v", mount)
_, _, err := coi.HostingSystem.AddSCSIPhysicalDisk(hostPath, uvmPathForShare, readOnly)
if err != nil {
return fmt.Errorf("adding SCSI physical disk mount %+v: %s", mount, err)
}
resources.scsiMounts = append(resources.scsiMounts, hostPath)
coi.Spec.Mounts[i].Type = "none"
} else if mount.Type == "virtual-disk" {
logrus.Debugf("hcsshim::allocateLinuxResources Hot-adding SCSI virtual disk for OCI mount %+v", mount)
_, _, err := coi.HostingSystem.AddSCSI(hostPath, uvmPathForShare, readOnly)
if err != nil {
return fmt.Errorf("adding SCSI virtual disk mount %+v: %s", mount, err)
}
resources.scsiMounts = append(resources.scsiMounts, hostPath)
coi.Spec.Mounts[i].Type = "none"
} else {
logrus.Debugf("hcsshim::allocateLinuxResources Hot-adding Plan9 for OCI mount %+v", mount)
err := coi.HostingSystem.AddPlan9(hostPath, uvmPathForShare, readOnly)
if err != nil {
return fmt.Errorf("adding plan9 mount %+v: %s", mount, err)
}
resources.plan9Mounts = append(resources.plan9Mounts, hostPath)
}
coi.Spec.Mounts[i].Source = uvmPathForShare
}
}
return nil
}

View File

@@ -0,0 +1,127 @@
// +build windows
package hcsoci
// Contains functions relating to a WCOW container, as opposed to a utility VM
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/Microsoft/hcsshim/internal/guestrequest"
"github.com/Microsoft/hcsshim/internal/schema2"
"github.com/Microsoft/hcsshim/internal/schemaversion"
"github.com/Microsoft/hcsshim/internal/wclayer"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
)
func allocateWindowsResources(coi *createOptionsInternal, resources *Resources) error {
if coi.Spec == nil || coi.Spec.Windows == nil || coi.Spec.Windows.LayerFolders == nil {
return fmt.Errorf("field 'Spec.Windows.Layerfolders' is not populated")
}
scratchFolder := coi.Spec.Windows.LayerFolders[len(coi.Spec.Windows.LayerFolders)-1]
logrus.Debugf("hcsshim::allocateWindowsResources scratch folder: %s", scratchFolder)
// TODO: Remove this code for auto-creation. Make the caller responsible.
// Create the directory for the RW scratch layer if it doesn't exist
if _, err := os.Stat(scratchFolder); os.IsNotExist(err) {
logrus.Debugf("hcsshim::allocateWindowsResources container scratch folder does not exist so creating: %s ", scratchFolder)
if err := os.MkdirAll(scratchFolder, 0777); err != nil {
return fmt.Errorf("failed to auto-create container scratch folder %s: %s", scratchFolder, err)
}
}
// Create sandbox.vhdx in the scratch folder if it doesn't exist. It's called sandbox.vhdx
// rather than scratch.vhdx because, as in the v1 schema, the name is hard-coded in HCS.
if _, err := os.Stat(filepath.Join(scratchFolder, "sandbox.vhdx")); os.IsNotExist(err) {
logrus.Debugf("hcsshim::allocateWindowsResources container sandbox.vhdx does not exist so creating in %s ", scratchFolder)
if err := wclayer.CreateScratchLayer(scratchFolder, coi.Spec.Windows.LayerFolders[:len(coi.Spec.Windows.LayerFolders)-1]); err != nil {
return fmt.Errorf("failed to CreateSandboxLayer %s", err)
}
}
if coi.Spec.Root == nil {
coi.Spec.Root = &specs.Root{}
}
if coi.Spec.Root.Path == "" && (coi.HostingSystem != nil || coi.Spec.Windows.HyperV == nil) {
logrus.Debugln("hcsshim::allocateWindowsResources mounting storage")
mcl, err := MountContainerLayers(coi.Spec.Windows.LayerFolders, resources.containerRootInUVM, coi.HostingSystem)
if err != nil {
return fmt.Errorf("failed to mount container storage: %s", err)
}
if coi.HostingSystem == nil {
coi.Spec.Root.Path = mcl.(string) // Argon v1 or v2
} else {
coi.Spec.Root.Path = mcl.(guestrequest.CombinedLayers).ContainerRootPath // v2 Xenon WCOW
}
resources.layers = coi.Spec.Windows.LayerFolders
}
// Validate each of the mounts. If this is a V2 Xenon, we have to add them as
// VSMB shares to the utility VM. For V1 Xenon and Argons, there's nothing for
// us to do as it's done by HCS.
for i, mount := range coi.Spec.Mounts {
if mount.Destination == "" || mount.Source == "" {
return fmt.Errorf("invalid OCI spec - a mount must have both source and a destination: %+v", mount)
}
switch mount.Type {
case "":
case "physical-disk":
case "virtual-disk":
default:
return fmt.Errorf("invalid OCI spec - Type '%s' not supported", mount.Type)
}
if coi.HostingSystem != nil && schemaversion.IsV21(coi.actualSchemaVersion) {
uvmPath := fmt.Sprintf("C:\\%s\\%d", coi.actualID, i)
readOnly := false
for _, o := range mount.Options {
if strings.ToLower(o) == "ro" {
readOnly = true
break
}
}
if mount.Type == "physical-disk" {
logrus.Debugf("hcsshim::allocateWindowsResources Hot-adding SCSI physical disk for OCI mount %+v", mount)
_, _, err := coi.HostingSystem.AddSCSIPhysicalDisk(mount.Source, uvmPath, readOnly)
if err != nil {
return fmt.Errorf("adding SCSI physical disk mount %+v: %s", mount, err)
}
coi.Spec.Mounts[i].Type = ""
resources.scsiMounts = append(resources.scsiMounts, mount.Source)
} else if mount.Type == "virtual-disk" {
logrus.Debugf("hcsshim::allocateWindowsResources Hot-adding SCSI virtual disk for OCI mount %+v", mount)
_, _, err := coi.HostingSystem.AddSCSI(mount.Source, uvmPath, readOnly)
if err != nil {
return fmt.Errorf("adding SCSI virtual disk mount %+v: %s", mount, err)
}
coi.Spec.Mounts[i].Type = ""
resources.scsiMounts = append(resources.scsiMounts, mount.Source)
} else {
logrus.Debugf("hcsshim::allocateWindowsResources Hot-adding VSMB share for OCI mount %+v", mount)
options := &hcsschema.VirtualSmbShareOptions{}
if readOnly {
options.ReadOnly = true
options.CacheIo = true
options.ShareRead = true
options.ForceLevelIIOplocks = true
}
err := coi.HostingSystem.AddVSMB(mount.Source, "", options)
if err != nil {
return fmt.Errorf("failed to add VSMB share to utility VM for mount %+v: %s", mount, err)
}
resources.vsmbMounts = append(resources.vsmbMounts, mount.Source)
}
}
}
return nil
}
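For illustration only: a sketch of the VSMB share options the final branch above builds when a plain directory mount carries the "ro" option. The flag combination simply mirrors the code; nothing here is called by the vendored package itself.

package example

import hcsschema "github.com/Microsoft/hcsshim/internal/schema2"

// readOnlyVSMBOptions mirrors the options allocateWindowsResources sets for
// a read-only directory mount hot-added to the utility VM as a VSMB share.
func readOnlyVSMBOptions() *hcsschema.VirtualSmbShareOptions {
	return &hcsschema.VirtualSmbShareOptions{
		ReadOnly:            true, // guest sees the share read-only
		CacheIo:             true, // caching is safe with no writers
		ShareRead:           true,
		ForceLevelIIOplocks: true,
	}
}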

View File

@@ -0,0 +1,260 @@
// +build windows,functional
package hcsoci
//import (
// "os"
// "path/filepath"
// "testing"
// "github.com/Microsoft/hcsshim/internal/schemaversion"
// specs "github.com/opencontainers/runtime-spec/specs-go"
//)
//// --------------------------------
//// W C O W A R G O N V 1
//// --------------------------------
//// A v1 Argon with a single base layer. It also validates hostname functionality is propagated.
//func TestV1Argon(t *testing.T) {
// t.Skip("fornow")
// tempDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(tempDir)
// layers := append(layersNanoserver, tempDir)
// mountPath, err := mountContainerLayers(layers, nil)
// if err != nil {
// t.Fatalf("failed to mount container storage: %s", err)
// }
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
// c, err := CreateContainer(&CreateOptions{
// SchemaVersion: schemaversion.SchemaV10(),
// Id: "TestV1Argon",
// Owner: "unit-test",
// Spec: &specs.Spec{
// Hostname: "goofy",
// Windows: &specs.Windows{LayerFolders: layers},
// Root: &specs.Root{Path: mountPath.(string)},
// },
// })
// if err != nil {
// t.Fatalf("Failed create: %s", err)
// }
// startContainer(t, c)
// runCommand(t, c, "cmd /s /c echo Hello", `c:\`, "Hello")
// runCommand(t, c, "cmd /s /c hostname", `c:\`, "goofy")
// stopContainer(t, c)
// c.Terminate()
//}
//// A v1 Argon with a single base layer which uses the auto-mount capability
//func TestV1ArgonAutoMount(t *testing.T) {
// t.Skip("fornow")
// tempDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(tempDir)
// layers := append(layersBusybox, tempDir)
// c, err := CreateContainer(&CreateOptions{
// Id: "TestV1ArgonAutoMount",
// SchemaVersion: schemaversion.SchemaV10(),
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layers}},
// })
// if err != nil {
// t.Fatalf("Failed create: %s", err)
// }
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
// startContainer(t, c)
// runCommand(t, c, "cmd /s /c echo Hello", `c:\`, "Hello")
// stopContainer(t, c)
// c.Terminate()
//}
//// A v1 Argon with multiple layers which uses the auto-mount capability
//func TestV1ArgonMultipleBaseLayersAutoMount(t *testing.T) {
// t.Skip("fornow")
// // This is the important bit for this test. It's deleted here. We call the helper only to allocate a temporary directory
// containerScratchDir := createTempDir(t)
// os.RemoveAll(containerScratchDir)
// defer os.RemoveAll(containerScratchDir) // As auto-created
// layers := append(layersBusybox, containerScratchDir)
// c, err := CreateContainer(&CreateOptions{
// Id: "TestV1ArgonMultipleBaseLayersAutoMount",
// SchemaVersion: schemaversion.SchemaV10(),
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layers}},
// })
// if err != nil {
// t.Fatalf("Failed create: %s", err)
// }
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
// startContainer(t, c)
// runCommand(t, c, "cmd /s /c echo Hello", `c:\`, "Hello")
// stopContainer(t, c)
// c.Terminate()
//}
//// A v1 Argon with a single mapped directory.
//func TestV1ArgonSingleMappedDirectory(t *testing.T) {
// t.Skip("fornow")
// tempDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(tempDir)
// layers := append(layersNanoserver, tempDir)
// // Create a temp folder containing foo.txt which will be used for the bind-mount test.
// source := createTempDir(t)
// defer os.RemoveAll(source)
// mount := specs.Mount{
// Source: source,
// Destination: `c:\foo`,
// }
// f, err := os.OpenFile(filepath.Join(source, "foo.txt"), os.O_RDWR|os.O_CREATE, 0755)
// f.Close()
// c, err := CreateContainer(&CreateOptions{
// SchemaVersion: schemaversion.SchemaV10(),
// Spec: &specs.Spec{
// Windows: &specs.Windows{LayerFolders: layers},
// Mounts: []specs.Mount{mount},
// },
// })
// if err != nil {
// t.Fatalf("Failed create: %s", err)
// }
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
// startContainer(t, c)
// runCommand(t, c, `cmd /s /c dir /b c:\foo`, `c:\`, "foo.txt")
// stopContainer(t, c)
// c.Terminate()
//}
//// --------------------------------
//// W C O W A R G O N V 2
//// --------------------------------
//// A v2 Argon with a single base layer. It also validates hostname functionality is propagated.
//// It also uses an auto-generated ID.
//func TestV2Argon(t *testing.T) {
// t.Skip("fornow")
// tempDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(tempDir)
// layers := append(layersNanoserver, tempDir)
// mountPath, err := mountContainerLayers(layers, nil)
// if err != nil {
// t.Fatalf("failed to mount container storage: %s", err)
// }
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
// c, err := CreateContainer(&CreateOptions{
// SchemaVersion: schemaversion.SchemaV21(),
// Spec: &specs.Spec{
// Hostname: "mickey",
// Windows: &specs.Windows{LayerFolders: layers},
// Root: &specs.Root{Path: mountPath.(string)},
// },
// })
// if err != nil {
// t.Fatalf("Failed create: %s", err)
// }
// startContainer(t, c)
// runCommand(t, c, "cmd /s /c echo Hello", `c:\`, "Hello")
// runCommand(t, c, "cmd /s /c hostname", `c:\`, "mickey")
// stopContainer(t, c)
// c.Terminate()
//}
//// A v2 Argon with multiple layers
//func TestV2ArgonMultipleBaseLayers(t *testing.T) {
// t.Skip("fornow")
// tempDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(tempDir)
// layers := append(layersBusybox, tempDir)
// mountPath, err := mountContainerLayers(layers, nil)
// if err != nil {
// t.Fatalf("failed to mount container storage: %s", err)
// }
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
// c, err := CreateContainer(&CreateOptions{
// SchemaVersion: schemaversion.SchemaV21(),
// Id: "TestV2ArgonMultipleBaseLayers",
// Spec: &specs.Spec{
// Windows: &specs.Windows{LayerFolders: layers},
// Root: &specs.Root{Path: mountPath.(string)},
// },
// })
// if err != nil {
// t.Fatalf("Failed create: %s", err)
// }
// startContainer(t, c)
// runCommand(t, c, "cmd /s /c echo Hello", `c:\`, "Hello")
// stopContainer(t, c)
// c.Terminate()
//}
//// A v2 Argon with multiple layers which uses the auto-mount capability and auto-create
//func TestV2ArgonAutoMountMultipleBaseLayers(t *testing.T) {
// t.Skip("fornow")
// // This is the important bit for this test. It's deleted here. We call the helper only to allocate a temporary directory
// containerScratchDir := createTempDir(t)
// os.RemoveAll(containerScratchDir)
// defer os.RemoveAll(containerScratchDir) // As auto-created
// layers := append(layersBusybox, containerScratchDir)
// c, err := CreateContainer(&CreateOptions{
// SchemaVersion: schemaversion.SchemaV21(),
// Id: "TestV2ArgonAutoMountMultipleBaseLayers",
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layers}},
// })
// if err != nil {
// t.Fatalf("Failed create: %s", err)
// }
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
// startContainer(t, c)
// runCommand(t, c, "cmd /s /c echo Hello", `c:\`, "Hello")
// stopContainer(t, c)
// c.Terminate()
//}
//// A v2 Argon with a single mapped directory.
//func TestV2ArgonSingleMappedDirectory(t *testing.T) {
// t.Skip("fornow")
// tempDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(tempDir)
// layers := append(layersNanoserver, tempDir)
// // Create a temp folder containing foo.txt which will be used for the bind-mount test.
// source := createTempDir(t)
// defer os.RemoveAll(source)
// mount := specs.Mount{
// Source: source,
// Destination: `c:\foo`,
// }
// f, err := os.OpenFile(filepath.Join(source, "foo.txt"), os.O_RDWR|os.O_CREATE, 0755)
// f.Close()
// c, err := CreateContainer(&CreateOptions{
// SchemaVersion: schemaversion.SchemaV21(),
// Spec: &specs.Spec{
// Windows: &specs.Windows{LayerFolders: layers},
// Mounts: []specs.Mount{mount},
// },
// })
// if err != nil {
// t.Fatalf("Failed create: %s", err)
// }
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
// startContainer(t, c)
// runCommand(t, c, `cmd /s /c dir /b c:\foo`, `c:\`, "foo.txt")
// stopContainer(t, c)
// c.Terminate()
//}

View File

@@ -0,0 +1,365 @@
// +build windows,functional
package hcsoci
//import (
// "fmt"
// "os"
// "path/filepath"
// "testing"
// "github.com/Microsoft/hcsshim/internal/schemaversion"
// specs "github.com/opencontainers/runtime-spec/specs-go"
//)
//// --------------------------------
//// W C O W X E N O N V 2
//// --------------------------------
//// A single WCOW xenon. Note in this test, neither the UVM or the
//// containers are supplied IDs - they will be autogenerated for us.
//// This is the minimum set of parameters needed to create a V2 WCOW xenon.
//func TestV2XenonWCOW(t *testing.T) {
// t.Skip("Skipping for now")
// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "", nil)
// defer os.RemoveAll(uvmScratchDir)
// defer uvm.Terminate()
// // Create the container hosted inside the utility VM
// containerScratchDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(containerScratchDir)
// layerFolders := append(layersNanoserver, containerScratchDir)
// hostedContainer, err := CreateContainer(&CreateOptions{
// HostingSystem: uvm,
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layerFolders}},
// })
// if err != nil {
// t.Fatalf("CreateContainer failed: %s", err)
// }
// defer unmountContainerLayers(layerFolders, uvm, unmountOperationAll)
// // Start/stop the container
// startContainer(t, hostedContainer)
// runCommand(t, hostedContainer, "cmd /s /c echo TestV2XenonWCOW", `c:\`, "TestV2XenonWCOW")
// stopContainer(t, hostedContainer)
// hostedContainer.Terminate()
//}
//// TODO: Have a similar test where the UVM scratch folder does not exist.
//// A single WCOW xenon but where the container sandbox folder is not pre-created by the client
//func TestV2XenonWCOWContainerSandboxFolderDoesNotExist(t *testing.T) {
// t.Skip("Skipping for now")
// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "TestV2XenonWCOWContainerSandboxFolderDoesNotExist_UVM", nil)
// defer os.RemoveAll(uvmScratchDir)
// defer uvm.Terminate()
// // This is the important bit for this test. It's deleted here. We call the helper only to allocate a temporary directory
// containerScratchDir := createTempDir(t)
// os.RemoveAll(containerScratchDir)
// defer os.RemoveAll(containerScratchDir) // As auto-created
// layerFolders := append(layersBusybox, containerScratchDir)
// hostedContainer, err := CreateContainer(&CreateOptions{
// Id: "container",
// HostingSystem: uvm,
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layerFolders}},
// })
// if err != nil {
// t.Fatalf("CreateContainer failed: %s", err)
// }
// defer unmountContainerLayers(layerFolders, uvm, unmountOperationAll)
// // Start/stop the container
// startContainer(t, hostedContainer)
// runCommand(t, hostedContainer, "cmd /s /c echo TestV2XenonWCOW", `c:\`, "TestV2XenonWCOW")
// stopContainer(t, hostedContainer)
// hostedContainer.Terminate()
//}
//// TODO What about mount. Test with the client doing the mount.
//// TODO Test as above, but where sandbox for UVM is entirely created by a client to show how it's done.
//// Two v2 WCOW containers in the same UVM, each with a single base layer
//func TestV2XenonWCOWTwoContainers(t *testing.T) {
// t.Skip("Skipping for now")
// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "TestV2XenonWCOWTwoContainers_UVM", nil)
// defer os.RemoveAll(uvmScratchDir)
// defer uvm.Terminate()
// // First hosted container
// firstContainerScratchDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(firstContainerScratchDir)
// firstLayerFolders := append(layersNanoserver, firstContainerScratchDir)
// firstHostedContainer, err := CreateContainer(&CreateOptions{
// Id: "FirstContainer",
// HostingSystem: uvm,
// SchemaVersion: schemaversion.SchemaV21(),
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: firstLayerFolders}},
// })
// if err != nil {
// t.Fatalf("CreateContainer failed: %s", err)
// }
// defer unmountContainerLayers(firstLayerFolders, uvm, unmountOperationAll)
// // Second hosted container
// secondContainerScratchDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(secondContainerScratchDir)
// secondLayerFolders := append(layersNanoserver, secondContainerScratchDir)
// secondHostedContainer, err := CreateContainer(&CreateOptions{
// Id: "SecondContainer",
// HostingSystem: uvm,
// SchemaVersion: schemaversion.SchemaV21(),
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: secondLayerFolders}},
// })
// if err != nil {
// t.Fatalf("CreateContainer failed: %s", err)
// }
// defer unmountContainerLayers(secondLayerFolders, uvm, unmountOperationAll)
// startContainer(t, firstHostedContainer)
// runCommand(t, firstHostedContainer, "cmd /s /c echo FirstContainer", `c:\`, "FirstContainer")
// startContainer(t, secondHostedContainer)
// runCommand(t, secondHostedContainer, "cmd /s /c echo SecondContainer", `c:\`, "SecondContainer")
// stopContainer(t, firstHostedContainer)
// stopContainer(t, secondHostedContainer)
// firstHostedContainer.Terminate()
// secondHostedContainer.Terminate()
//}
////// This verifies the container storage is unmounted correctly so that a second
////// container can be started from the same storage.
////func TestV2XenonWCOWWithRemount(t *testing.T) {
////// //t.Skip("Skipping for now")
//// uvmID := "Testv2XenonWCOWWithRestart_UVM"
//// uvmScratchDir, err := ioutil.TempDir("", "uvmScratch")
//// if err != nil {
//// t.Fatalf("Failed create temporary directory: %s", err)
//// }
//// if err := CreateWCOWSandbox(layersNanoserver[0], uvmScratchDir, uvmID); err != nil {
//// t.Fatalf("Failed create Windows UVM Sandbox: %s", err)
//// }
//// defer os.RemoveAll(uvmScratchDir)
//// uvm, err := CreateContainer(&CreateOptions{
//// Id: uvmID,
//// Owner: "unit-test",
//// SchemaVersion: SchemaV21(),
//// IsHostingSystem: true,
//// Spec: &specs.Spec{
//// Windows: &specs.Windows{
//// LayerFolders: []string{uvmScratchDir},
//// HyperV: &specs.WindowsHyperV{UtilityVMPath: filepath.Join(layersNanoserver[0], `UtilityVM\Files`)},
//// },
//// },
//// })
//// if err != nil {
//// t.Fatalf("Failed create UVM: %s", err)
//// }
//// defer uvm.Terminate()
//// if err := uvm.Start(); err != nil {
//// t.Fatalf("Failed start utility VM: %s", err)
//// }
//// // Mount the containers storage in the utility VM
//// containerScratchDir := createWCOWTempDirWithSandbox(t)
//// layerFolders := append(layersNanoserver, containerScratchDir)
//// cls, err := Mount(layerFolders, uvm, SchemaV21())
//// if err != nil {
//// t.Fatalf("failed to mount container storage: %s", err)
//// }
//// combinedLayers := cls.(CombinedLayersV2)
//// mountedLayers := &ContainersResourcesStorageV2{
//// Layers: combinedLayers.Layers,
//// Path: combinedLayers.ContainerRootPath,
//// }
//// defer func() {
//// if err := Unmount(layerFolders, uvm, SchemaV21(), unmountOperationAll); err != nil {
//// t.Fatalf("failed to unmount container storage: %s", err)
//// }
//// }()
//// // Create the first container
//// defer os.RemoveAll(containerScratchDir)
//// xenon, err := CreateContainer(&CreateOptions{
//// Id: "container",
//// Owner: "unit-test",
//// HostingSystem: uvm,
//// SchemaVersion: SchemaV21(),
//// Spec: &specs.Spec{Windows: &specs.Windows{}}, // No layerfolders as we mounted them ourself.
//// })
//// if err != nil {
//// t.Fatalf("CreateContainer failed: %s", err)
//// }
//// // Start/stop the first container
//// startContainer(t, xenon)
//// runCommand(t, xenon, "cmd /s /c echo TestV2XenonWCOWFirstStart", `c:\`, "TestV2XenonWCOWFirstStart")
//// stopContainer(t, xenon)
//// xenon.Terminate()
//// // Now unmount and remount to exactly the same places
//// if err := Unmount(layerFolders, uvm, SchemaV21(), unmountOperationAll); err != nil {
//// t.Fatalf("failed to unmount container storage: %s", err)
//// }
//// if _, err = Mount(layerFolders, uvm, SchemaV21()); err != nil {
//// t.Fatalf("failed to mount container storage: %s", err)
//// }
//// // Create an identical second container and verify it works too.
//// xenon2, err := CreateContainer(&CreateOptions{
//// Id: "container",
//// Owner: "unit-test",
//// HostingSystem: uvm,
//// SchemaVersion: SchemaV21(),
//// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layerFolders}},
//// MountedLayers: mountedLayers,
//// })
//// if err != nil {
//// t.Fatalf("CreateContainer failed: %s", err)
//// }
//// startContainer(t, xenon2)
//// runCommand(t, xenon2, "cmd /s /c echo TestV2XenonWCOWAfterRemount", `c:\`, "TestV2XenonWCOWAfterRemount")
//// stopContainer(t, xenon2)
//// xenon2.Terminate()
////}
//// Lots of v2 WCOW containers in the same UVM, each with a single base layer. Containers aren't
//// actually started, but it stresses the SCSI controller hot-add logic.
//func TestV2XenonWCOWCreateLots(t *testing.T) {
// t.Skip("Skipping for now")
// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "TestV2XenonWCOWCreateLots", nil)
// defer os.RemoveAll(uvmScratchDir)
// defer uvm.Terminate()
// // 63 as 0:0 is already taken as the UVMs scratch. So that leaves us with 64-1 left for container scratches on SCSI
// for i := 0; i < 63; i++ {
// containerScratchDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(containerScratchDir)
// layerFolders := append(layersNanoserver, containerScratchDir)
// hostedContainer, err := CreateContainer(&CreateOptions{
// Id: fmt.Sprintf("container%d", i),
// HostingSystem: uvm,
// SchemaVersion: schemaversion.SchemaV21(),
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layerFolders}},
// })
// if err != nil {
// t.Fatalf("CreateContainer failed: %s", err)
// }
// defer hostedContainer.Terminate()
// defer unmountContainerLayers(layerFolders, uvm, unmountOperationAll)
// }
// // TODO: Should check the internal structures here for VSMB and SCSI
// // TODO: Push it over 63 now and will get a failure.
//}
//// Helper for the v2 Xenon tests to create a utility VM. Returns the UtilityVM
//// object; folder used as its scratch
//func createv2WCOWUVM(t *testing.T, uvmLayers []string, uvmId string, resources *specs.WindowsResources) (*UtilityVM, string) {
// scratchDir := createTempDir(t)
// uvm := UtilityVM{
// OperatingSystem: "windows",
// LayerFolders: append(uvmLayers, scratchDir),
// Resources: resources,
// }
// if uvmId != "" {
// uvm.Id = uvmId
// }
// if err := uvm.Create(); err != nil {
// t.Fatalf("Failed create WCOW v2 UVM: %s", err)
// }
// if err := uvm.Start(); err != nil {
// t.Fatalf("Failed start WCOW v2UVM: %s", err)
// }
// return &uvm, scratchDir
//}
//// TestV2XenonWCOWMultiLayer creates a V2 Xenon having multiple image layers
//func TestV2XenonWCOWMultiLayer(t *testing.T) {
// t.Skip("for now")
// uvmMemory := uint64(1 * 1024 * 1024 * 1024)
// uvmCPUCount := uint64(2)
// resources := &specs.WindowsResources{
// Memory: &specs.WindowsMemoryResources{
// Limit: &uvmMemory,
// },
// CPU: &specs.WindowsCPUResources{
// Count: &uvmCPUCount,
// },
// }
// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "TestV2XenonWCOWMultiLayer_UVM", resources)
// defer os.RemoveAll(uvmScratchDir)
// defer uvm.Terminate()
// // Create a sandbox for the hosted container
// containerScratchDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(containerScratchDir)
// // Create the container. Note that this will auto-mount for us.
// containerLayers := append(layersBusybox, containerScratchDir)
// xenon, err := CreateContainer(&CreateOptions{
// Id: "container",
// HostingSystem: uvm,
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: containerLayers}},
// })
// if err != nil {
// t.Fatalf("CreateContainer failed: %s", err)
// }
// // Start/stop the container
// startContainer(t, xenon)
// runCommand(t, xenon, "echo Container", `c:\`, "Container")
// stopContainer(t, xenon)
// xenon.Terminate()
// // TODO Move this to a defer function to fail if it fails.
// if err := unmountContainerLayers(containerLayers, uvm, unmountOperationAll); err != nil {
// t.Fatalf("unmount failed: %s", err)
// }
//}
//// TestV2XenonWCOWSingleMappedDirectory tests a V2 Xenon WCOW with a single mapped directory
//func TestV2XenonWCOWSingleMappedDirectory(t *testing.T) {
// t.Skip("Skipping for now")
// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "", nil)
// defer os.RemoveAll(uvmScratchDir)
// defer uvm.Terminate()
// // Create the container hosted inside the utility VM
// containerScratchDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(containerScratchDir)
// layerFolders := append(layersNanoserver, containerScratchDir)
// // Create a temp folder containing foo.txt which will be used for the bind-mount test.
// source := createTempDir(t)
// defer os.RemoveAll(source)
// mount := specs.Mount{
// Source: source,
// Destination: `c:\foo`,
// }
// f, err := os.OpenFile(filepath.Join(source, "foo.txt"), os.O_RDWR|os.O_CREATE, 0755)
// f.Close()
// hostedContainer, err := CreateContainer(&CreateOptions{
// HostingSystem: uvm,
// Spec: &specs.Spec{
// Windows: &specs.Windows{LayerFolders: layerFolders},
// Mounts: []specs.Mount{mount},
// },
// })
// if err != nil {
// t.Fatalf("CreateContainer failed: %s", err)
// }
// defer unmountContainerLayers(layerFolders, uvm, unmountOperationAll)
// // TODO BUGBUG NEED TO UNMOUNT TO VSMB SHARE FOR THE CONTAINER
// // Start/stop the container
// startContainer(t, hostedContainer)
// runCommand(t, hostedContainer, `cmd /s /c dir /b c:\foo`, `c:\`, "foo.txt")
// stopContainer(t, hostedContainer)
// hostedContainer.Terminate()
//}

View File

@@ -23,7 +23,9 @@ type HNSEndpoint struct {
DisableICC bool `json:",omitempty"`
PrefixLength uint8 `json:",omitempty"`
IsRemoteEndpoint bool `json:",omitempty"`
EnableLowMetric bool `json:",omitempty"`
Namespace *Namespace `json:",omitempty"`
EncapOverhead uint16 `json:",omitempty"`
}
// SystemType represents the type of the system on which actions are done

View File

@@ -20,6 +20,7 @@ type ELBPolicy struct {
SourceVIP string `json:"SourceVIP,omitempty"`
VIPs []string `json:"VIPs,omitempty"`
ILB bool `json:"ILB,omitempty"`
DSR bool `json:"IsDSR,omitempty"`
}
// LBPolicy is a structure defining schema for LoadBalancing based Policy

View File

@@ -1,4 +1,4 @@
// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
// Code generated mksyscall_windows.exe DO NOT EDIT
package hns
@@ -6,7 +6,6 @@ import (
"syscall"
"unsafe"
"github.com/Microsoft/hcsshim/internal/interop"
"golang.org/x/sys/windows"
)
@@ -68,7 +67,10 @@ func __hnsCall(method *uint16, path *uint16, object *uint16, response **uint16)
}
r0, _, _ := syscall.Syscall6(procHNSCall.Addr(), 4, uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}

View File

@@ -5,9 +5,9 @@ import (
"unsafe"
)
//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go interop.go
//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go interop.go
//sys coTaskMemFree(buffer unsafe.Pointer) = ole32.CoTaskMemFree
//sys coTaskMemFree(buffer unsafe.Pointer) = api_ms_win_core_com_l1_1_0.CoTaskMemFree
func ConvertAndFreeCoTaskMemString(buffer *uint16) string {
str := syscall.UTF16ToString((*[1 << 29]uint16)(unsafe.Pointer(buffer))[:])

View File

@@ -1,4 +1,4 @@
// Code generated by 'go generate'; DO NOT EDIT.
// Code generated mksyscall_windows.exe DO NOT EDIT
package interop
@@ -37,9 +37,9 @@ func errnoErr(e syscall.Errno) error {
}
var (
modole32 = windows.NewLazySystemDLL("ole32.dll")
modapi_ms_win_core_com_l1_1_0 = windows.NewLazySystemDLL("api-ms-win-core-com-l1-1-0.dll")
procCoTaskMemFree = modole32.NewProc("CoTaskMemFree")
procCoTaskMemFree = modapi_ms_win_core_com_l1_1_0.NewProc("CoTaskMemFree")
)
func coTaskMemFree(buffer unsafe.Pointer) {

View File

@@ -0,0 +1,9 @@
package lcow
const (
// DefaultScratchSizeGB is the size of the default LCOW scratch disk in GB
DefaultScratchSizeGB = 20
// defaultVhdxBlockSizeMB is the block-size for the scratch VHDx's this package can create.
defaultVhdxBlockSizeMB = 1
)

View File

@@ -0,0 +1,55 @@
package lcow
//func debugCommand(s string) string {
// return fmt.Sprintf(`echo -e 'DEBUG COMMAND: %s\\n--------------\\n';%s;echo -e '\\n\\n';`, s, s)
//}
// DebugLCOWGCS extracts logs from the GCS in LCOW. It's a useful hack for debugging;
// it's not necessarily optimal, but it's all that is available to us in RS3.
//func (container *container) DebugLCOWGCS() {
// if logrus.GetLevel() < logrus.DebugLevel || len(os.Getenv("HCSSHIM_LCOW_DEBUG_ENABLE")) == 0 {
// return
// }
// var out bytes.Buffer
// cmd := os.Getenv("HCSSHIM_LCOW_DEBUG_COMMAND")
// if cmd == "" {
// cmd = `sh -c "`
// cmd += debugCommand("kill -10 `pidof gcs`") // SIGUSR1 for stackdump
// cmd += debugCommand("ls -l /tmp")
// cmd += debugCommand("cat /tmp/gcs.log")
// cmd += debugCommand("cat /tmp/gcs/gcs-stacks*")
// cmd += debugCommand("cat /tmp/gcs/paniclog*")
// cmd += debugCommand("ls -l /tmp/gcs")
// cmd += debugCommand("ls -l /tmp/gcs/*")
// cmd += debugCommand("cat /tmp/gcs/*/config.json")
// cmd += debugCommand("ls -lR /var/run/gcsrunc")
// cmd += debugCommand("cat /tmp/gcs/global-runc.log")
// cmd += debugCommand("cat /tmp/gcs/*/runc.log")
// cmd += debugCommand("ps -ef")
// cmd += `"`
// }
// proc, _, err := container.CreateProcessEx(
// &CreateProcessEx{
// OCISpecification: &specs.Spec{
// Process: &specs.Process{Args: []string{cmd}},
// Linux: &specs.Linux{},
// },
// CreateInUtilityVm: true,
// Stdout: &out,
// })
// defer func() {
// if proc != nil {
// proc.Kill()
// proc.Close()
// }
// }()
// if err != nil {
// logrus.Debugln("benign failure getting gcs logs: ", err)
// }
// if proc != nil {
// proc.WaitTimeout(time.Duration(int(time.Second) * 30))
// }
// logrus.Debugf("GCS Debugging:\n%s\n\nEnd GCS Debugging", strings.TrimSpace(out.String()))
//}

View File

@@ -0,0 +1,161 @@
package lcow
import (
"fmt"
"io"
"strings"
"time"
"github.com/Microsoft/hcsshim/internal/copywithtimeout"
"github.com/Microsoft/hcsshim/internal/hcs"
"github.com/Microsoft/hcsshim/internal/schema2"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
)
// ByteCounts are the number of bytes copied to/from standard handles. Note
// this is int64 rather than uint64 to match the golang io.Copy() signature.
type ByteCounts struct {
In int64
Out int64
Err int64
}
// ProcessOptions are the set of options which are passed to CreateProcess() to
// create a process in a compute system or utility VM.
type ProcessOptions struct {
HCSSystem *hcs.System
Process *specs.Process
Stdin io.Reader // Optional reader for sending on to the process's stdin stream
Stdout io.Writer // Optional writer for returning the process's stdout stream
Stderr io.Writer // Optional writer for returning the process's stderr stream
CopyTimeout time.Duration // Timeout for the copy
CreateInUtilityVm bool // If the compute system is a utility VM
ByteCounts ByteCounts // How much data to copy on each stream if they are supplied. 0 means to io.EOF.
}
// CreateProcess creates a process either in an LCOW utility VM, or for starting
// the init process. TODO: Potentially extend for exec'd processes.
//
// It's essentially a glorified wrapper around hcs.ComputeSystem CreateProcess used
// for internal purposes.
//
// This is used on LCOW to run processes for remote filesystem commands, utilities,
// and debugging.
//
// It optionally performs IO copies with timeout between the pipes provided as input,
// and the pipes in the process.
//
// In the ProcessOptions structure, if byte-counts are non-zero, a maximum of those
// bytes are copied to the appropriate standard IO reader/writer. When zero,
// it copies until EOF. It also returns byte-counts indicating how much data
// was sent/received from the process.
//
// It is the responsibility of the caller to call Close() on the process returned.
func CreateProcess(opts *ProcessOptions) (*hcs.Process, *ByteCounts, error) {
var environment = make(map[string]string)
copiedByteCounts := &ByteCounts{}
if opts == nil {
return nil, nil, fmt.Errorf("no options supplied")
}
if opts.HCSSystem == nil {
return nil, nil, fmt.Errorf("no HCS system supplied")
}
if opts.CreateInUtilityVm && opts.Process == nil {
return nil, nil, fmt.Errorf("process must be supplied for UVM process")
}
// Don't pass a process in if this is an LCOW container. This will start the init process.
if opts.Process != nil {
for _, v := range opts.Process.Env {
s := strings.SplitN(v, "=", 2)
if len(s) == 2 && len(s[1]) > 0 {
environment[s[0]] = s[1]
}
}
if _, ok := environment["PATH"]; !ok {
environment["PATH"] = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:"
}
}
processConfig := &ProcessParameters{
ProcessParameters: hcsschema.ProcessParameters{
CreateStdInPipe: (opts.Stdin != nil),
CreateStdOutPipe: (opts.Stdout != nil),
CreateStdErrPipe: (opts.Stderr != nil),
EmulateConsole: false,
},
CreateInUtilityVm: opts.CreateInUtilityVm,
}
if opts.Process != nil {
processConfig.Environment = environment
processConfig.CommandLine = strings.Join(opts.Process.Args, " ")
processConfig.WorkingDirectory = opts.Process.Cwd
if processConfig.WorkingDirectory == "" {
processConfig.WorkingDirectory = `/`
}
}
proc, err := opts.HCSSystem.CreateProcess(processConfig)
if err != nil {
logrus.Debugf("failed to create process: %s", err)
return nil, nil, err
}
processStdin, processStdout, processStderr, err := proc.Stdio()
if err != nil {
proc.Kill() // Should this have a timeout?
proc.Close()
return nil, nil, fmt.Errorf("failed to get stdio pipes for process %+v: %s", processConfig, err)
}
// Send the data into the process's stdin
if opts.Stdin != nil {
if copiedByteCounts.In, err = copywithtimeout.Copy(processStdin,
opts.Stdin,
opts.ByteCounts.In,
"stdin",
opts.CopyTimeout); err != nil {
return nil, nil, err
}
// Don't need stdin now we've sent everything. This signals GCS that we are finished sending data.
if err := proc.CloseStdin(); err != nil && !hcs.IsNotExist(err) && !hcs.IsAlreadyClosed(err) {
// This error will occur if the compute system is currently shutting down
if perr, ok := err.(*hcs.ProcessError); ok && perr.Err != hcs.ErrVmcomputeOperationInvalidState {
return nil, nil, err
}
}
}
// Copy the data back from stdout
if opts.Stdout != nil {
// Copy the data over to the writer.
if copiedByteCounts.Out, err = copywithtimeout.Copy(opts.Stdout,
processStdout,
opts.ByteCounts.Out,
"stdout",
opts.CopyTimeout); err != nil {
return nil, nil, err
}
}
// Copy the data back from stderr
if opts.Stderr != nil {
// Copy the data over to the writer.
if copiedByteCounts.Err, err = copywithtimeout.Copy(opts.Stderr,
processStderr,
opts.ByteCounts.Err,
"stderr",
opts.CopyTimeout); err != nil {
return nil, nil, err
}
}
return proc, copiedByteCounts, nil
}
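For illustration only, a hedged usage sketch of CreateProcess: run a short command inside an already-started LCOW utility VM and capture its stdout, following the same pattern CreateScratch uses in the next file. The command and the timeout are assumptions, and these internal packages are only importable from within the hcsshim tree.

package example

import (
	"bytes"
	"time"

	"github.com/Microsoft/hcsshim/internal/lcow"
	"github.com/Microsoft/hcsshim/internal/uvm"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// runInUVM runs `ls /tmp` in a running LCOW utility VM and returns its
// stdout. The 30-second timeouts are arbitrary choices for the example.
func runInUVM(lcowUVM *uvm.UtilityVM) (string, error) {
	var out bytes.Buffer
	proc, _, err := lcow.CreateProcess(&lcow.ProcessOptions{
		HCSSystem:         lcowUVM.ComputeSystem(),
		CreateInUtilityVm: true,
		Process:           &specs.Process{Args: []string{"ls", "/tmp"}},
		Stdout:            &out,
		CopyTimeout:       30 * time.Second,
	})
	if err != nil {
		return "", err
	}
	defer proc.Close()
	proc.WaitTimeout(30 * time.Second)
	return out.String(), nil
}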

View File

@@ -0,0 +1,168 @@
package lcow
import (
"bytes"
"fmt"
"os"
"strings"
"time"
"github.com/Microsoft/go-winio/vhd"
"github.com/Microsoft/hcsshim/internal/copyfile"
"github.com/Microsoft/hcsshim/internal/timeout"
"github.com/Microsoft/hcsshim/internal/uvm"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
)
// CreateScratch uses a utility VM to create an empty scratch disk of a requested size.
// It has a caching capability. If the cacheFile exists, and the request is for a default
// size, a copy of that is made to the target. If the size is non-default, or the cache file
does not exist, it uses a utility VM to create the target. It is the responsibility of the
// caller to synchronise simultaneous attempts to create the cache file.
func CreateScratch(lcowUVM *uvm.UtilityVM, destFile string, sizeGB uint32, cacheFile string, vmID string) error {
if lcowUVM == nil {
return fmt.Errorf("no uvm")
}
if lcowUVM.OS() != "linux" {
return fmt.Errorf("CreateLCOWScratch requires a linux utility VM to operate!")
}
// Smallest we can accept is the default scratch size as we can't size down, only expand.
if sizeGB < DefaultScratchSizeGB {
sizeGB = DefaultScratchSizeGB
}
logrus.Debugf("hcsshim::CreateLCOWScratch: Dest:%s size:%dGB cache:%s", destFile, sizeGB, cacheFile)
// Retrieve from cache if the default size and already on disk
if cacheFile != "" && sizeGB == DefaultScratchSizeGB {
if _, err := os.Stat(cacheFile); err == nil {
if err := copyfile.CopyFile(cacheFile, destFile, false); err != nil {
return fmt.Errorf("failed to copy cached file '%s' to '%s': %s", cacheFile, destFile, err)
}
logrus.Debugf("hcsshim::CreateLCOWScratch: %s fulfilled from cache (%s)", destFile, cacheFile)
return nil
}
}
// Create the VHDX
if err := vhd.CreateVhdx(destFile, sizeGB, defaultVhdxBlockSizeMB); err != nil {
return fmt.Errorf("failed to create VHDx %s: %s", destFile, err)
}
controller, lun, err := lcowUVM.AddSCSI(destFile, "", false) // No destination as not formatted
if err != nil {
return err
}
logrus.Debugf("hcsshim::CreateLCOWScratch: %s at C=%d L=%d", destFile, controller, lun)
// Validate /sys/bus/scsi/devices/C:0:0:L exists as a directory
startTime := time.Now()
for {
testdCommand := []string{"test", "-d", fmt.Sprintf("/sys/bus/scsi/devices/%d:0:0:%d", controller, lun)}
testdProc, _, err := CreateProcess(&ProcessOptions{
HCSSystem: lcowUVM.ComputeSystem(),
CreateInUtilityVm: true,
CopyTimeout: timeout.ExternalCommandToStart,
Process: &specs.Process{Args: testdCommand},
})
if err != nil {
lcowUVM.RemoveSCSI(destFile)
return fmt.Errorf("failed to run %+v following hot-add %s to utility VM: %s", testdCommand, destFile, err)
}
defer testdProc.Close()
testdProc.WaitTimeout(timeout.ExternalCommandToComplete)
testdExitCode, err := testdProc.ExitCode()
if err != nil {
lcowUVM.RemoveSCSI(destFile)
return fmt.Errorf("failed to get exit code from from %+v following hot-add %s to utility VM: %s", testdCommand, destFile, err)
}
if testdExitCode != 0 {
currentTime := time.Now()
elapsedTime := currentTime.Sub(startTime)
if elapsedTime > timeout.TestDRetryLoop {
lcowUVM.RemoveSCSI(destFile)
return fmt.Errorf("`%+v` return non-zero exit code (%d) following hot-add %s to utility VM", testdCommand, testdExitCode, destFile)
}
} else {
break
}
time.Sleep(time.Millisecond * 10)
}
// Get the device from under the block subdirectory by doing a simple ls. This will come back as (eg) `sda`
var lsOutput bytes.Buffer
lsCommand := []string{"ls", fmt.Sprintf("/sys/bus/scsi/devices/%d:0:0:%d/block", controller, lun)}
lsProc, _, err := CreateProcess(&ProcessOptions{
HCSSystem: lcowUVM.ComputeSystem(),
CreateInUtilityVm: true,
CopyTimeout: timeout.ExternalCommandToStart,
Process: &specs.Process{Args: lsCommand},
Stdout: &lsOutput,
})
if err != nil {
lcowUVM.RemoveSCSI(destFile)
return fmt.Errorf("failed to `%+v` following hot-add %s to utility VM: %s", lsCommand, destFile, err)
}
defer lsProc.Close()
lsProc.WaitTimeout(timeout.ExternalCommandToComplete)
lsExitCode, err := lsProc.ExitCode()
if err != nil {
lcowUVM.RemoveSCSI(destFile)
return fmt.Errorf("failed to get exit code from `%+v` following hot-add %s to utility VM: %s", lsCommand, destFile, err)
}
if lsExitCode != 0 {
lcowUVM.RemoveSCSI(destFile)
return fmt.Errorf("`%+v` return non-zero exit code (%d) following hot-add %s to utility VM", lsCommand, lsExitCode, destFile)
}
device := fmt.Sprintf(`/dev/%s`, strings.TrimSpace(lsOutput.String()))
logrus.Debugf("hcsshim: CreateExt4Vhdx: %s: device at %s", destFile, device)
// Format it ext4
mkfsCommand := []string{"mkfs.ext4", "-q", "-E", "lazy_itable_init=1", "-O", `^has_journal,sparse_super2,uninit_bg,^resize_inode`, device}
var mkfsStderr bytes.Buffer
mkfsProc, _, err := CreateProcess(&ProcessOptions{
HCSSystem: lcowUVM.ComputeSystem(),
CreateInUtilityVm: true,
CopyTimeout: timeout.ExternalCommandToStart,
Process: &specs.Process{Args: mkfsCommand},
Stderr: &mkfsStderr,
})
if err != nil {
lcowUVM.RemoveSCSI(destFile)
return fmt.Errorf("failed to `%+v` following hot-add %s to utility VM: %s", mkfsCommand, destFile, err)
}
defer mkfsProc.Close()
mkfsProc.WaitTimeout(timeout.ExternalCommandToComplete)
mkfsExitCode, err := mkfsProc.ExitCode()
if err != nil {
lcowUVM.RemoveSCSI(destFile)
return fmt.Errorf("failed to get exit code from `%+v` following hot-add %s to utility VM: %s", mkfsCommand, destFile, err)
}
if mkfsExitCode != 0 {
lcowUVM.RemoveSCSI(destFile)
return fmt.Errorf("`%+v` return non-zero exit code (%d) following hot-add %s to utility VM: %s", mkfsCommand, mkfsExitCode, destFile, strings.TrimSpace(mkfsStderr.String()))
}
// Hot-Remove before we copy it
if err := lcowUVM.RemoveSCSI(destFile); err != nil {
return fmt.Errorf("failed to hot-remove: %s", err)
}
// Populate the cache.
if cacheFile != "" && (sizeGB == DefaultScratchSizeGB) {
if err := copyfile.CopyFile(destFile, cacheFile, true); err != nil {
return fmt.Errorf("failed to seed cache '%s' from '%s': %s", destFile, cacheFile, err)
}
}
logrus.Debugf("hcsshim::CreateLCOWScratch: %s created (non-cache)", destFile)
return nil
}
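For illustration only, a hedged usage sketch of CreateScratch. The destination and cache paths and the VM ID are invented; the cache is consulted (and seeded) only when the request is for the default size, as the doc comment above explains.

package example

import (
	"github.com/Microsoft/hcsshim/internal/lcow"
	"github.com/Microsoft/hcsshim/internal/uvm"
)

// makeScratch creates (or copies from cache) a default-size ext4 scratch
// disk for a container, using an already-started LCOW utility VM.
func makeScratch(lcowUVM *uvm.UtilityVM) error {
	return lcow.CreateScratch(
		lcowUVM,
		`C:\layers\c1\sandbox.vhdx`,    // destination VHDX; invented path
		lcow.DefaultScratchSizeGB,      // default size, so the cache applies
		`C:\layers\cache\scratch.vhdx`, // cache file; invented path
		"example-vm",
	)
}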

View File

@@ -0,0 +1,46 @@
package lcow
import (
"fmt"
"io"
"os"
"time"
"github.com/Microsoft/hcsshim/internal/uvm"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
)
// TarToVhd streams a tar stream contained in an io.Reader to a fixed vhd file
func TarToVhd(lcowUVM *uvm.UtilityVM, targetVHDFile string, reader io.Reader) (int64, error) {
logrus.Debugf("hcsshim: TarToVhd: %s", targetVHDFile)
if lcowUVM == nil {
return 0, fmt.Errorf("no utility VM passed")
}
//defer uvm.DebugLCOWGCS()
outFile, err := os.Create(targetVHDFile)
if err != nil {
return 0, fmt.Errorf("tar2vhd failed to create %s: %s", targetVHDFile, err)
}
defer outFile.Close()
// BUGBUG Delete the file on failure
tar2vhd, byteCounts, err := CreateProcess(&ProcessOptions{
HCSSystem: lcowUVM.ComputeSystem(),
Process: &specs.Process{Args: []string{"tar2vhd"}},
CreateInUtilityVm: true,
Stdin: reader,
Stdout: outFile,
CopyTimeout: 2 * time.Minute,
})
if err != nil {
return 0, fmt.Errorf("failed to start tar2vhd for %s: %s", targetVHDFile, err)
}
defer tar2vhd.Close()
logrus.Debugf("hcsshim: TarToVhd: %s created, %d bytes", targetVHDFile, byteCounts.Out)
return byteCounts.Out, err
}
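For illustration only, a hedged usage sketch of TarToVhd: stream a layer tarball from disk into a fixed VHD via the tar2vhd helper inside the utility VM. Both file paths are invented.

package example

import (
	"os"

	"github.com/Microsoft/hcsshim/internal/lcow"
	"github.com/Microsoft/hcsshim/internal/uvm"
)

// importLayerVHD converts an on-disk tar into a VHD and returns the number
// of bytes written, as reported by TarToVhd above.
func importLayerVHD(lcowUVM *uvm.UtilityVM) (int64, error) {
	tarFile, err := os.Open(`C:\layers\layer.tar`)
	if err != nil {
		return 0, err
	}
	defer tarFile.Close()
	return lcow.TarToVhd(lcowUVM, `C:\layers\layer.vhd`, tarFile)
}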

View File

@@ -0,0 +1,11 @@
package lcow
import "github.com/Microsoft/hcsshim/internal/schema2"
// ProcessParameters is hcsschema.ProcessParameters with additional fields used by LCOW
type ProcessParameters struct {
hcsschema.ProcessParameters
CreateInUtilityVm bool `json:",omitempty"`
OCIProcess interface{} `json:"OciProcess,omitempty"`
}

View File

@@ -0,0 +1,75 @@
package lcow
import (
"fmt"
"io"
// "os"
"github.com/Microsoft/hcsshim/internal/uvm"
// specs "github.com/opencontainers/runtime-spec/specs-go"
// "github.com/sirupsen/logrus"
)
// VhdToTar does what it says - it exports a VHD in a specified
// folder (either a read-only layer.vhd, or a read-write scratch vhdx) to a
// ReadCloser containing a tar-stream of the layers contents.
func VhdToTar(lcowUVM *uvm.UtilityVM, vhdFile string, uvmMountPath string, isContainerScratch bool, vhdSize int64) (io.ReadCloser, error) {
return nil, fmt.Errorf("not implemented yet")
// logrus.Debugf("hcsshim: VhdToTar: %s isScratch: %t", vhdFile, isContainerScratch)
// if lcowUVM == nil {
// return nil, fmt.Errorf("cannot VhdToTar as no utility VM is in configuration")
// }
// //defer uvm.DebugLCOWGCS()
// vhdHandle, err := os.Open(vhdFile)
// if err != nil {
// return nil, fmt.Errorf("hcsshim: VhdToTar: failed to open %s: %s", vhdFile, err)
// }
// defer vhdHandle.Close()
// logrus.Debugf("hcsshim: VhdToTar: exporting %s, size %d, isScratch %t", vhdHandle.Name(), vhdSize, isContainerScratch)
// // Different binary depending on whether a RO layer or a RW scratch
// command := "vhd2tar"
// if isContainerScratch {
// command = fmt.Sprintf("exportSandbox -path %s", uvmMountPath)
// }
// // tar2vhd, byteCounts, err := lcowUVM.CreateProcess(&uvm.ProcessOptions{
// // Process: &specs.Process{Args: []string{"tar2vhd"}},
// // Stdin: reader,
// // Stdout: outFile,
// // })
// // Start the binary in the utility VM
// proc, stdin, stdout, _, err := config.createLCOWUVMProcess(command)
// if err != nil {
// return nil, fmt.Errorf("hcsshim: VhdToTar: %s: failed to create utils process %s: %s", vhdHandle.Name(), command, err)
// }
// if !isContainerScratch {
// // Send the VHD contents to the utility VM processes stdin handle if not a container scratch
// logrus.Debugf("hcsshim: VhdToTar: copying the layer VHD into the utility VM")
// if _, err = copyWithTimeout(stdin, vhdHandle, vhdSize, processOperationTimeoutSeconds, fmt.Sprintf("vhdtotarstream: sending %s to %s", vhdHandle.Name(), command)); err != nil {
// proc.Close()
// return nil, fmt.Errorf("hcsshim: VhdToTar: %s: failed to copyWithTimeout on the stdin pipe (to utility VM): %s", vhdHandle.Name(), err)
// }
// }
// // Start a goroutine which copies the stdout (ie the tar stream)
// reader, writer := io.Pipe()
// go func() {
// defer writer.Close()
// defer proc.Close()
// logrus.Debugf("hcsshim: VhdToTar: copying tar stream back from the utility VM")
// bytes, err := copyWithTimeout(writer, stdout, vhdSize, processOperationTimeoutSeconds, fmt.Sprintf("vhdtotarstream: copy tarstream from %s", command))
// if err != nil {
// logrus.Errorf("hcsshim: VhdToTar: %s: copyWithTimeout on the stdout pipe (from utility VM) failed: %s", vhdHandle.Name(), err)
// }
// logrus.Debugf("hcsshim: VhdToTar: copied %d bytes of the tarstream of %s from the utility VM", bytes, vhdHandle.Name())
// }()
// // Return the read-side of the pipe connected to the goroutine which is reading from the stdout of the process in the utility VM
// return reader, nil
}

View File

@@ -0,0 +1,37 @@
package logfields
const (
// Identifiers
ContainerID = "cid"
UVMID = "uvm-id"
ProcessID = "pid"
// Common Misc
// Timeout represents an operation timeout.
Timeout = "timeout"
JSON = "json"
// Keys/values
Field = "field"
OCIAnnotation = "oci-annotation"
Value = "value"
// Golang types
ExpectedType = "expected-type"
Bool = "bool"
Uint32 = "uint32"
Uint64 = "uint64"
// HCS
HCSOperation = "hcs-op"
HCSOperationResult = "hcs-op-result"
// runhcs
VMShimOperation = "vmshim-op"
)
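For illustration only: a sketch of how these constants are meant to be used as logrus field keys, so log lines stay consistently machine-parseable across the shim. The field values below are invented.

package example

import (
	"github.com/Microsoft/hcsshim/internal/logfields"
	"github.com/sirupsen/logrus"
)

// logExample attaches the shared field keys to a structured log line.
func logExample() {
	logrus.WithFields(logrus.Fields{
		logfields.ContainerID: "c1",    // key "cid"
		logfields.UVMID:       "vm-42", // key "uvm-id"
		logfields.Timeout:     "30s",
	}).Debug("hot-adding SCSI disk")
}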

View File

@@ -0,0 +1,79 @@
// Package ociwclayer provides functions for importing and exporting Windows
// container layers from and to their OCI tar representation.
package ociwclayer
import (
"io"
"path/filepath"
"github.com/Microsoft/go-winio/archive/tar"
"github.com/Microsoft/go-winio/backuptar"
"github.com/Microsoft/hcsshim"
)
var driverInfo = hcsshim.DriverInfo{}
// ExportLayer writes an OCI layer tar stream from the provided on-disk layer.
// The caller must specify the parent layers, if any, ordered from lowest to
// highest layer.
//
// The layer will be mounted for this process, so the caller should ensure that
// it is not currently mounted.
func ExportLayer(w io.Writer, path string, parentLayerPaths []string) error {
err := hcsshim.ActivateLayer(driverInfo, path)
if err != nil {
return err
}
defer hcsshim.DeactivateLayer(driverInfo, path)
// Prepare and unprepare the layer to ensure that it has been initialized.
err = hcsshim.PrepareLayer(driverInfo, path, parentLayerPaths)
if err != nil {
return err
}
err = hcsshim.UnprepareLayer(driverInfo, path)
if err != nil {
return err
}
r, err := hcsshim.NewLayerReader(driverInfo, path, parentLayerPaths)
if err != nil {
return err
}
err = writeTarFromLayer(r, w)
cerr := r.Close()
if err != nil {
return err
}
return cerr
}
func writeTarFromLayer(r hcsshim.LayerReader, w io.Writer) error {
t := tar.NewWriter(w)
for {
name, size, fileInfo, err := r.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
if fileInfo == nil {
// Write a whiteout file.
hdr := &tar.Header{
Name: filepath.ToSlash(filepath.Join(filepath.Dir(name), whiteoutPrefix+filepath.Base(name))),
}
err := t.WriteHeader(hdr)
if err != nil {
return err
}
} else {
err = backuptar.WriteTarFileFromBackupStream(t, r, name, size, fileInfo)
if err != nil {
return err
}
}
}
return t.Close()
}
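For illustration only, a hedged usage sketch of ExportLayer: write one on-disk layer out as an OCI tar stream. The layer paths are invented, and the parents are ordered lowest to highest as the doc comment above requires.

package example

import (
	"os"

	"github.com/Microsoft/hcsshim/internal/ociwclayer"
)

// exportExample writes the OCI tar stream for a single layer to a file.
func exportExample() error {
	f, err := os.Create(`C:\out\layer.tar`)
	if err != nil {
		return err
	}
	defer f.Close()
	return ociwclayer.ExportLayer(f, `C:\layers\top`, []string{`C:\layers\base`})
}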

View File

@@ -0,0 +1,141 @@
package ociwclayer
import (
"bufio"
"io"
"os"
"path"
"path/filepath"
"strings"
winio "github.com/Microsoft/go-winio"
"github.com/Microsoft/go-winio/archive/tar"
"github.com/Microsoft/go-winio/backuptar"
"github.com/Microsoft/hcsshim"
)
const whiteoutPrefix = ".wh."
var (
// mutatedFiles is a list of files that are mutated by the import process
// and must be backed up and restored.
mutatedFiles = map[string]string{
"UtilityVM/Files/EFI/Microsoft/Boot/BCD": "bcd.bak",
"UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG": "bcd.log.bak",
"UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG1": "bcd.log1.bak",
"UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG2": "bcd.log2.bak",
}
)
// ImportLayer reads a layer from an OCI layer tar stream and extracts it to the
// specified path. The caller must specify the parent layers, if any, ordered
// from lowest to highest layer.
//
// The caller must ensure that the thread or process has acquired backup and
// restore privileges.
//
// This function returns the total size of the layer's files, in bytes.
func ImportLayer(r io.Reader, path string, parentLayerPaths []string) (int64, error) {
err := os.MkdirAll(path, 0)
if err != nil {
return 0, err
}
w, err := hcsshim.NewLayerWriter(hcsshim.DriverInfo{}, path, parentLayerPaths)
if err != nil {
return 0, err
}
n, err := writeLayerFromTar(r, w, path)
cerr := w.Close()
if err != nil {
return 0, err
}
if cerr != nil {
return 0, cerr
}
return n, nil
}
func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64, error) {
t := tar.NewReader(r)
hdr, err := t.Next()
totalSize := int64(0)
buf := bufio.NewWriter(nil)
for err == nil {
base := path.Base(hdr.Name)
if strings.HasPrefix(base, whiteoutPrefix) {
name := path.Join(path.Dir(hdr.Name), base[len(whiteoutPrefix):])
err = w.Remove(filepath.FromSlash(name))
if err != nil {
return 0, err
}
hdr, err = t.Next()
} else if hdr.Typeflag == tar.TypeLink {
err = w.AddLink(filepath.FromSlash(hdr.Name), filepath.FromSlash(hdr.Linkname))
if err != nil {
return 0, err
}
hdr, err = t.Next()
} else {
var (
name string
size int64
fileInfo *winio.FileBasicInfo
)
name, size, fileInfo, err = backuptar.FileInfoFromHeader(hdr)
if err != nil {
return 0, err
}
err = w.Add(filepath.FromSlash(name), fileInfo)
if err != nil {
return 0, err
}
hdr, err = writeBackupStreamFromTarAndSaveMutatedFiles(buf, w, t, hdr, root)
totalSize += size
}
}
if err != io.EOF {
return 0, err
}
return totalSize, nil
}
// writeBackupStreamFromTarAndSaveMutatedFiles reads data from a tar stream and
// writes it to a backup stream, and also saves any files that will be mutated
// by the import layer process to a backup location.
func writeBackupStreamFromTarAndSaveMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) {
var bcdBackup *os.File
var bcdBackupWriter *winio.BackupFileWriter
if backupPath, ok := mutatedFiles[hdr.Name]; ok {
bcdBackup, err = os.Create(filepath.Join(root, backupPath))
if err != nil {
return nil, err
}
defer func() {
cerr := bcdBackup.Close()
if err == nil {
err = cerr
}
}()
bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false)
defer func() {
cerr := bcdBackupWriter.Close()
if err == nil {
err = cerr
}
}()
buf.Reset(io.MultiWriter(w, bcdBackupWriter))
} else {
buf.Reset(w)
}
defer func() {
ferr := buf.Flush()
if err == nil {
err = ferr
}
}()
return backuptar.WriteBackupStreamFromTarFile(buf, t, hdr)
}
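For illustration only, a hedged usage sketch of ImportLayer. The caller must hold backup and restore privileges as the doc comment above notes; go-winio's EnableProcessPrivileges is one way to acquire them. Paths are invented.

package example

import (
	"os"

	winio "github.com/Microsoft/go-winio"
	"github.com/Microsoft/hcsshim/internal/ociwclayer"
)

// importExample extracts an OCI layer tar to a new on-disk layer and
// returns the total size of the layer's files in bytes.
func importExample() (int64, error) {
	privs := []string{"SeBackupPrivilege", "SeRestorePrivilege"}
	if err := winio.EnableProcessPrivileges(privs); err != nil {
		return 0, err
	}
	f, err := os.Open(`C:\in\layer.tar`)
	if err != nil {
		return 0, err
	}
	defer f.Close()
	return ociwclayer.ImportLayer(f, `C:\layers\new`, []string{`C:\layers\base`})
}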

View File

@@ -0,0 +1,14 @@
package ospath
import (
"path"
"path/filepath"
)
// Join joins paths using the target OS's path separator.
func Join(os string, elem ...string) string {
if os == "windows" {
return filepath.Join(elem...)
}
return path.Join(elem...)
}
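For illustration only: the separator is chosen by the target OS string, not by the platform the shim runs on, so host paths and guest paths can be joined with one helper. Outputs shown assume a Windows build of this tree.

package example

import "github.com/Microsoft/hcsshim/internal/ospath"

// ospathExamples joins a Windows host path and a Linux guest path.
func ospathExamples() (string, string) {
	w := ospath.Join("windows", `C:\run`, "gcs") // C:\run\gcs
	l := ospath.Join("linux", "/run", "gcs")     // /run/gcs
	return w, l
}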

View File

@@ -0,0 +1,287 @@
package regstate
import (
"encoding/json"
"fmt"
"net/url"
"os"
"path/filepath"
"reflect"
"syscall"
"golang.org/x/sys/windows/registry"
)
//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go regstate.go
//sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW
const (
_REG_OPTION_VOLATILE = 1
_REG_CREATED_NEW_KEY = 1
_REG_OPENED_EXISTING_KEY = 2
)
type Key struct {
registry.Key
Name string
}
var localMachine = &Key{registry.LOCAL_MACHINE, "HKEY_LOCAL_MACHINE"}
var localUser = &Key{registry.CURRENT_USER, "HKEY_CURRENT_USER"}
var rootPath = `SOFTWARE\Microsoft\runhcs`
type NotFoundError struct {
Id string
}
func (err *NotFoundError) Error() string {
return fmt.Sprintf("ID '%s' was not found", err.Id)
}
func IsNotFoundError(err error) bool {
_, ok := err.(*NotFoundError)
return ok
}
type NoStateError struct {
ID string
Key string
}
func (err *NoStateError) Error() string {
return fmt.Sprintf("state '%s' is not present for ID '%s'", err.Key, err.ID)
}
func createVolatileKey(k *Key, path string, access uint32) (newk *Key, openedExisting bool, err error) {
var (
h syscall.Handle
d uint32
)
fullpath := filepath.Join(k.Name, path)
err = regCreateKeyEx(syscall.Handle(k.Key), syscall.StringToUTF16Ptr(path), 0, nil, _REG_OPTION_VOLATILE, access, nil, &h, &d)
if err != nil {
return nil, false, &os.PathError{Op: "RegCreateKeyEx", Path: fullpath, Err: err}
}
return &Key{registry.Key(h), fullpath}, d == _REG_OPENED_EXISTING_KEY, nil
}
func hive(perUser bool) *Key {
r := localMachine
if perUser {
r = localUser
}
return r
}
func Open(root string, perUser bool) (*Key, error) {
k, _, err := createVolatileKey(hive(perUser), rootPath, registry.ALL_ACCESS)
if err != nil {
return nil, err
}
defer k.Close()
k2, _, err := createVolatileKey(k, url.PathEscape(root), registry.ALL_ACCESS)
if err != nil {
return nil, err
}
return k2, nil
}
func RemoveAll(root string, perUser bool) error {
k, err := hive(perUser).open(rootPath)
if err != nil {
return err
}
defer k.Close()
r, err := k.open(url.PathEscape(root))
if err != nil {
return err
}
defer r.Close()
ids, err := r.Enumerate()
if err != nil {
return err
}
for _, id := range ids {
err = r.Remove(id)
if err != nil {
return err
}
}
r.Close()
return k.Remove(root)
}
func (k *Key) Close() error {
err := k.Key.Close()
k.Key = 0
return err
}
func (k *Key) Enumerate() ([]string, error) {
escapedIDs, err := k.ReadSubKeyNames(0)
if err != nil {
return nil, err
}
var ids []string
for _, e := range escapedIDs {
id, err := url.PathUnescape(e)
if err == nil {
ids = append(ids, id)
}
}
return ids, nil
}
func (k *Key) open(name string) (*Key, error) {
fullpath := filepath.Join(k.Name, name)
nk, err := registry.OpenKey(k.Key, name, registry.ALL_ACCESS)
if err != nil {
return nil, &os.PathError{Op: "RegOpenKey", Path: fullpath, Err: err}
}
return &Key{nk, fullpath}, nil
}
func (k *Key) openid(id string) (*Key, error) {
escaped := url.PathEscape(id)
fullpath := filepath.Join(k.Name, escaped)
nk, err := k.open(escaped)
if perr, ok := err.(*os.PathError); ok && perr.Err == syscall.ERROR_FILE_NOT_FOUND {
return nil, &NotFoundError{id}
}
if err != nil {
return nil, &os.PathError{Op: "RegOpenKey", Path: fullpath, Err: err}
}
return nk, nil
}
func (k *Key) Remove(id string) error {
escaped := url.PathEscape(id)
err := registry.DeleteKey(k.Key, escaped)
if err != nil {
if err == syscall.ERROR_FILE_NOT_FOUND {
return &NotFoundError{id}
}
return &os.PathError{Op: "RegDeleteKey", Path: filepath.Join(k.Name, escaped), Err: err}
}
return nil
}
func (k *Key) set(id string, create bool, key string, state interface{}) error {
var sk *Key
var err error
if create {
var existing bool
eid := url.PathEscape(id)
sk, existing, err = createVolatileKey(k, eid, registry.ALL_ACCESS)
if err != nil {
return err
}
defer sk.Close()
if existing {
sk.Close()
return fmt.Errorf("container %s already exists", id)
}
} else {
sk, err = k.openid(id)
if err != nil {
return err
}
defer sk.Close()
}
switch reflect.TypeOf(state).Kind() {
case reflect.Bool:
v := uint32(0)
if state.(bool) {
v = 1
}
err = sk.SetDWordValue(key, v)
case reflect.Int:
err = sk.SetQWordValue(key, uint64(state.(int)))
case reflect.String:
err = sk.SetStringValue(key, state.(string))
default:
var js []byte
js, err = json.Marshal(state)
if err != nil {
return err
}
err = sk.SetBinaryValue(key, js)
}
if err != nil {
if err == syscall.ERROR_FILE_NOT_FOUND {
return &NoStateError{id, key}
}
return &os.PathError{Op: "RegSetValueEx", Path: sk.Name + ":" + key, Err: err}
}
return nil
}
func (k *Key) Create(id, key string, state interface{}) error {
return k.set(id, true, key, state)
}
func (k *Key) Set(id, key string, state interface{}) error {
return k.set(id, false, key, state)
}
func (k *Key) Clear(id, key string) error {
sk, err := k.openid(id)
if err != nil {
return err
}
defer sk.Close()
err = sk.DeleteValue(key)
if err != nil {
if err == syscall.ERROR_FILE_NOT_FOUND {
return &NoStateError{id, key}
}
return &os.PathError{Op: "RegDeleteValue", Path: sk.Name + ":" + key, Err: err}
}
return nil
}
func (k *Key) Get(id, key string, state interface{}) error {
sk, err := k.openid(id)
if err != nil {
return err
}
defer sk.Close()
var js []byte
switch reflect.TypeOf(state).Elem().Kind() {
case reflect.Bool:
var v uint64
v, _, err = sk.GetIntegerValue(key)
if err == nil {
*state.(*bool) = v != 0
}
case reflect.Int:
var v uint64
v, _, err = sk.GetIntegerValue(key)
if err == nil {
*state.(*int) = int(v)
}
case reflect.String:
var v string
v, _, err = sk.GetStringValue(key)
if err == nil {
*state.(*string) = string(v)
}
default:
js, _, err = sk.GetBinaryValue(key)
}
if err != nil {
if err == syscall.ERROR_FILE_NOT_FOUND {
return &NoStateError{id, key}
}
return &os.PathError{Op: "RegQueryValueEx", Path: sk.Name + ":" + key, Err: err}
}
if js != nil {
err = json.Unmarshal(js, state)
}
return err
}

View File

@@ -0,0 +1,185 @@
package regstate
import (
"os"
"testing"
)
var testKey = "runhcs-test-test-key"
func prepTest(t *testing.T) {
err := RemoveAll(testKey, true)
if err != nil && !os.IsNotExist(err) {
t.Fatal(err)
}
}
func TestLifetime(t *testing.T) {
prepTest(t)
k, err := Open(testKey, true)
if err != nil {
t.Fatal(err)
}
ids, err := k.Enumerate()
if err != nil {
t.Fatal(err)
}
if len(ids) != 0 {
t.Fatal("wrong count", len(ids))
}
id := "a/b/c"
key := "key"
err = k.Set(id, key, 1)
if err == nil {
t.Fatal("expected error")
}
var i int
err = k.Get(id, key, &i)
if err == nil {
t.Fatal("expected error")
}
err = k.Create(id, key, 2)
if err != nil {
t.Fatal(err)
}
ids, err = k.Enumerate()
if err != nil {
t.Fatal(err)
}
if len(ids) != 1 {
t.Fatal("wrong count", len(ids))
}
if ids[0] != id {
t.Fatal("wrong value", ids[0])
}
err = k.Get(id, key, &i)
if err != nil {
t.Fatal(err)
}
if i != 2 {
t.Fatal("got wrong value", i)
}
err = k.Set(id, key, 3)
if err != nil {
t.Fatal(err)
}
err = k.Get(id, key, &i)
if err != nil {
t.Fatal(err)
}
if i != 3 {
t.Fatal("got wrong value", i)
}
err = k.Remove(id)
if err != nil {
t.Fatal(err)
}
err = k.Remove(id)
if err == nil {
t.Fatal("expected error")
}
ids, err = k.Enumerate()
if err != nil {
t.Fatal(err)
}
if len(ids) != 0 {
t.Fatal("wrong count", len(ids))
}
}
func TestBool(t *testing.T) {
prepTest(t)
k, err := Open(testKey, true)
if err != nil {
t.Fatal(err)
}
id := "x"
key := "y"
err = k.Create(id, key, true)
if err != nil {
t.Fatal(err)
}
b := false
err = k.Get(id, key, &b)
if err != nil {
t.Fatal(err)
}
if !b {
t.Fatal("value did not marshal correctly")
}
}
func TestInt(t *testing.T) {
prepTest(t)
k, err := Open(testKey, true)
if err != nil {
t.Fatal(err)
}
id := "x"
key := "y"
err = k.Create(id, key, 10)
if err != nil {
t.Fatal(err)
}
v := 0
err = k.Get(id, key, &v)
if err != nil {
t.Fatal(err)
}
if v != 10 {
t.Fatal("value did not marshal correctly")
}
}
func TestString(t *testing.T) {
prepTest(t)
k, err := Open(testKey, true)
if err != nil {
t.Fatal(err)
}
id := "x"
key := "y"
err = k.Create(id, key, "blah")
if err != nil {
t.Fatal(err)
}
v := ""
err = k.Get(id, key, &v)
if err != nil {
t.Fatal(err)
}
if v != "blah" {
t.Fatal("value did not marshal correctly")
}
}
func TestJson(t *testing.T) {
prepTest(t)
k, err := Open(testKey, true)
if err != nil {
t.Fatal(err)
}
id := "x"
key := "y"
v := struct{ X int }{5}
err = k.Create(id, key, &v)
if err != nil {
t.Fatal(err)
}
v.X = 0
err = k.Get(id, key, &v)
if err != nil {
t.Fatal(err)
}
if v.X != 5 {
t.Fatal("value did not marshal correctly: ", v)
}
}

View File

@@ -0,0 +1,51 @@
// Code generated by 'go generate'; DO NOT EDIT.
package regstate
import (
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
var _ unsafe.Pointer
// Do the interface allocations only once for common
// Errno values.
const (
errnoERROR_IO_PENDING = 997
)
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
)
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
return nil
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values see on Windows. (perhaps when running
// all.bat?)
return e
}
var (
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW")
)
func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) {
r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition)))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
return
}
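
The only symbol this generated file exposes is regCreateKeyEx, which regstate.go consumes through a createVolatileKey helper that this diff view does not show. A hedged reconstruction of its likely shape, inside package regstate and assuming the os, path/filepath, syscall, and golang.org/x/sys/windows/registry imports that file already uses; the constant names here are assumptions, but the numeric values are the documented Win32 ones:

const (
	regOptionVolatile    = 1 // REG_OPTION_VOLATILE: the key is not persisted across reboots
	regOpenedExistingKey = 2 // disposition: REG_CREATED_NEW_KEY=1, REG_OPENED_EXISTING_KEY=2
)

// createVolatileKey creates (or opens) a volatile subkey under parent and
// reports whether it already existed, matching the call in (*Key).set above.
func createVolatileKey(parent *Key, name string, access uint32) (*Key, bool, error) {
	var (
		h syscall.Handle
		d uint32
	)
	fullpath := filepath.Join(parent.Name, name)
	namep, err := syscall.UTF16PtrFromString(name)
	if err != nil {
		return nil, false, err
	}
	err = regCreateKeyEx(syscall.Handle(parent.Key), namep, 0, nil, regOptionVolatile, access, nil, &h, &d)
	if err != nil {
		return nil, false, &os.PathError{Op: "RegCreateKeyEx", Path: fullpath, Err: err}
	}
	return &Key{registry.Key(h), fullpath}, d == regOpenedExistingKey, nil
}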

View File

@@ -0,0 +1,10 @@
package requesttype
// These are constants for v2 schema modify requests.
// RequestType const
const (
Add = "Add"
Remove = "Remove"
PreAdd = "PreAdd" // For networking
)

View File

@@ -0,0 +1,71 @@
package runhcs
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"syscall"
"time"
"github.com/Microsoft/hcsshim/internal/guid"
)
// ContainerState represents the platform agnostic pieces relating to a
// running container's status and state
type ContainerState struct {
// Version is the OCI version for the container
Version string `json:"ociVersion"`
// ID is the container ID
ID string `json:"id"`
// InitProcessPid is the init process id in the parent namespace
InitProcessPid int `json:"pid"`
// Status is the current status of the container, running, paused, ...
Status string `json:"status"`
// Bundle is the path on the filesystem to the bundle
Bundle string `json:"bundle"`
// Rootfs is a path to a directory containing the container's root filesystem.
Rootfs string `json:"rootfs"`
// Created is the unix timestamp for the creation time of the container in UTC
Created time.Time `json:"created"`
// Annotations is the user defined annotations added to the config.
Annotations map[string]string `json:"annotations,omitempty"`
// The owner of the state directory (the owner of the container).
Owner string `json:"owner"`
}
// GetErrorFromPipe reads from `pipe` and verifies whether the operation
// succeeded or failed. On failure it converts the message to an error and
// returns it. If `p` is not nil, it will issue a `Kill` and `Wait` for exit.
func GetErrorFromPipe(pipe io.Reader, p *os.Process) error {
serr, err := ioutil.ReadAll(pipe)
if err != nil {
return err
}
if bytes.Equal(serr, ShimSuccess) {
return nil
}
extra := ""
if p != nil {
p.Kill()
state, err := p.Wait()
if err != nil {
panic(err)
}
extra = fmt.Sprintf(", exit code %d", state.Sys().(syscall.WaitStatus).ExitCode)
}
if len(serr) == 0 {
return fmt.Errorf("unknown shim failure%s", extra)
}
return errors.New(string(serr))
}
// VMPipePath returns the named pipe path for the vm shim.
func VMPipePath(hostUniqueID guid.GUID) string {
return SafePipePath("runhcs-vm-" + hostUniqueID.String())
}
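
GetErrorFromPipe pins down a tiny wire contract: the shim writes the ShimSuccess byte string (defined in the next file) on success, or a bare error message otherwise. A hedged sketch of the writing side of that contract, inside package runhcs; the helper name is an assumption:

// reportResult writes the shim-side half of the GetErrorFromPipe contract:
// the ShimSuccess marker on success, the raw error text on failure.
func reportResult(pipe io.Writer, opErr error) error {
	if opErr == nil {
		_, err := pipe.Write(ShimSuccess)
		return err
	}
	_, err := io.WriteString(pipe, opErr.Error())
	return err
}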

View File

@@ -0,0 +1,16 @@
package runhcs
import "net/url"
const (
SafePipePrefix = `\\.\pipe\ProtectedPrefix\Administrators\`
)
// ShimSuccess is the byte stream returned on a successful operation.
var ShimSuccess = []byte{0, 'O', 'K', 0}
func SafePipePath(name string) string {
// Use a pipe in the Administrators protected prefix to prevent malicious
// squatting.
return SafePipePrefix + url.PathEscape(name)
}

View File

@@ -0,0 +1,17 @@
package runhcs
import (
"testing"
)
func Test_SafePipePath(t *testing.T) {
tests := []string{"test", "test with spaces", "test/with\\\\.\\slashes", "test.with..dots..."}
expected := []string{"test", "test%20with%20spaces", "test%2Fwith%5C%5C.%5Cslashes", "test.with..dots..."}
for i, test := range tests {
actual := SafePipePath(test)
e := SafePipePrefix + expected[i]
if actual != e {
t.Fatalf("SafePipePath: actual '%s' != '%s'", actual, expected[i])
}
}
}

View File

@@ -0,0 +1,43 @@
package runhcs
import (
"encoding/json"
"github.com/Microsoft/go-winio"
)
// VMRequestOp is an operation that can be issued to a VM shim.
type VMRequestOp string
const (
// OpCreateContainer is a create container request.
OpCreateContainer VMRequestOp = "create"
// OpSyncNamespace is a `cni.NamespaceTypeGuest` sync request with the UVM.
OpSyncNamespace VMRequestOp = "sync"
// OpUnmountContainer is a container unmount request.
OpUnmountContainer VMRequestOp = "unmount"
// OpUnmountContainerDiskOnly is a container unmount disk request.
OpUnmountContainerDiskOnly VMRequestOp = "unmount-disk"
)
// VMRequest is an operation request that is issued to a VM shim.
type VMRequest struct {
ID string
Op VMRequestOp
}
// IssueVMRequest issues a request to a shim at the given pipe.
func IssueVMRequest(pipepath string, req *VMRequest) error {
pipe, err := winio.DialPipe(pipepath, nil)
if err != nil {
return err
}
defer pipe.Close()
if err := json.NewEncoder(pipe).Encode(req); err != nil {
return err
}
if err := GetErrorFromPipe(pipe, nil); err != nil {
return err
}
return nil
}
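
A sketch of the calling side, putting VMPipePath and IssueVMRequest together. This is illustrative only, since these are internal packages; the zero GUID stands in for a real host unique ID:

package main

import (
	"log"

	"github.com/Microsoft/hcsshim/internal/guid"
	"github.com/Microsoft/hcsshim/internal/runhcs"
)

func main() {
	var hostID guid.GUID // placeholder; real callers get this from container state

	req := &runhcs.VMRequest{ID: "my-container", Op: runhcs.OpCreateContainer}
	if err := runhcs.IssueVMRequest(runhcs.VMPipePath(hostID), req); err != nil {
		log.Fatal(err)
	}
}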

View File

@@ -0,0 +1,125 @@
// +build admin
package safefile
import (
"os"
"path/filepath"
"syscall"
"testing"
)
func TestOpenRelative(t *testing.T) {
badroot, err := tempRoot()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(badroot.Name())
defer badroot.Close()
root, err := tempRoot()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(root.Name())
defer root.Close()
// Create a file
f, err := OpenRelative("foo", root, 0, syscall.FILE_SHARE_READ, FILE_CREATE, 0)
if err != nil {
t.Fatal(err)
}
f.Close()
// Create a directory
err = MkdirRelative("dir", root)
if err != nil {
t.Fatal(err)
}
// Create a file in the bad root
f, err = os.Create(filepath.Join(badroot.Name(), "badfile"))
if err != nil {
t.Fatal(err)
}
f.Close()
// Create a directory symlink to the bad root
err = os.Symlink(badroot.Name(), filepath.Join(root.Name(), "dsymlink"))
if err != nil {
t.Fatal(err)
}
// Create a file symlink to the bad file
err = os.Symlink(filepath.Join(badroot.Name(), "badfile"), filepath.Join(root.Name(), "symlink"))
if err != nil {
t.Fatal(err)
}
// Make sure opens cannot happen through the symlink
f, err = OpenRelative("dsymlink/foo", root, 0, syscall.FILE_SHARE_READ, FILE_CREATE, 0)
if err == nil {
f.Close()
t.Fatal("created file in wrong tree!")
}
t.Log(err)
// Check again using EnsureNotReparsePointRelative
err = EnsureNotReparsePointRelative("dsymlink", root)
if err == nil {
t.Fatal("reparse check should have failed")
}
t.Log(err)
// Make sure links work
err = LinkRelative("foo", root, "hardlink", root)
if err != nil {
t.Fatal(err)
}
// Even inside directories
err = LinkRelative("foo", root, "dir/bar", root)
if err != nil {
t.Fatal(err)
}
// Make sure links cannot happen through the symlink
err = LinkRelative("foo", root, "dsymlink/hardlink", root)
if err == nil {
f.Close()
t.Fatal("created link in wrong tree!")
}
t.Log(err)
// In either direction
err = LinkRelative("dsymlink/badfile", root, "bar", root)
if err == nil {
f.Close()
t.Fatal("created link in wrong tree!")
}
t.Log(err)
// Make sure remove cannot happen through the symlink
err = RemoveRelative("symlink/badfile", root)
if err == nil {
t.Fatal("remove in wrong tree!")
}
// Remove the symlink
err = RemoveAllRelative("symlink", root)
if err != nil {
t.Fatal(err)
}
// Make sure it's not possible to escape with .. (NT doesn't support .. at the kernel level)
f, err = OpenRelative("..", root, syscall.GENERIC_READ, syscall.FILE_SHARE_READ, FILE_OPEN, 0)
if err == nil {
t.Fatal("escaped the directory")
}
t.Log(err)
// Should not have touched the other directory
if _, err = os.Lstat(filepath.Join(badroot.Name(), "badfile")); err != nil {
t.Fatal(err)
}
}
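
Note the `// +build admin` constraint above: because the test creates symlinks and exercises NT-level relative opens, it compiles only when the tag is supplied (for example `go test -tags admin` in the package directory) and is intended to run from an elevated prompt.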

View File

@@ -0,0 +1,53 @@
package safefile
import (
"io/ioutil"
"os"
"path/filepath"
"syscall"
"testing"
winio "github.com/Microsoft/go-winio"
)
func tempRoot() (*os.File, error) {
name, err := ioutil.TempDir("", "hcsshim-test")
if err != nil {
return nil, err
}
f, err := OpenRoot(name)
if err != nil {
os.Remove(name)
return nil, err
}
return f, nil
}
func TestRemoveRelativeReadOnly(t *testing.T) {
root, err := tempRoot()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(root.Name())
defer root.Close()
p := filepath.Join(root.Name(), "foo")
f, err := os.Create(p)
if err != nil {
t.Fatal(err)
}
defer f.Close()
bi := winio.FileBasicInfo{}
bi.FileAttributes = syscall.FILE_ATTRIBUTE_READONLY
err = winio.SetFileBasicInfo(f, &bi)
if err != nil {
t.Fatal(err)
}
f.Close()
err = RemoveRelative("foo", root)
if err != nil {
t.Fatal(err)
}
}

View File

@@ -3,6 +3,8 @@ package schema1
import (
"encoding/json"
"time"
"github.com/Microsoft/hcsshim/internal/schema2"
)
// ProcessConfig is used as both the input of Container.CreateProcess
@@ -115,9 +117,10 @@ type ComputeSystemQuery struct {
type PropertyType string
const (
PropertyTypeStatistics PropertyType = "Statistics" // V1 and V2
PropertyTypeProcessList = "ProcessList" // V1 and V2
PropertyTypeMappedVirtualDisk = "MappedVirtualDisk" // Not supported in V2 schema call
PropertyTypeGuestConnection = "GuestConnection" // V1 and V2. Nil return from HCS before RS5
)
type PropertyQuery struct {
@@ -142,6 +145,7 @@ type ContainerProperties struct {
Statistics Statistics `json:",omitempty"`
ProcessList []ProcessListItem `json:",omitempty"`
MappedVirtualDiskControllers map[int]MappedVirtualDiskController `json:",omitempty"`
GuestConnectionInfo GuestConnectionInfo `json:",omitempty"`
}
// MemoryStats holds the memory statistics for a container
@@ -206,6 +210,19 @@ type MappedVirtualDiskController struct {
MappedVirtualDisks map[int]MappedVirtualDisk `json:",omitempty"`
}
// GuestDefinedCapabilities is part of the GuestConnectionInfo returned by a GuestConnection call on a utility VM
type GuestDefinedCapabilities struct {
NamespaceAddRequestSupported bool `json:",omitempty"`
SignalProcessSupported bool `json:",omitempty"`
}
// GuestConnectionInfo is the structure of an item returned by a GuestConnection call on a utility VM
type GuestConnectionInfo struct {
SupportedSchemaVersions []hcsschema.Version `json:",omitempty"`
ProtocolVersion uint32 `json:",omitempty"`
GuestDefinedCapabilities GuestDefinedCapabilities `json:",omitempty"`
}
// Type of Request Support in ModifySystem
type RequestType string
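
The added PropertyTypeGuestConnection value slots into the existing PropertyQuery type. A hedged sketch of building such a query (PropertyTypes is the field PropertyQuery declares in the full file; this hunk elides it):

q := schema1.PropertyQuery{
	PropertyTypes: []schema1.PropertyType{schema1.PropertyTypeGuestConnection},
}
b, _ := json.Marshal(&q) // b == `{"PropertyTypes":["GuestConnection"]}`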

View File

@@ -0,0 +1,31 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type Attachment struct {
Type_ string `json:"Type,omitempty"`
Path string `json:"Path,omitempty"`
IgnoreFlushes bool `json:"IgnoreFlushes,omitempty"`
CachingMode string `json:"CachingMode,omitempty"`
NoWriteHardening bool `json:"NoWriteHardening,omitempty"`
DisableExpansionOptimization bool `json:"DisableExpansionOptimization,omitempty"`
IgnoreRelativeLocator bool `json:"IgnoreRelativeLocator,omitempty"`
CaptureIoAttributionContext bool `json:"CaptureIoAttributionContext,omitempty"`
ReadOnly bool `json:"ReadOnly,omitempty"`
}

View File

@@ -0,0 +1,13 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type Battery struct {
}

View File

@@ -0,0 +1,19 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type CacheQueryStatsResponse struct {
L3OccupancyBytes int32 `json:"L3OccupancyBytes,omitempty"`
L3TotalBwBytes int32 `json:"L3TotalBwBytes,omitempty"`
L3LocalBwBytes int32 `json:"L3LocalBwBytes,omitempty"`
}

View File

@@ -0,0 +1,27 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type Chipset struct {
Uefi *Uefi `json:"Uefi,omitempty"`
IsNumLockDisabled bool `json:"IsNumLockDisabled,omitempty"`
BaseBoardSerialNumber string `json:"BaseBoardSerialNumber,omitempty"`
ChassisSerialNumber string `json:"ChassisSerialNumber,omitempty"`
ChassisAssetTag string `json:"ChassisAssetTag,omitempty"`
UseUtc bool `json:"UseUtc,omitempty"`
// LinuxKernelDirect - Added in v2.2 Builds >=181117
LinuxKernelDirect *LinuxKernelDirect `json:"LinuxKernelDirect,omitempty"`
}

View File

@@ -0,0 +1,15 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type CloseHandle struct {
Handle string `json:"Handle,omitempty"`
}

View File

@@ -0,0 +1,18 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
// ComPort specifies the named pipe that will be used for the port, with empty string indicating a disconnected port.
type ComPort struct {
NamedPipe string `json:"NamedPipe,omitempty"`
OptimizeForDebugger bool `json:"OptimizeForDebugger,omitempty"`
}

View File

@@ -0,0 +1,27 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type ComputeSystem struct {
Owner string `json:"Owner,omitempty"`
SchemaVersion *Version `json:"SchemaVersion,omitempty"`
HostingSystemId string `json:"HostingSystemId,omitempty"`
HostedSystem *HostedSystem `json:"HostedSystem,omitempty"`
Container *Container `json:"Container,omitempty"`
VirtualMachine *VirtualMachine `json:"VirtualMachine,omitempty"`
ShouldTerminateOnLastHandleClosed bool `json:"ShouldTerminateOnLastHandleClosed,omitempty"`
}

View File

@@ -0,0 +1,72 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
import (
"net/http"
)
// contextKeys are used to identify the type of value in the context.
// Since these are strings, it is possible to get a short description of the
// context key for logging and debugging using key.String().
type contextKey string
func (c contextKey) String() string {
return "auth " + string(c)
}
var (
// ContextOAuth2 takes a oauth2.TokenSource as authentication for the request.
ContextOAuth2 = contextKey("token")
// ContextBasicAuth takes BasicAuth as authentication for the request.
ContextBasicAuth = contextKey("basic")
// ContextAccessToken takes a string oauth2 access token as authentication for the request.
ContextAccessToken = contextKey("accesstoken")
// ContextAPIKey takes an APIKey as authentication for the request
ContextAPIKey = contextKey("apikey")
)
// BasicAuth provides basic http authentication to a request passed via context using ContextBasicAuth
type BasicAuth struct {
UserName string `json:"userName,omitempty"`
Password string `json:"password,omitempty"`
}
// APIKey provides API key based authentication to a request passed via context using ContextAPIKey
type APIKey struct {
Key string
Prefix string
}
type Configuration struct {
BasePath string `json:"basePath,omitempty"`
Host string `json:"host,omitempty"`
Scheme string `json:"scheme,omitempty"`
DefaultHeader map[string]string `json:"defaultHeader,omitempty"`
UserAgent string `json:"userAgent,omitempty"`
HTTPClient *http.Client
}
func NewConfiguration() *Configuration {
cfg := &Configuration{
BasePath: "https://localhost",
DefaultHeader: make(map[string]string),
UserAgent: "Swagger-Codegen/2.1.0/go",
}
return cfg
}
func (c *Configuration) AddDefaultHeader(key string, value string) {
c.DefaultHeader[key] = value
}
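
Typical use of the generated configuration type is construct-then-override; a short sketch:

cfg := hcsschema.NewConfiguration()
cfg.UserAgent = "hcsshim"                          // override the Swagger default
cfg.AddDefaultHeader("Accept", "application/json") // sent on every request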

View File

@@ -0,0 +1,17 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type ConsoleSize struct {
Height int32 `json:"Height,omitempty"`
Width int32 `json:"Width,omitempty"`
}

View File

@@ -0,0 +1,35 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type Container struct {
GuestOs *GuestOs `json:"GuestOs,omitempty"`
Storage *Storage `json:"Storage,omitempty"`
MappedDirectories []MappedDirectory `json:"MappedDirectories,omitempty"`
MappedPipes []MappedPipe `json:"MappedPipes,omitempty"`
Memory *Memory `json:"Memory,omitempty"`
Processor *Processor `json:"Processor,omitempty"`
Networking *Networking `json:"Networking,omitempty"`
HvSocket *HvSocket `json:"HvSocket,omitempty"`
ContainerCredentialGuard *ContainerCredentialGuardState `json:"ContainerCredentialGuard,omitempty"`
RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"`
AssignedDevices []Device `json:"AssignedDevices,omitempty"`
}

View File

@@ -0,0 +1,25 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type ContainerCredentialGuardState struct {
// Authentication cookie for calls to a Container Credential Guard instance.
Cookie string `json:"Cookie,omitempty"`
// Name of the RPC endpoint of the Container Credential Guard instance.
RpcEndpoint string `json:"RpcEndpoint,omitempty"`
// Transport used for the configured Container Credential Guard instance.
Transport string `json:"Transport,omitempty"`
// Credential spec used for the configured Container Credential Guard instance.
CredentialSpec string `json:"CredentialSpec,omitempty"`
}

View File

@@ -0,0 +1,26 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
// memory usage as viewed from within the container
type ContainerMemoryInformation struct {
TotalPhysicalBytes int32 `json:"TotalPhysicalBytes,omitempty"`
TotalUsage int32 `json:"TotalUsage,omitempty"`
CommittedBytes int32 `json:"CommittedBytes,omitempty"`
SharedCommittedBytes int32 `json:"SharedCommittedBytes,omitempty"`
CommitLimitBytes int32 `json:"CommitLimitBytes,omitempty"`
PeakCommitmentBytes int32 `json:"PeakCommitmentBytes,omitempty"`
}

View File

@@ -0,0 +1,16 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type Device struct {
// The interface class guid of the device to assign to container.
InterfaceClassGuid string `json:"InterfaceClassGuid,omitempty"`
}

View File

@@ -0,0 +1,43 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type Devices struct {
ComPorts map[string]ComPort `json:"ComPorts,omitempty"`
Scsi map[string]Scsi `json:"Scsi,omitempty"`
VirtualPMem *VirtualPMemController `json:"VirtualPMem,omitempty"`
NetworkAdapters map[string]NetworkAdapter `json:"NetworkAdapters,omitempty"`
VideoMonitor *VideoMonitor `json:"VideoMonitor,omitempty"`
Keyboard *Keyboard `json:"Keyboard,omitempty"`
Mouse *Mouse `json:"Mouse,omitempty"`
HvSocket *HvSocket2 `json:"HvSocket,omitempty"`
EnhancedModeVideo *EnhancedModeVideo `json:"EnhancedModeVideo,omitempty"`
GuestCrashReporting *GuestCrashReporting `json:"GuestCrashReporting,omitempty"`
VirtualSmb *VirtualSmb `json:"VirtualSmb,omitempty"`
Plan9 *Plan9 `json:"Plan9,omitempty"`
Battery *Battery `json:"Battery,omitempty"`
FlexibleIov map[string]FlexibleIoDevice `json:"FlexibleIov,omitempty"`
SharedMemory *SharedMemoryConfiguration `json:"SharedMemory,omitempty"`
}

View File

@@ -0,0 +1,15 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type EnhancedModeVideo struct {
ConnectionOptions *RdpConnectionOptions `json:"ConnectionOptions,omitempty"`
}

View File

@@ -0,0 +1,19 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type FlexibleIoDevice struct {
EmulatorId string `json:"EmulatorId,omitempty"`
HostingModel string `json:"HostingModel,omitempty"`
Configuration []string `json:"Configuration,omitempty"`
}

View File

@@ -0,0 +1,19 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type GuestConnection struct {
// Use Vsock rather than Hyper-V sockets to communicate with the guest service.
UseVsock bool `json:"UseVsock,omitempty"`
// Don't disconnect the guest connection when pausing the virtual machine.
UseConnectedSuspend bool `json:"UseConnectedSuspend,omitempty"`
}

View File

@@ -0,0 +1,21 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
// Information about the guest.
type GuestConnectionInfo struct {
// Each schema version x.y stands for the range of versions a.b where a==x and b<=y. This list comes from the SupportedSchemaVersions field in GcsCapabilities.
SupportedSchemaVersions []Version `json:"SupportedSchemaVersions,omitempty"`
ProtocolVersion int32 `json:"ProtocolVersion,omitempty"`
GuestDefinedCapabilities *interface{} `json:"GuestDefinedCapabilities,omitempty"`
}

View File

@@ -0,0 +1,15 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type GuestCrashReporting struct {
WindowsCrashSettings *WindowsCrashReporting `json:"WindowsCrashSettings,omitempty"`
}

View File

@@ -0,0 +1,15 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type GuestOs struct {
HostName string `json:"HostName,omitempty"`
}

View File

@@ -0,0 +1,22 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type GuestState struct {
// The path to an existing file used for persistent guest state storage. An empty string indicates the system should initialize new transient, in-memory guest state.
GuestStateFilePath string `json:"GuestStateFilePath,omitempty"`
// The path to an existing file for persistent runtime state storage. An empty string indicates the system should initialize new transient, in-memory runtime state.
RuntimeStateFilePath string `json:"RuntimeStateFilePath,omitempty"`
// If true, the guest state and runtime state files will be used as templates to populate transient, in-memory state instead of using the files as persistent backing store.
ForceTransientState bool `json:"ForceTransientState,omitempty"`
}

View File

@@ -0,0 +1,17 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type HostedSystem struct {
SchemaVersion *Version `json:"SchemaVersion,omitempty"`
Container *Container `json:"Container,omitempty"`
}

View File

@@ -0,0 +1,17 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type HvSocket struct {
Config *HvSocketSystemConfig `json:"Config,omitempty"`
EnablePowerShellDirect bool `json:"EnablePowerShellDirect,omitempty"`
}

View File

@@ -0,0 +1,16 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
// HvSocket configuration for a VM
type HvSocket2 struct {
HvSocketConfig *HvSocketSystemConfig `json:"HvSocketConfig,omitempty"`
}

View File

@@ -0,0 +1,22 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type HvSocketServiceConfig struct {
// SDDL string that HvSocket will check before allowing a host process to bind to this specific service. If not specified, defaults to the system DefaultBindSecurityDescriptor, defined in HvSocketSystemWpConfig in V1.
BindSecurityDescriptor string `json:"BindSecurityDescriptor,omitempty"`
// SDDL string that HvSocket will check before allowing a host process to connect to this specific service. If not specified, defaults to the system DefaultConnectSecurityDescriptor, defined in HvSocketSystemWpConfig in V1.
ConnectSecurityDescriptor string `json:"ConnectSecurityDescriptor,omitempty"`
// If true, HvSocket will process wildcard binds for this service/system combination. Wildcard binds are secured in the registry at SOFTWARE/Microsoft/Windows NT/CurrentVersion/Virtualization/HvSocket/WildcardDescriptors
AllowWildcardBinds bool `json:"AllowWildcardBinds,omitempty"`
}

View File

@@ -0,0 +1,22 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
// This is the HCS Schema version of the HvSocket configuration. The VMWP version is located in Config.Devices.IC in V1.
type HvSocketSystemConfig struct {
// SDDL string that HvSocket will check before allowing a host process to bind to an unlisted service for this specific container/VM (not wildcard binds).
DefaultBindSecurityDescriptor string `json:"DefaultBindSecurityDescriptor,omitempty"`
// SDDL string that HvSocket will check before allowing a host process to connect to an unlisted service in the VM/container.
DefaultConnectSecurityDescriptor string `json:"DefaultConnectSecurityDescriptor,omitempty"`
ServiceTable map[string]HvSocketServiceConfig `json:"ServiceTable,omitempty"`
}
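
A hedged example of how the two default descriptors and the per-service table compose; the service GUID and SDDL strings are placeholders, not values from this commit:

hvCfg := hcsschema.HvSocketSystemConfig{
	// A protected, empty DACL ("D:P") grants nothing, so unlisted
	// services are effectively deny-by-default.
	DefaultBindSecurityDescriptor:    "D:P",
	DefaultConnectSecurityDescriptor: "D:P",
	ServiceTable: map[string]hcsschema.HvSocketServiceConfig{
		"00000000-0000-0000-0000-000000000001": {
			BindSecurityDescriptor:    "D:P(A;;FA;;;SY)", // allow SYSTEM to bind
			ConnectSecurityDescriptor: "D:P(A;;FA;;;SY)", // allow SYSTEM to connect
		},
	},
}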

View File

@@ -0,0 +1,13 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type Keyboard struct {
}

View File

@@ -0,0 +1,22 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type Layer struct {
Id string `json:"Id,omitempty"`
Path string `json:"Path,omitempty"`
PathType string `json:"PathType,omitempty"`
// Unspecified defaults to Enabled
Cache string `json:"Cache,omitempty"`
}

View File

@@ -0,0 +1,18 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.2
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type LinuxKernelDirect struct {
KernelFilePath string `json:"KernelFilePath,omitempty"`
InitRdPath string `json:"InitRdPath,omitempty"`
KernelCmdLine string `json:"KernelCmdLine,omitempty"`
}

View File

@@ -0,0 +1,21 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type MappedDirectory struct {
HostPath string `json:"HostPath,omitempty"`
HostPathType string `json:"HostPathType,omitempty"`
ContainerPath string `json:"ContainerPath,omitempty"`
ReadOnly bool `json:"ReadOnly,omitempty"`
}

View File

@@ -0,0 +1,19 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type MappedPipe struct {
ContainerPipeName string `json:"ContainerPipeName,omitempty"`
HostPath string `json:"HostPath,omitempty"`
HostPathType string `json:"HostPathType,omitempty"`
}

View File

@@ -0,0 +1,15 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type Memory struct {
SizeInMB int32 `json:"SizeInMB,omitempty"`
}

View File

@@ -0,0 +1,25 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type Memory2 struct {
SizeInMB int32 `json:"SizeInMB,omitempty"`
AllowOvercommit bool `json:"AllowOvercommit,omitempty"`
EnableHotHint bool `json:"EnableHotHint,omitempty"`
EnableColdHint bool `json:"EnableColdHint,omitempty"`
EnableEpf bool `json:"EnableEpf,omitempty"`
// EnableDeferredCommit is private in the schema; if the file is regenerated, it needs to be added back.
EnableDeferredCommit bool `json:"EnableDeferredCommit,omitempty"`
}

View File

@@ -0,0 +1,19 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type MemoryInformationForVm struct {
VirtualNodeCount int32 `json:"VirtualNodeCount,omitempty"`
VirtualMachineMemory *VmMemory `json:"VirtualMachineMemory,omitempty"`
VirtualNodes []VirtualNodeInfo `json:"VirtualNodes,omitempty"`
}

View File

@@ -0,0 +1,20 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
// Memory runtime statistics
type MemoryStats struct {
MemoryUsageCommitBytes int32 `json:"MemoryUsageCommitBytes,omitempty"`
MemoryUsageCommitPeakBytes int32 `json:"MemoryUsageCommitPeakBytes,omitempty"`
MemoryUsagePrivateWorkingSetBytes int32 `json:"MemoryUsagePrivateWorkingSetBytes,omitempty"`
}

View File

@@ -0,0 +1,20 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type ModifySettingRequest struct {
ResourcePath string `json:"ResourcePath,omitempty"`
RequestType string `json:"RequestType,omitempty"`
Settings interface{} `json:"Settings,omitempty"` // NOTE: Swagger generated as *interface{}. Locally updated
GuestRequest interface{} `json:"GuestRequest,omitempty"` // NOTE: Swagger generated as *interface{}. Locally updated
}
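
This is the type the requesttype constants vendored earlier in this commit are meant for. A representative sketch (the resource path and endpoint values are illustrative, not from this diff):

req := hcsschema.ModifySettingRequest{
	ResourcePath: "VirtualMachine/Devices/NetworkAdapters/adapter1",
	RequestType:  requesttype.Add,
	Settings: hcsschema.NetworkAdapter{
		EndpointId: "b1c9b47a-0000-0000-0000-000000000000",
	},
}
b, _ := json.Marshal(&req) // body for an HcsModifyComputeSystem-style call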

View File

@@ -0,0 +1,13 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type Mouse struct {
}

View File

@@ -0,0 +1,17 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type NetworkAdapter struct {
EndpointId string `json:"EndpointId,omitempty"`
MacAddress string `json:"MacAddress,omitempty"`
}

View File

@@ -0,0 +1,24 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type Networking struct {
AllowUnqualifiedDnsQuery bool `json:"AllowUnqualifiedDnsQuery,omitempty"`
DnsSearchList string `json:"DnsSearchList,omitempty"`
NetworkSharedContainerName string `json:"NetworkSharedContainerName,omitempty"`
// Guid in windows; string in linux
Namespace string `json:"Namespace,omitempty"`
NetworkAdapters []string `json:"NetworkAdapters,omitempty"`
}

View File

@@ -0,0 +1,16 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
// Notification data that is indicated to components running in the Virtual Machine.
type PauseNotification struct {
Reason string `json:"Reason,omitempty"`
}

View File

@@ -0,0 +1,18 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
// Options for HcsPauseComputeSystem
type PauseOptions struct {
SuspensionLevel string `json:"SuspensionLevel,omitempty"`
HostedNotification *PauseNotification `json:"HostedNotification,omitempty"`
}

View File

@@ -0,0 +1,15 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type Plan9 struct {
Shares []Plan9Share `json:"Shares,omitempty"`
}

View File

@@ -0,0 +1,33 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type Plan9Share struct {
Name string `json:"Name,omitempty"`
// The name by which the guest operation system can access this share, via the aname parameter in the Plan9 protocol.
AccessName string `json:"AccessName,omitempty"`
Path string `json:"Path,omitempty"`
Port int32 `json:"Port,omitempty"`
// Flags are marked private until they are exported correctly:
//
// ReadOnly 0x00000001
// LinuxMetadata 0x00000004
// CaseSensitive 0x00000008
Flags int32 `json:"Flags,omitempty"`
ReadOnly bool `json:"ReadOnly,omitempty"`
UseShareRootIdentity bool `json:"UseShareRootIdentity,omitempty"`
}
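
The flag bits named in the comment can be written down as constants; a sketch (the constant names are assumptions, the values come from the comment above, and the port is the conventional 9p port used as a placeholder):

const (
	plan9ShareFlagReadOnly      int32 = 0x00000001
	plan9ShareFlagLinuxMetadata int32 = 0x00000004
	plan9ShareFlagCaseSensitive int32 = 0x00000008
)

share := hcsschema.Plan9Share{
	Name:       "data",
	AccessName: "data", // the aname the guest mounts
	Path:       `C:\shares\data`,
	Port:       564,
	Flags:      plan9ShareFlagReadOnly | plan9ShareFlagCaseSensitive,
}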

View File

@@ -0,0 +1,34 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
import (
"time"
)
// Information about a process running in a container
type ProcessDetails struct {
ProcessId int32 `json:"ProcessId,omitempty"`
ImageName string `json:"ImageName,omitempty"`
CreateTimestamp time.Time `json:"CreateTimestamp,omitempty"`
UserTime100ns int32 `json:"UserTime100ns,omitempty"`
KernelTime100ns int32 `json:"KernelTime100ns,omitempty"`
MemoryCommitBytes int32 `json:"MemoryCommitBytes,omitempty"`
MemoryWorkingSetPrivateBytes int32 `json:"MemoryWorkingSetPrivateBytes,omitempty"`
MemoryWorkingSetSharedBytes int32 `json:"MemoryWorkingSetSharedBytes,omitempty"`
}

View File

@@ -0,0 +1,20 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
// Passed to HcsRpc_ModifyProcess
type ProcessModifyRequest struct {
Operation string `json:"Operation,omitempty"`
ConsoleSize *ConsoleSize `json:"ConsoleSize,omitempty"`
CloseHandle *CloseHandle `json:"CloseHandle,omitempty"`
}

View File

@@ -0,0 +1,47 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.1
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type ProcessParameters struct {
ApplicationName string `json:"ApplicationName,omitempty"`
CommandLine string `json:"CommandLine,omitempty"`
// optional alternative to CommandLine, currently only supported by Linux GCS
CommandArgs []string `json:"CommandArgs,omitempty"`
User string `json:"User,omitempty"`
WorkingDirectory string `json:"WorkingDirectory,omitempty"`
Environment map[string]string `json:"Environment,omitempty"`
// if set, will run as low-privilege process
RestrictedToken bool `json:"RestrictedToken,omitempty"`
// if set, ignore StdErrPipe
EmulateConsole bool `json:"EmulateConsole,omitempty"`
CreateStdInPipe bool `json:"CreateStdInPipe,omitempty"`
CreateStdOutPipe bool `json:"CreateStdOutPipe,omitempty"`
CreateStdErrPipe bool `json:"CreateStdErrPipe,omitempty"`
// height then width
ConsoleSize []int32 `json:"ConsoleSize,omitempty"`
// if set, find an existing session for the user and create the process in it
UseExistingLogin bool `json:"UseExistingLogin,omitempty"`
// if set, use the legacy console instead of conhost
UseLegacyConsole bool `json:"UseLegacyConsole,omitempty"`
}
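
A representative parameter block for creating a process with piped stdio; all values are illustrative:

params := hcsschema.ProcessParameters{
	CommandLine:      `cmd.exe /c echo hello`,
	WorkingDirectory: `C:\`,
	Environment:      map[string]string{"PATH": `C:\Windows\System32`},
	CreateStdInPipe:  true,
	CreateStdOutPipe: true,
	CreateStdErrPipe: true,
	ConsoleSize:      []int32{25, 80}, // height then width, per the comment above
}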

Some files were not shown because too many files have changed in this diff.