vendor update

93 vendor/github.com/Microsoft/hcsshim/internal/appargs/appargs.go generated vendored
@@ -1,93 +0,0 @@
// Package appargs provides argument validation routines for use with
// github.com/urfave/cli.
package appargs

import (
    "errors"
    "strconv"

    "github.com/urfave/cli"
)

// Validator is an argument validator function. It returns the number of
// arguments consumed or -1 on error.
type Validator = func([]string) int

// String is a validator for strings.
func String(args []string) int {
    if len(args) == 0 {
        return -1
    }
    return 1
}

// NonEmptyString is a validator for non-empty strings.
func NonEmptyString(args []string) int {
    if len(args) == 0 || args[0] == "" {
        return -1
    }
    return 1
}

// Int returns a validator for integers.
func Int(base int, min int, max int) Validator {
    return func(args []string) int {
        if len(args) == 0 {
            return -1
        }
        i, err := strconv.ParseInt(args[0], base, 0)
        if err != nil || int(i) < min || int(i) > max {
            return -1
        }
        return 1
    }
}

// Optional returns a validator that treats an argument as optional.
func Optional(v Validator) Validator {
    return func(args []string) int {
        if len(args) == 0 {
            return 0
        }
        return v(args)
    }
}

// Rest returns a validator that validates each of the remaining arguments.
func Rest(v Validator) Validator {
    return func(args []string) int {
        count := len(args)
        for len(args) != 0 {
            n := v(args)
            if n < 0 {
                return n
            }
            args = args[n:]
        }
        return count
    }
}

// ErrInvalidUsage is returned when there is a validation error.
var ErrInvalidUsage = errors.New("invalid command usage")

// Validate can be used as a command's Before function to validate the arguments
// to the command.
func Validate(vs ...Validator) cli.BeforeFunc {
    return func(context *cli.Context) error {
        remaining := context.Args()
        for _, v := range vs {
            consumed := v(remaining)
            if consumed < 0 {
                return ErrInvalidUsage
            }
            remaining = remaining[consumed:]
        }

        if len(remaining) > 0 {
            return ErrInvalidUsage
        }

        return nil
    }
}
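For reference, a minimal sketch of how the removed validators compose with urfave/cli. The command name "resize" and the argument shape are hypothetical, and the internal import path is only usable from within the hcsshim module itself:

package main

import (
    "os"

    "github.com/Microsoft/hcsshim/internal/appargs"
    "github.com/urfave/cli"
)

func main() {
    app := cli.NewApp()
    app.Commands = []cli.Command{
        {
            Name: "resize",
            // Validate runs before Action: it expects a non-empty ID followed
            // by an optional decimal value between 1 and 4096, and rejects
            // anything else with ErrInvalidUsage.
            Before: appargs.Validate(
                appargs.NonEmptyString,
                appargs.Optional(appargs.Int(10, 1, 4096)),
            ),
            Action: func(ctx *cli.Context) error { return nil },
        },
    }
    _ = app.Run(os.Args)
}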
137 vendor/github.com/Microsoft/hcsshim/internal/cni/registry_test.go generated vendored
@@ -1,137 +0,0 @@
package cni

import (
    "testing"

    "github.com/Microsoft/hcsshim/internal/guid"
    "github.com/Microsoft/hcsshim/internal/regstate"
)

func Test_LoadPersistedNamespaceConfig_NoConfig(t *testing.T) {
    pnc, err := LoadPersistedNamespaceConfig(t.Name())
    if pnc != nil {
        t.Fatal("config should be nil")
    }
    if err == nil {
        t.Fatal("err should be set")
    } else {
        if !regstate.IsNotFoundError(err) {
            t.Fatal("err should be NotFoundError")
        }
    }
}

func Test_LoadPersistedNamespaceConfig_WithConfig(t *testing.T) {
    pnc := NewPersistedNamespaceConfig(t.Name(), "test-container", guid.New())
    err := pnc.Store()
    if err != nil {
        pnc.Remove()
        t.Fatalf("store failed with: %v", err)
    }
    defer pnc.Remove()

    pnc2, err := LoadPersistedNamespaceConfig(t.Name())
    if err != nil {
        t.Fatal("should have no error on stored config")
    }
    if pnc2 == nil {
        t.Fatal("stored config should have been returned")
    } else {
        if pnc.namespaceID != pnc2.namespaceID {
            t.Fatal("actual/stored namespaceID not equal")
        }
        if pnc.ContainerID != pnc2.ContainerID {
            t.Fatal("actual/stored ContainerID not equal")
        }
        if pnc.HostUniqueID != pnc2.HostUniqueID {
            t.Fatal("actual/stored HostUniqueID not equal")
        }
        if !pnc2.stored {
            t.Fatal("stored should be true for registry load")
        }
    }
}

func Test_PersistedNamespaceConfig_StoreNew(t *testing.T) {
    pnc := NewPersistedNamespaceConfig(t.Name(), "test-container", guid.New())
    err := pnc.Store()
    if err != nil {
        pnc.Remove()
        t.Fatalf("store failed with: %v", err)
    }
    defer pnc.Remove()
}

func Test_PersistedNamespaceConfig_StoreUpdate(t *testing.T) {
    pnc := NewPersistedNamespaceConfig(t.Name(), "test-container", guid.New())
    err := pnc.Store()
    if err != nil {
        pnc.Remove()
        t.Fatalf("store failed with: %v", err)
    }
    defer pnc.Remove()

    pnc.ContainerID = "test-container2"
    pnc.HostUniqueID = guid.New()
    err = pnc.Store()
    if err != nil {
        pnc.Remove()
        t.Fatalf("store update failed with: %v", err)
    }

    // Verify the update
    pnc2, err := LoadPersistedNamespaceConfig(t.Name())
    if err != nil {
        t.Fatal("stored config should have been returned")
    }
    if pnc.ContainerID != pnc2.ContainerID {
        t.Fatal("actual/stored ContainerID not equal")
    }
    if pnc.HostUniqueID != pnc2.HostUniqueID {
        t.Fatal("actual/stored HostUniqueID not equal")
    }
}

func Test_PersistedNamespaceConfig_RemoveNotStored(t *testing.T) {
    pnc := NewPersistedNamespaceConfig(t.Name(), "test-container", guid.New())
    err := pnc.Remove()
    if err != nil {
        t.Fatalf("remove on not stored should not fail: %v", err)
    }
}

func Test_PersistedNamespaceConfig_RemoveStoredKey(t *testing.T) {
    pnc := NewPersistedNamespaceConfig(t.Name(), "test-container", guid.New())
    err := pnc.Store()
    if err != nil {
        t.Fatalf("store failed with: %v", err)
    }
    err = pnc.Remove()
    if err != nil {
        t.Fatalf("remove on stored key should not fail: %v", err)
    }
}

func Test_PersistedNamespaceConfig_RemovedOtherKey(t *testing.T) {
    pnc := NewPersistedNamespaceConfig(t.Name(), "test-container", guid.New())
    err := pnc.Store()
    if err != nil {
        t.Fatalf("store failed with: %v", err)
    }

    pnc2, err := LoadPersistedNamespaceConfig(t.Name())
    if err != nil {
        t.Fatal("should of found stored config")
    }

    err = pnc.Remove()
    if err != nil {
        t.Fatalf("remove on stored key should not fail: %v", err)
    }

    // Now remove the other key that has the invalid memory state
    err = pnc2.Remove()
    if err != nil {
        t.Fatalf("remove on in-memory already removed should not fail: %v", err)
    }
}
40 vendor/github.com/Microsoft/hcsshim/internal/copyfile/copyfile.go generated vendored
@@ -1,40 +0,0 @@
package copyfile

import (
    "fmt"
    "syscall"
    "unsafe"
)

var (
    modkernel32   = syscall.NewLazyDLL("kernel32.dll")
    procCopyFileW = modkernel32.NewProc("CopyFileW")
)

// CopyFile is a utility for copying a file - used for the LCOW scratch cache.
// Uses CopyFileW win32 API for performance.
func CopyFile(srcFile, destFile string, overwrite bool) error {
    var bFailIfExists uint32 = 1
    if overwrite {
        bFailIfExists = 0
    }

    lpExistingFileName, err := syscall.UTF16PtrFromString(srcFile)
    if err != nil {
        return err
    }
    lpNewFileName, err := syscall.UTF16PtrFromString(destFile)
    if err != nil {
        return err
    }
    r1, _, err := syscall.Syscall(
        procCopyFileW.Addr(),
        3,
        uintptr(unsafe.Pointer(lpExistingFileName)),
        uintptr(unsafe.Pointer(lpNewFileName)),
        uintptr(bFailIfExists))
    if r1 == 0 {
        return fmt.Errorf("failed CopyFileW Win32 call from '%s' to '%s': %s", srcFile, destFile, err)
    }
    return nil
}
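As a rough illustration of the removed helper (paths are hypothetical, and the internal package is only importable from inside the hcsshim module): the call fails if the destination already exists unless overwrite is true.

package main

import (
    "log"

    "github.com/Microsoft/hcsshim/internal/copyfile"
)

func main() {
    // Copy a cached scratch VHDX, refusing to clobber an existing destination.
    src := `C:\cache\scratch.vhdx`
    dst := `C:\containers\example\scratch.vhdx`
    if err := copyfile.CopyFile(src, dst, false); err != nil {
        log.Fatal(err)
    }
}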
103 vendor/github.com/Microsoft/hcsshim/internal/copywithtimeout/copywithtimeout.go generated vendored
@@ -1,103 +0,0 @@
package copywithtimeout

import (
    "bytes"
    "encoding/hex"
    "fmt"
    "io"
    "os"
    "strconv"
    "syscall"
    "time"

    "github.com/sirupsen/logrus"
)

// logDataByteCount is for an advanced debugging technique to allow
// data read/written to a processes stdio channels hex-dumped to the
// log when running at debug level or higher. It is controlled through
// the environment variable HCSSHIM_LOG_DATA_BYTE_COUNT
var logDataByteCount int64

func init() {
    bytes := os.Getenv("HCSSHIM_LOG_DATA_BYTE_COUNT")
    if len(bytes) > 0 {
        u, err := strconv.ParseUint(bytes, 10, 32)
        if err == nil {
            logDataByteCount = int64(u)
        }
    }
}

// Copy is a wrapper for io.Copy using a timeout duration
func Copy(dst io.Writer, src io.Reader, size int64, context string, timeout time.Duration) (int64, error) {
    logrus.WithFields(logrus.Fields{
        "stdval":  context,
        "size":    size,
        "timeout": timeout,
    }).Debug("hcsshim::copywithtimeout - Begin")

    type resultType struct {
        err   error
        bytes int64
    }

    done := make(chan resultType, 1)
    go func() {
        result := resultType{}
        if logrus.GetLevel() < logrus.DebugLevel || logDataByteCount == 0 {
            result.bytes, result.err = io.Copy(dst, src)
        } else {
            // In advanced debug mode where we log (hexdump format) what is copied
            // up to the number of bytes defined by environment variable
            // HCSSHIM_LOG_DATA_BYTE_COUNT
            var buf bytes.Buffer
            tee := io.TeeReader(src, &buf)
            result.bytes, result.err = io.Copy(dst, tee)
            if result.err == nil {
                size := result.bytes
                if size > logDataByteCount {
                    size = logDataByteCount
                }
                if size > 0 {
                    bytes := make([]byte, size)
                    if _, err := buf.Read(bytes); err == nil {
                        logrus.Debugf("hcsshim::copyWithTimeout - Read bytes\n%s", hex.Dump(bytes))
                    }
                }
            }
        }
        done <- result
    }()

    var result resultType
    timedout := time.After(timeout)

    select {
    case <-timedout:
        return 0, fmt.Errorf("hcsshim::copyWithTimeout: timed out (%s)", context)
    case result = <-done:
        if result.err != nil && result.err != io.EOF {
            // See https://github.com/golang/go/blob/f3f29d1dea525f48995c1693c609f5e67c046893/src/os/exec/exec_windows.go for a clue as to why we are doing this :)
            if se, ok := result.err.(syscall.Errno); ok {
                const (
                    errNoData     = syscall.Errno(232)
                    errBrokenPipe = syscall.Errno(109)
                )
                if se == errNoData || se == errBrokenPipe {
                    logrus.WithFields(logrus.Fields{
                        "stdval":        context,
                        logrus.ErrorKey: se,
                    }).Debug("hcsshim::copywithtimeout - End")
                    return result.bytes, nil
                }
            }
            return 0, fmt.Errorf("hcsshim::copyWithTimeout: error reading: '%s' after %d bytes (%s)", result.err, result.bytes, context)
        }
    }
    logrus.WithFields(logrus.Fields{
        "stdval":       context,
        "copied-bytes": result.bytes,
    }).Debug("hcsshim::copywithtimeout - Completed Successfully")
    return result.bytes, nil
}
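A minimal, hypothetical usage sketch of the removed Copy wrapper (the label string is an arbitrary tag used only for logging, and the data here is made up):

package main

import (
    "bytes"
    "log"
    "strings"
    "time"

    "github.com/Microsoft/hcsshim/internal/copywithtimeout"
)

func main() {
    src := strings.NewReader("hello from a container stdout stream")
    var dst bytes.Buffer
    // Copies src into dst, failing if the copy has not completed within 5 seconds.
    n, err := copywithtimeout.Copy(&dst, src, int64(src.Len()), "stdout", 5*time.Second)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("copied %d bytes", n)
}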
136 vendor/github.com/Microsoft/hcsshim/internal/guid/guid_test.go generated vendored
@@ -1,136 +0,0 @@
package guid

import (
    "encoding/json"
    "fmt"
    "testing"
)

func Test_New(t *testing.T) {
    g := New()
    g2 := New()
    if g == g2 {
        t.Fatal("GUID's should not be equal when generated")
    }
}

func Test_FromString(t *testing.T) {
    g := New()
    g2 := FromString(g.String())
    if g != g2 {
        t.Fatalf("GUID's not equal %v, %v", g, g2)
    }
}

func Test_MarshalJSON(t *testing.T) {
    g := New()
    gs := g.String()
    js, err := json.Marshal(g)
    if err != nil {
        t.Fatalf("failed to marshal with %v", err)
    }
    gsJSON := fmt.Sprintf("\"%s\"", gs)
    if gsJSON != string(js) {
        t.Fatalf("failed to marshal %s != %s", gsJSON, string(js))
    }
}

func Test_MarshalJSON_Ptr(t *testing.T) {
    g := New()
    gs := g.String()
    js, err := json.Marshal(&g)
    if err != nil {
        t.Fatalf("failed to marshal with %v", err)
    }
    gsJSON := fmt.Sprintf("\"%s\"", gs)
    if gsJSON != string(js) {
        t.Fatalf("failed to marshal %s != %s", gsJSON, string(js))
    }
}

func Test_MarshalJSON_Nested(t *testing.T) {
    type test struct {
        G GUID
    }
    t1 := test{
        G: New(),
    }
    gs := t1.G.String()
    js, err := json.Marshal(t1)
    if err != nil {
        t.Fatalf("failed to marshal with %v", err)
    }
    gsJSON := fmt.Sprintf("{\"G\":\"%s\"}", gs)
    if gsJSON != string(js) {
        t.Fatalf("failed to marshal %s != %s", gsJSON, string(js))
    }
}

func Test_MarshalJSON_Nested_Ptr(t *testing.T) {
    type test struct {
        G *GUID
    }
    v := New()
    t1 := test{
        G: &v,
    }
    gs := t1.G.String()
    js, err := json.Marshal(t1)
    if err != nil {
        t.Fatalf("failed to marshal with %v", err)
    }
    gsJSON := fmt.Sprintf("{\"G\":\"%s\"}", gs)
    if gsJSON != string(js) {
        t.Fatalf("failed to marshal %s != %s", gsJSON, string(js))
    }
}

func Test_UnmarshalJSON(t *testing.T) {
    g := New()
    js, _ := json.Marshal(g)
    var g2 GUID
    err := json.Unmarshal(js, &g2)
    if err != nil {
        t.Fatalf("failed to unmarshal with: %v", err)
    }
    if g != g2 {
        t.Fatalf("failed to unmarshal %s != %s", g, g2)
    }
}

func Test_UnmarshalJSON_Nested(t *testing.T) {
    type test struct {
        G GUID
    }
    t1 := test{
        G: New(),
    }
    js, _ := json.Marshal(t1)
    var t2 test
    err := json.Unmarshal(js, &t2)
    if err != nil {
        t.Fatalf("failed to unmarshal with: %v", err)
    }
    if t1.G != t2.G {
        t.Fatalf("failed to unmarshal %v != %v", t1.G, t2.G)
    }
}

func Test_UnmarshalJSON_Nested_Ptr(t *testing.T) {
    type test struct {
        G *GUID
    }
    v := New()
    t1 := test{
        G: &v,
    }
    js, _ := json.Marshal(t1)
    var t2 test
    err := json.Unmarshal(js, &t2)
    if err != nil {
        t.Fatalf("failed to unmarshal with: %v", err)
    }
    if *t1.G != *t2.G {
        t.Fatalf("failed to unmarshal %v != %v", t1.G, t2.G)
    }
}
9 vendor/github.com/Microsoft/hcsshim/internal/hcs/log.go generated vendored
@@ -7,9 +7,14 @@ func logOperationBegin(ctx logrus.Fields, msg string) {
}

func logOperationEnd(ctx logrus.Fields, msg string, err error) {
    // Copy the log and fields first.
    log := logrus.WithFields(ctx)
    if err == nil {
        logrus.WithFields(ctx).Debug(msg)
        log.Debug(msg)
    } else {
        logrus.WithFields(ctx).WithError(err).Error(msg)
        // Edit only the copied field data to avoid race conditions on the
        // write.
        log.Data[logrus.ErrorKey] = err
        log.Error(msg)
    }
}
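The point of this hunk is that logrus.WithFields copies the field map into a new entry, so mutating the copy's Data does not write to the shared ctx map. A small standalone sketch of that copy semantics, not taken from the patch itself:

package main

import "github.com/sirupsen/logrus"

func main() {
    shared := logrus.Fields{"container-id": "abc"}

    // WithFields copies the field map into a new *logrus.Entry. Writing to
    // entry.Data afterwards therefore does not touch the shared map, which is
    // what the patched logOperationEnd relies on to avoid racing writers.
    entry := logrus.WithFields(shared)
    entry.Data[logrus.ErrorKey] = "boom"
    entry.Error("operation failed")

    logrus.WithFields(shared).Info("shared map still has no error key")
}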
33 vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go generated vendored
@@ -31,9 +31,8 @@ func newProcess(process hcsProcess, processID int, computeSystem *System) *Proce
        processID: processID,
        system:    computeSystem,
        logctx: logrus.Fields{
            logfields.HCSOperation: "",
            logfields.ContainerID:  computeSystem.ID(),
            logfields.ProcessID:    processID,
            logfields.ContainerID: computeSystem.ID(),
            logfields.ProcessID:   processID,
        },
    }
}
@@ -88,13 +87,12 @@ func (process *Process) SystemID() string {
}

func (process *Process) logOperationBegin(operation string) {
    process.logctx[logfields.HCSOperation] = operation
    logOperationBegin(
        process.logctx,
        "hcsshim::Process - Begin Operation")
        operation+" - Begin Operation")
}

func (process *Process) logOperationEnd(err error) {
func (process *Process) logOperationEnd(operation string, err error) {
    var result string
    if err == nil {
        result = "Success"
@@ -104,9 +102,8 @@ func (process *Process) logOperationEnd(err error) {

    logOperationEnd(
        process.logctx,
        "hcsshim::Process - End Operation - "+result,
        operation+" - End Operation - "+result,
        err)
    process.logctx[logfields.HCSOperation] = ""
}

// Signal signals the process with `options`.
@@ -116,7 +113,7 @@ func (process *Process) Signal(options guestrequest.SignalProcessOptions) (err e

    operation := "hcsshim::Process::Signal"
    process.logOperationBegin(operation)
    defer func() { process.logOperationEnd(err) }()
    defer func() { process.logOperationEnd(operation, err) }()

    if process.handle == 0 {
        return makeProcessError(process, operation, ErrAlreadyClosed, nil)
@@ -148,7 +145,7 @@ func (process *Process) Kill() (err error) {

    operation := "hcsshim::Process::Kill"
    process.logOperationBegin(operation)
    defer func() { process.logOperationEnd(err) }()
    defer func() { process.logOperationEnd(operation, err) }()

    if process.handle == 0 {
        return makeProcessError(process, operation, ErrAlreadyClosed, nil)
@@ -170,7 +167,7 @@ func (process *Process) Kill() (err error) {
func (process *Process) Wait() (err error) {
    operation := "hcsshim::Process::Wait"
    process.logOperationBegin(operation)
    defer func() { process.logOperationEnd(err) }()
    defer func() { process.logOperationEnd(operation, err) }()

    err = waitForNotification(process.callbackNumber, hcsNotificationProcessExited, nil)
    if err != nil {
@@ -185,7 +182,7 @@ func (process *Process) Wait() (err error) {
func (process *Process) WaitTimeout(timeout time.Duration) (err error) {
    operation := "hcssshim::Process::WaitTimeout"
    process.logOperationBegin(operation)
    defer func() { process.logOperationEnd(err) }()
    defer func() { process.logOperationEnd(operation, err) }()

    err = waitForNotification(process.callbackNumber, hcsNotificationProcessExited, &timeout)
    if err != nil {
@@ -202,7 +199,7 @@ func (process *Process) ResizeConsole(width, height uint16) (err error) {

    operation := "hcsshim::Process::ResizeConsole"
    process.logOperationBegin(operation)
    defer func() { process.logOperationEnd(err) }()
    defer func() { process.logOperationEnd(operation, err) }()

    if process.handle == 0 {
        return makeProcessError(process, operation, ErrAlreadyClosed, nil)
@@ -239,7 +236,7 @@ func (process *Process) Properties() (_ *ProcessStatus, err error) {

    operation := "hcsshim::Process::Properties"
    process.logOperationBegin(operation)
    defer func() { process.logOperationEnd(err) }()
    defer func() { process.logOperationEnd(operation, err) }()

    if process.handle == 0 {
        return nil, makeProcessError(process, operation, ErrAlreadyClosed, nil)
@@ -275,7 +272,7 @@ func (process *Process) Properties() (_ *ProcessStatus, err error) {
func (process *Process) ExitCode() (_ int, err error) {
    operation := "hcsshim::Process::ExitCode"
    process.logOperationBegin(operation)
    defer func() { process.logOperationEnd(err) }()
    defer func() { process.logOperationEnd(operation, err) }()

    properties, err := process.Properties()
    if err != nil {
@@ -302,7 +299,7 @@ func (process *Process) Stdio() (_ io.WriteCloser, _ io.ReadCloser, _ io.ReadClo

    operation := "hcsshim::Process::Stdio"
    process.logOperationBegin(operation)
    defer func() { process.logOperationEnd(err) }()
    defer func() { process.logOperationEnd(operation, err) }()

    if process.handle == 0 {
        return nil, nil, nil, makeProcessError(process, operation, ErrAlreadyClosed, nil)
@@ -346,7 +343,7 @@ func (process *Process) CloseStdin() (err error) {

    operation := "hcsshim::Process::CloseStdin"
    process.logOperationBegin(operation)
    defer func() { process.logOperationEnd(err) }()
    defer func() { process.logOperationEnd(operation, err) }()

    if process.handle == 0 {
        return makeProcessError(process, operation, ErrAlreadyClosed, nil)
@@ -384,7 +381,7 @@ func (process *Process) Close() (err error) {

    operation := "hcsshim::Process::Close"
    process.logOperationBegin(operation)
    defer func() { process.logOperationEnd(err) }()
    defer func() { process.logOperationEnd(operation, err) }()

    // Don't double free this
    if process.handle == 0 {
57 vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go generated vendored
@@ -49,20 +49,18 @@ func newSystem(id string) *System {
    return &System{
        id: id,
        logctx: logrus.Fields{
            logfields.HCSOperation: "",
            logfields.ContainerID:  id,
            logfields.ContainerID: id,
        },
    }
}

func (computeSystem *System) logOperationBegin(operation string) {
    computeSystem.logctx[logfields.HCSOperation] = operation
    logOperationBegin(
        computeSystem.logctx,
        "hcsshim::ComputeSystem - Begin Operation")
        operation+" - Begin Operation")
}

func (computeSystem *System) logOperationEnd(err error) {
func (computeSystem *System) logOperationEnd(operation string, err error) {
    var result string
    if err == nil {
        result = "Success"
@@ -72,9 +70,8 @@ func (computeSystem *System) logOperationEnd(err error) {

    logOperationEnd(
        computeSystem.logctx,
        "hcsshim::ComputeSystem - End Operation - "+result,
        operation+" - End Operation - "+result,
        err)
    computeSystem.logctx[logfields.HCSOperation] = ""
}

// CreateComputeSystem creates a new compute system with the given configuration but does not start it.
@@ -83,7 +80,7 @@ func CreateComputeSystem(id string, hcsDocumentInterface interface{}) (_ *System

    computeSystem := newSystem(id)
    computeSystem.logOperationBegin(operation)
    defer func() { computeSystem.logOperationEnd(err) }()
    defer func() { computeSystem.logOperationEnd(operation, err) }()

    hcsDocumentB, err := json.Marshal(hcsDocumentInterface)
    if err != nil {
@@ -135,9 +132,9 @@ func OpenComputeSystem(id string) (_ *System, err error) {
    computeSystem.logOperationBegin(operation)
    defer func() {
        if IsNotExist(err) {
            computeSystem.logOperationEnd(nil)
            computeSystem.logOperationEnd(operation, nil)
        } else {
            computeSystem.logOperationEnd(err)
            computeSystem.logOperationEnd(operation, err)
        }
    }()

@@ -163,12 +160,10 @@ func OpenComputeSystem(id string) (_ *System, err error) {
// GetComputeSystems gets a list of the compute systems on the system that match the query
func GetComputeSystems(q schema1.ComputeSystemQuery) (_ []schema1.ContainerProperties, err error) {
    operation := "hcsshim::GetComputeSystems"
    fields := logrus.Fields{
        logfields.HCSOperation: operation,
    }
    fields := logrus.Fields{}
    logOperationBegin(
        fields,
        "hcsshim::ComputeSystem - Begin Operation")
        operation+" - Begin Operation")

    defer func() {
        var result string
@@ -180,7 +175,7 @@ func GetComputeSystems(q schema1.ComputeSystemQuery) (_ []schema1.ContainerPrope

        logOperationEnd(
            fields,
            "hcsshim::ComputeSystem - End Operation - "+result,
            operation+" - End Operation - "+result,
            err)
    }()

@@ -227,7 +222,7 @@ func (computeSystem *System) Start() (err error) {

    operation := "hcsshim::ComputeSystem::Start"
    computeSystem.logOperationBegin(operation)
    defer func() { computeSystem.logOperationEnd(err) }()
    defer func() { computeSystem.logOperationEnd(operation, err) }()

    if computeSystem.handle == 0 {
        return makeSystemError(computeSystem, "Start", "", ErrAlreadyClosed, nil)
@@ -286,9 +281,9 @@ func (computeSystem *System) Shutdown() (err error) {
    computeSystem.logOperationBegin(operation)
    defer func() {
        if IsAlreadyStopped(err) {
            computeSystem.logOperationEnd(nil)
            computeSystem.logOperationEnd(operation, nil)
        } else {
            computeSystem.logOperationEnd(err)
            computeSystem.logOperationEnd(operation, err)
        }
    }()

@@ -318,9 +313,9 @@ func (computeSystem *System) Terminate() (err error) {
    computeSystem.logOperationBegin(operation)
    defer func() {
        if IsPending(err) {
            computeSystem.logOperationEnd(nil)
            computeSystem.logOperationEnd(operation, nil)
        } else {
            computeSystem.logOperationEnd(err)
            computeSystem.logOperationEnd(operation, err)
        }
    }()

@@ -344,7 +339,7 @@ func (computeSystem *System) Terminate() (err error) {
func (computeSystem *System) Wait() (err error) {
    operation := "hcsshim::ComputeSystem::Wait"
    computeSystem.logOperationBegin(operation)
    defer func() { computeSystem.logOperationEnd(err) }()
    defer func() { computeSystem.logOperationEnd(operation, err) }()

    err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, nil)
    if err != nil {
@@ -359,10 +354,10 @@ func (computeSystem *System) Wait() (err error) {
func (computeSystem *System) WaitExpectedError(expected error) (err error) {
    operation := "hcsshim::ComputeSystem::WaitExpectedError"
    computeSystem.logOperationBegin(operation)
    defer func() { computeSystem.logOperationEnd(err) }()
    defer func() { computeSystem.logOperationEnd(operation, err) }()

    err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, nil)
    if err != nil && err != expected {
    if err != nil && getInnerError(err) != expected {
        return makeSystemError(computeSystem, "WaitExpectedError", "", err, nil)
    }

@@ -374,7 +369,7 @@ func (computeSystem *System) WaitExpectedError(expected error) (err error) {
func (computeSystem *System) WaitTimeout(timeout time.Duration) (err error) {
    operation := "hcsshim::ComputeSystem::WaitTimeout"
    computeSystem.logOperationBegin(operation)
    defer func() { computeSystem.logOperationEnd(err) }()
    defer func() { computeSystem.logOperationEnd(operation, err) }()

    err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, &timeout)
    if err != nil {
@@ -390,7 +385,7 @@ func (computeSystem *System) Properties(types ...schema1.PropertyType) (_ *schem

    operation := "hcsshim::ComputeSystem::Properties"
    computeSystem.logOperationBegin(operation)
    defer func() { computeSystem.logOperationEnd(err) }()
    defer func() { computeSystem.logOperationEnd(operation, err) }()

    queryj, err := json.Marshal(schema1.PropertyQuery{types})
    if err != nil {
@@ -429,7 +424,7 @@ func (computeSystem *System) Pause() (err error) {

    operation := "hcsshim::ComputeSystem::Pause"
    computeSystem.logOperationBegin(operation)
    defer func() { computeSystem.logOperationEnd(err) }()
    defer func() { computeSystem.logOperationEnd(operation, err) }()

    if computeSystem.handle == 0 {
        return makeSystemError(computeSystem, "Pause", "", ErrAlreadyClosed, nil)
@@ -454,7 +449,7 @@ func (computeSystem *System) Resume() (err error) {

    operation := "hcsshim::ComputeSystem::Resume"
    computeSystem.logOperationBegin(operation)
    defer func() { computeSystem.logOperationEnd(err) }()
    defer func() { computeSystem.logOperationEnd(operation, err) }()

    if computeSystem.handle == 0 {
        return makeSystemError(computeSystem, "Resume", "", ErrAlreadyClosed, nil)
@@ -479,7 +474,7 @@ func (computeSystem *System) CreateProcess(c interface{}) (_ *Process, err error

    operation := "hcsshim::ComputeSystem::CreateProcess"
    computeSystem.logOperationBegin(operation)
    defer func() { computeSystem.logOperationEnd(err) }()
    defer func() { computeSystem.logOperationEnd(operation, err) }()

    var (
        processInfo hcsProcessInformation
@@ -539,7 +534,7 @@ func (computeSystem *System) OpenProcess(pid int) (_ *Process, err error) {

    operation := "hcsshim::ComputeSystem::OpenProcess"
    computeSystem.logOperationBegin(operation)
    defer func() { computeSystem.logOperationEnd(err) }()
    defer func() { computeSystem.logOperationEnd(operation, err) }()

    var (
        processHandle hcsProcess
@@ -573,7 +568,7 @@ func (computeSystem *System) Close() (err error) {

    operation := "hcsshim::ComputeSystem::Close"
    computeSystem.logOperationBegin(operation)
    defer func() { computeSystem.logOperationEnd(err) }()
    defer func() { computeSystem.logOperationEnd(operation, err) }()

    // Don't double free this
    if computeSystem.handle == 0 {
@@ -660,7 +655,7 @@ func (computeSystem *System) Modify(config interface{}) (err error) {

    operation := "hcsshim::ComputeSystem::Modify"
    computeSystem.logOperationBegin(operation)
    defer func() { computeSystem.logOperationEnd(err) }()
    defer func() { computeSystem.logOperationEnd(operation, err) }()

    if computeSystem.handle == 0 {
        return makeSystemError(computeSystem, "Modify", "", ErrAlreadyClosed, nil)
173 vendor/github.com/Microsoft/hcsshim/internal/hcsoci/create.go generated vendored
@@ -1,173 +0,0 @@
// +build windows

package hcsoci

import (
    "errors"
    "fmt"
    "os"
    "path/filepath"
    "strconv"

    "github.com/Microsoft/hcsshim/internal/guid"
    "github.com/Microsoft/hcsshim/internal/hcs"
    "github.com/Microsoft/hcsshim/internal/schema2"
    "github.com/Microsoft/hcsshim/internal/schemaversion"
    "github.com/Microsoft/hcsshim/internal/uvm"
    specs "github.com/opencontainers/runtime-spec/specs-go"
    "github.com/sirupsen/logrus"
)

// CreateOptions are the set of fields used to call CreateContainer().
// Note: In the spec, the LayerFolders must be arranged in the same way in which
// moby configures them: layern, layern-1,...,layer2,layer1,scratch
// where layer1 is the base read-only layer, layern is the top-most read-only
// layer, and scratch is the RW layer. This is for historical reasons only.
type CreateOptions struct {

    // Common parameters
    ID               string             // Identifier for the container
    Owner            string             // Specifies the owner. Defaults to executable name.
    Spec             *specs.Spec        // Definition of the container or utility VM being created
    SchemaVersion    *hcsschema.Version // Requested Schema Version. Defaults to v2 for RS5, v1 for RS1..RS4
    HostingSystem    *uvm.UtilityVM     // Utility or service VM in which the container is to be created.
    NetworkNamespace string             // Host network namespace to use (overrides anything in the spec)

    // This is an advanced debugging parameter. It allows for diagnosibility by leaving a containers
    // resources allocated in case of a failure. Thus you would be able to use tools such as hcsdiag
    // to look at the state of a utility VM to see what resources were allocated. Obviously the caller
    // must a) not tear down the utility VM on failure (or pause in some way) and b) is responsible for
    // performing the ReleaseResources() call themselves.
    DoNotReleaseResourcesOnFailure bool
}

// createOptionsInternal is the set of user-supplied create options, but includes internal
// fields for processing the request once user-supplied stuff has been validated.
type createOptionsInternal struct {
    *CreateOptions

    actualSchemaVersion    *hcsschema.Version // Calculated based on Windows build and optional caller-supplied override
    actualID               string             // Identifier for the container
    actualOwner            string             // Owner for the container
    actualNetworkNamespace string
}

// CreateContainer creates a container. It can cope with a wide variety of
// scenarios, including v1 HCS schema calls, as well as more complex v2 HCS schema
// calls. Note we always return the resources that have been allocated, even in the
// case of an error. This provides support for the debugging option not to
// release the resources on failure, so that the client can make the necessary
// call to release resources that have been allocated as part of calling this function.
func CreateContainer(createOptions *CreateOptions) (_ *hcs.System, _ *Resources, err error) {
    logrus.Debugf("hcsshim::CreateContainer options: %+v", createOptions)

    coi := &createOptionsInternal{
        CreateOptions: createOptions,
        actualID:      createOptions.ID,
        actualOwner:   createOptions.Owner,
    }

    // Defaults if omitted by caller.
    if coi.actualID == "" {
        coi.actualID = guid.New().String()
    }
    if coi.actualOwner == "" {
        coi.actualOwner = filepath.Base(os.Args[0])
    }

    if coi.Spec == nil {
        return nil, nil, fmt.Errorf("Spec must be supplied")
    }

    if coi.HostingSystem != nil {
        // By definition, a hosting system can only be supplied for a v2 Xenon.
        coi.actualSchemaVersion = schemaversion.SchemaV21()
    } else {
        coi.actualSchemaVersion = schemaversion.DetermineSchemaVersion(coi.SchemaVersion)
        logrus.Debugf("hcsshim::CreateContainer using schema %s", schemaversion.String(coi.actualSchemaVersion))
    }

    resources := &Resources{}
    defer func() {
        if err != nil {
            if !coi.DoNotReleaseResourcesOnFailure {
                ReleaseResources(resources, coi.HostingSystem, true)
            }
        }
    }()

    if coi.HostingSystem != nil {
        n := coi.HostingSystem.ContainerCounter()
        if coi.Spec.Linux != nil {
            resources.containerRootInUVM = "/run/gcs/c/" + strconv.FormatUint(n, 16)
        } else {
            resources.containerRootInUVM = `C:\c\` + strconv.FormatUint(n, 16)
        }
    }

    // Create a network namespace if necessary.
    if coi.Spec.Windows != nil &&
        coi.Spec.Windows.Network != nil &&
        schemaversion.IsV21(coi.actualSchemaVersion) {

        if coi.NetworkNamespace != "" {
            resources.netNS = coi.NetworkNamespace
        } else {
            err := createNetworkNamespace(coi, resources)
            if err != nil {
                return nil, resources, err
            }
        }
        coi.actualNetworkNamespace = resources.netNS
        if coi.HostingSystem != nil {
            endpoints, err := getNamespaceEndpoints(coi.actualNetworkNamespace)
            if err != nil {
                return nil, resources, err
            }
            err = coi.HostingSystem.AddNetNS(coi.actualNetworkNamespace, endpoints)
            if err != nil {
                return nil, resources, err
            }
            resources.addedNetNSToVM = true
        }
    }

    var hcsDocument interface{}
    logrus.Debugf("hcsshim::CreateContainer allocating resources")
    if coi.Spec.Linux != nil {
        if schemaversion.IsV10(coi.actualSchemaVersion) {
            return nil, resources, errors.New("LCOW v1 not supported")
        }
        logrus.Debugf("hcsshim::CreateContainer allocateLinuxResources")
        err = allocateLinuxResources(coi, resources)
        if err != nil {
            logrus.Debugf("failed to allocateLinuxResources %s", err)
            return nil, resources, err
        }
        hcsDocument, err = createLinuxContainerDocument(coi, resources.containerRootInUVM)
        if err != nil {
            logrus.Debugf("failed createHCSContainerDocument %s", err)
            return nil, resources, err
        }
    } else {
        err = allocateWindowsResources(coi, resources)
        if err != nil {
            logrus.Debugf("failed to allocateWindowsResources %s", err)
            return nil, resources, err
        }
        logrus.Debugf("hcsshim::CreateContainer creating container document")
        hcsDocument, err = createWindowsContainerDocument(coi)
        if err != nil {
            logrus.Debugf("failed createHCSContainerDocument %s", err)
            return nil, resources, err
        }
    }

    logrus.Debugf("hcsshim::CreateContainer creating compute system")
    system, err := hcs.CreateComputeSystem(coi.actualID, hcsDocument)
    if err != nil {
        logrus.Debugf("failed to CreateComputeSystem %s", err)
        return nil, resources, err
    }
    return system, resources, err
}
78 vendor/github.com/Microsoft/hcsshim/internal/hcsoci/create_test.go generated vendored
@@ -1,78 +0,0 @@
// +build windows,functional

//
// These unit tests must run on a system setup to run both Argons and Xenons,
// have docker installed, and have the nanoserver (WCOW) and alpine (LCOW)
// base images installed. The nanoserver image MUST match the build of the
// host.
//
// We rely on docker as the tools to extract a container image aren't
// open source. We use it to find the location of the base image on disk.
//

package hcsoci

//import (
//	"bytes"
//	"encoding/json"
//	"io/ioutil"
//	"os"
//	"os/exec"
//	"path/filepath"
//	"strings"
//	"testing"

//	"github.com/Microsoft/hcsshim/internal/schemaversion"
//	_ "github.com/Microsoft/hcsshim/test/assets"
//	specs "github.com/opencontainers/runtime-spec/specs-go"
//	"github.com/sirupsen/logrus"
//)

//func startUVM(t *testing.T, uvm *UtilityVM) {
//	if err := uvm.Start(); err != nil {
//		t.Fatalf("UVM %s Failed start: %s", uvm.Id, err)
//	}
//}

//// Helper to shoot a utility VM
//func terminateUtilityVM(t *testing.T, uvm *UtilityVM) {
//	if err := uvm.Terminate(); err != nil {
//		t.Fatalf("Failed terminate utility VM %s", err)
//	}
//}

//// TODO: Test UVMResourcesFromContainerSpec
//func TestUVMSizing(t *testing.T) {
//	t.Skip("for now - not implemented at all")
//}

//// TestID validates that the requested ID is retrieved
//func TestID(t *testing.T) {
//	t.Skip("fornow")
//	tempDir := createWCOWTempDirWithSandbox(t)
//	defer os.RemoveAll(tempDir)

//	layers := append(layersNanoserver, tempDir)
//	mountPath, err := mountContainerLayers(layers, nil)
//	if err != nil {
//		t.Fatalf("failed to mount container storage: %s", err)
//	}
//	defer unmountContainerLayers(layers, nil, unmountOperationAll)

//	c, err := CreateContainer(&CreateOptions{
//		Id:            "gruntbuggly",
//		SchemaVersion: schemaversion.SchemaV21(),
//		Spec: &specs.Spec{
//			Windows: &specs.Windows{LayerFolders: layers},
//			Root:    &specs.Root{Path: mountPath.(string)},
//		},
//	})
//	if err != nil {
//		t.Fatalf("Failed create: %s", err)
//	}
//	if c.ID() != "gruntbuggly" {
//		t.Fatalf("id not set correctly: %s", c.ID())
//	}

//	c.Terminate()
//}
115 vendor/github.com/Microsoft/hcsshim/internal/hcsoci/hcsdoc_lcow.go generated vendored
@@ -1,115 +0,0 @@
// +build windows

package hcsoci

import (
    "encoding/json"

    "github.com/Microsoft/hcsshim/internal/schema2"
    "github.com/Microsoft/hcsshim/internal/schemaversion"
    specs "github.com/opencontainers/runtime-spec/specs-go"
    "github.com/sirupsen/logrus"
)

func createLCOWSpec(coi *createOptionsInternal) (*specs.Spec, error) {
    // Remarshal the spec to perform a deep copy.
    j, err := json.Marshal(coi.Spec)
    if err != nil {
        return nil, err
    }
    spec := &specs.Spec{}
    err = json.Unmarshal(j, spec)
    if err != nil {
        return nil, err
    }

    // TODO
    // Translate the mounts. The root has already been translated in
    // allocateLinuxResources.
    /*
        for i := range spec.Mounts {
            spec.Mounts[i].Source = "???"
            spec.Mounts[i].Destination = "???"
        }
    */

    // Linux containers don't care about Windows aspects of the spec except the
    // network namespace
    spec.Windows = nil
    if coi.Spec.Windows != nil &&
        coi.Spec.Windows.Network != nil &&
        coi.Spec.Windows.Network.NetworkNamespace != "" {
        spec.Windows = &specs.Windows{
            Network: &specs.WindowsNetwork{
                NetworkNamespace: coi.Spec.Windows.Network.NetworkNamespace,
            },
        }
    }

    // Hooks are not supported (they should be run in the host)
    spec.Hooks = nil

    // Clear unsupported features
    if spec.Linux.Resources != nil {
        spec.Linux.Resources.Devices = nil
        spec.Linux.Resources.Memory = nil
        spec.Linux.Resources.Pids = nil
        spec.Linux.Resources.BlockIO = nil
        spec.Linux.Resources.HugepageLimits = nil
        spec.Linux.Resources.Network = nil
    }
    spec.Linux.Seccomp = nil

    // Clear any specified namespaces
    var namespaces []specs.LinuxNamespace
    for _, ns := range spec.Linux.Namespaces {
        switch ns.Type {
        case specs.NetworkNamespace:
        default:
            ns.Path = ""
            namespaces = append(namespaces, ns)
        }
    }
    spec.Linux.Namespaces = namespaces

    return spec, nil
}

// This is identical to hcsschema.ComputeSystem but HostedSystem is an LCOW specific type - the schema docs only include WCOW.
type linuxComputeSystem struct {
    Owner                             string                    `json:"Owner,omitempty"`
    SchemaVersion                     *hcsschema.Version        `json:"SchemaVersion,omitempty"`
    HostingSystemId                   string                    `json:"HostingSystemId,omitempty"`
    HostedSystem                      *linuxHostedSystem        `json:"HostedSystem,omitempty"`
    Container                         *hcsschema.Container      `json:"Container,omitempty"`
    VirtualMachine                    *hcsschema.VirtualMachine `json:"VirtualMachine,omitempty"`
    ShouldTerminateOnLastHandleClosed bool                      `json:"ShouldTerminateOnLastHandleClosed,omitempty"`
}

type linuxHostedSystem struct {
    SchemaVersion    *hcsschema.Version
    OciBundlePath    string
    OciSpecification *specs.Spec
}

func createLinuxContainerDocument(coi *createOptionsInternal, guestRoot string) (interface{}, error) {
    spec, err := createLCOWSpec(coi)
    if err != nil {
        return nil, err
    }

    logrus.Debugf("hcsshim::createLinuxContainerDoc: guestRoot:%s", guestRoot)
    v2 := &linuxComputeSystem{
        Owner:                             coi.actualOwner,
        SchemaVersion:                     schemaversion.SchemaV21(),
        ShouldTerminateOnLastHandleClosed: true,
        HostingSystemId:                   coi.HostingSystem.ID(),
        HostedSystem: &linuxHostedSystem{
            SchemaVersion:    schemaversion.SchemaV21(),
            OciBundlePath:    guestRoot,
            OciSpecification: spec,
        },
    }

    return v2, nil
}
273
vendor/github.com/Microsoft/hcsshim/internal/hcsoci/hcsdoc_wcow.go
generated
vendored
273
vendor/github.com/Microsoft/hcsshim/internal/hcsoci/hcsdoc_wcow.go
generated
vendored
@ -1,273 +0,0 @@
|
||||
// +build windows
|
||||
|
||||
package hcsoci
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/Microsoft/hcsshim/internal/schema1"
|
||||
"github.com/Microsoft/hcsshim/internal/schema2"
|
||||
"github.com/Microsoft/hcsshim/internal/schemaversion"
|
||||
"github.com/Microsoft/hcsshim/internal/uvm"
|
||||
"github.com/Microsoft/hcsshim/internal/uvmfolder"
|
||||
"github.com/Microsoft/hcsshim/internal/wclayer"
|
||||
"github.com/Microsoft/hcsshim/osversion"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// createWindowsContainerDocument creates a document suitable for calling HCS to create
|
||||
// a container, both hosted and process isolated. It can create both v1 and v2
|
||||
// schema, WCOW only. The containers storage should have been mounted already.
|
||||
func createWindowsContainerDocument(coi *createOptionsInternal) (interface{}, error) {
|
||||
logrus.Debugf("hcsshim: CreateHCSContainerDocument")
|
||||
// TODO: Make this safe if exported so no null pointer dereferences.
|
||||
|
||||
if coi.Spec == nil {
|
||||
return nil, fmt.Errorf("cannot create HCS container document - OCI spec is missing")
|
||||
}
|
||||
|
||||
if coi.Spec.Windows == nil {
|
||||
return nil, fmt.Errorf("cannot create HCS container document - OCI spec Windows section is missing ")
|
||||
}
|
||||
|
||||
v1 := &schema1.ContainerConfig{
|
||||
SystemType: "Container",
|
||||
Name: coi.actualID,
|
||||
Owner: coi.actualOwner,
|
||||
HvPartition: false,
|
||||
IgnoreFlushesDuringBoot: coi.Spec.Windows.IgnoreFlushesDuringBoot,
|
||||
}
|
||||
|
||||
// IgnoreFlushesDuringBoot is a property of the SCSI attachment for the scratch. Set when it's hot-added to the utility VM
|
||||
// ID is a property on the create call in V2 rather than part of the schema.
|
||||
v2 := &hcsschema.ComputeSystem{
|
||||
Owner: coi.actualOwner,
|
||||
SchemaVersion: schemaversion.SchemaV21(),
|
||||
ShouldTerminateOnLastHandleClosed: true,
|
||||
}
|
||||
v2Container := &hcsschema.Container{Storage: &hcsschema.Storage{}}
|
||||
|
||||
// TODO: Still want to revisit this.
|
||||
if coi.Spec.Windows.LayerFolders == nil || len(coi.Spec.Windows.LayerFolders) < 2 {
|
||||
return nil, fmt.Errorf("invalid spec - not enough layer folders supplied")
|
||||
}
|
||||
|
||||
if coi.Spec.Hostname != "" {
|
||||
v1.HostName = coi.Spec.Hostname
|
||||
v2Container.GuestOs = &hcsschema.GuestOs{HostName: coi.Spec.Hostname}
|
||||
}
|
||||
|
||||
if coi.Spec.Windows.Resources != nil {
|
||||
if coi.Spec.Windows.Resources.CPU != nil {
|
||||
if coi.Spec.Windows.Resources.CPU.Count != nil ||
|
||||
coi.Spec.Windows.Resources.CPU.Shares != nil ||
|
||||
coi.Spec.Windows.Resources.CPU.Maximum != nil {
|
||||
v2Container.Processor = &hcsschema.Processor{}
|
||||
}
|
||||
if coi.Spec.Windows.Resources.CPU.Count != nil {
|
||||
cpuCount := *coi.Spec.Windows.Resources.CPU.Count
|
||||
hostCPUCount := uint64(runtime.NumCPU())
|
||||
if cpuCount > hostCPUCount {
|
||||
logrus.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount)
|
||||
cpuCount = hostCPUCount
|
||||
}
|
||||
v1.ProcessorCount = uint32(cpuCount)
|
||||
v2Container.Processor.Count = int32(cpuCount)
|
||||
}
|
||||
if coi.Spec.Windows.Resources.CPU.Shares != nil {
|
||||
v1.ProcessorWeight = uint64(*coi.Spec.Windows.Resources.CPU.Shares)
|
||||
v2Container.Processor.Weight = int32(v1.ProcessorWeight)
|
||||
}
|
||||
if coi.Spec.Windows.Resources.CPU.Maximum != nil {
|
||||
v1.ProcessorMaximum = int64(*coi.Spec.Windows.Resources.CPU.Maximum)
|
||||
v2Container.Processor.Maximum = int32(v1.ProcessorMaximum)
|
||||
}
|
||||
}
|
||||
if coi.Spec.Windows.Resources.Memory != nil {
|
||||
if coi.Spec.Windows.Resources.Memory.Limit != nil {
|
||||
v1.MemoryMaximumInMB = int64(*coi.Spec.Windows.Resources.Memory.Limit) / 1024 / 1024
|
||||
v2Container.Memory = &hcsschema.Memory{SizeInMB: int32(v1.MemoryMaximumInMB)}
|
||||
|
||||
}
|
||||
}
|
||||
if coi.Spec.Windows.Resources.Storage != nil {
|
||||
if coi.Spec.Windows.Resources.Storage.Bps != nil || coi.Spec.Windows.Resources.Storage.Iops != nil {
|
||||
v2Container.Storage.QoS = &hcsschema.StorageQoS{}
|
||||
}
|
||||
if coi.Spec.Windows.Resources.Storage.Bps != nil {
|
||||
v1.StorageBandwidthMaximum = *coi.Spec.Windows.Resources.Storage.Bps
|
||||
v2Container.Storage.QoS.BandwidthMaximum = int32(v1.StorageBandwidthMaximum)
|
||||
}
|
||||
if coi.Spec.Windows.Resources.Storage.Iops != nil {
|
||||
v1.StorageIOPSMaximum = *coi.Spec.Windows.Resources.Storage.Iops
|
||||
v2Container.Storage.QoS.IopsMaximum = int32(*coi.Spec.Windows.Resources.Storage.Iops)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO V2 networking. Only partial at the moment. v2.Container.Networking.Namespace specifically
|
||||
if coi.Spec.Windows.Network != nil {
|
||||
v2Container.Networking = &hcsschema.Networking{}
|
||||
|
||||
v1.EndpointList = coi.Spec.Windows.Network.EndpointList
|
||||
v2Container.Networking.Namespace = coi.actualNetworkNamespace
|
||||
|
||||
v1.AllowUnqualifiedDNSQuery = coi.Spec.Windows.Network.AllowUnqualifiedDNSQuery
|
||||
v2Container.Networking.AllowUnqualifiedDnsQuery = v1.AllowUnqualifiedDNSQuery
|
||||
|
||||
if coi.Spec.Windows.Network.DNSSearchList != nil {
|
||||
v1.DNSSearchList = strings.Join(coi.Spec.Windows.Network.DNSSearchList, ",")
|
||||
v2Container.Networking.DnsSearchList = v1.DNSSearchList
|
||||
}
|
||||
|
||||
v1.NetworkSharedContainerName = coi.Spec.Windows.Network.NetworkSharedContainerName
|
||||
v2Container.Networking.NetworkSharedContainerName = v1.NetworkSharedContainerName
|
||||
}
|
||||
|
||||
// // TODO V2 Credentials not in the schema yet.
|
||||
if cs, ok := coi.Spec.Windows.CredentialSpec.(string); ok {
|
||||
v1.Credentials = cs
|
||||
}
|
||||
|
||||
if coi.Spec.Root == nil {
|
||||
return nil, fmt.Errorf("spec is invalid - root isn't populated")
|
||||
}
|
||||
|
||||
if coi.Spec.Root.Readonly {
|
||||
return nil, fmt.Errorf(`invalid container spec - readonly is not supported for Windows containers`)
|
||||
}
|
||||
|
||||
// Strip off the top-most RW/scratch layer as that's passed in separately to HCS for v1
|
||||
v1.LayerFolderPath = coi.Spec.Windows.LayerFolders[len(coi.Spec.Windows.LayerFolders)-1]
|
||||
|
||||
if (schemaversion.IsV21(coi.actualSchemaVersion) && coi.HostingSystem == nil) ||
|
||||
(schemaversion.IsV10(coi.actualSchemaVersion) && coi.Spec.Windows.HyperV == nil) {
|
||||
// Argon v1 or v2.
|
||||
const volumeGUIDRegex = `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}(|\\)$`
|
||||
if matched, err := regexp.MatchString(volumeGUIDRegex, coi.Spec.Root.Path); !matched || err != nil {
|
||||
return nil, fmt.Errorf(`invalid container spec - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, coi.Spec.Root.Path)
|
||||
}
|
||||
if coi.Spec.Root.Path[len(coi.Spec.Root.Path)-1] != '\\' {
|
||||
coi.Spec.Root.Path += `\` // Be nice to clients and make sure well-formed for back-compat
|
||||
}
|
||||
v1.VolumePath = coi.Spec.Root.Path[:len(coi.Spec.Root.Path)-1] // Strip the trailing backslash. Required for v1.
|
||||
v2Container.Storage.Path = coi.Spec.Root.Path
|
||||
} else {
|
||||
// A hosting system was supplied, implying v2 Xenon; OR a v1 Xenon.
|
||||
if schemaversion.IsV10(coi.actualSchemaVersion) {
|
||||
// V1 Xenon
|
||||
v1.HvPartition = true
|
||||
if coi.Spec == nil || coi.Spec.Windows == nil || coi.Spec.Windows.HyperV == nil { // Be resilient to nil de-reference
|
||||
return nil, fmt.Errorf(`invalid container spec - Spec.Windows.HyperV is nil`)
|
||||
}
|
||||
if coi.Spec.Windows.HyperV.UtilityVMPath != "" {
|
||||
// Client-supplied utility VM path
|
||||
v1.HvRuntime = &schema1.HvRuntime{ImagePath: coi.Spec.Windows.HyperV.UtilityVMPath}
|
||||
} else {
|
||||
// Client was lazy. Let's locate it from the layer folders instead.
|
||||
uvmImagePath, err := uvmfolder.LocateUVMFolder(coi.Spec.Windows.LayerFolders)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
v1.HvRuntime = &schema1.HvRuntime{ImagePath: filepath.Join(uvmImagePath, `UtilityVM`)}
|
||||
}
|
||||
} else {
|
||||
// Hosting system was supplied, so is v2 Xenon.
|
||||
v2Container.Storage.Path = coi.Spec.Root.Path
|
||||
if coi.HostingSystem.OS() == "windows" {
|
||||
layers, err := computeV2Layers(coi.HostingSystem, coi.Spec.Windows.LayerFolders[:len(coi.Spec.Windows.LayerFolders)-1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
v2Container.Storage.Layers = layers
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if coi.HostingSystem == nil { // Argon v1 or v2
|
||||
for _, layerPath := range coi.Spec.Windows.LayerFolders[:len(coi.Spec.Windows.LayerFolders)-1] {
|
||||
layerID, err := wclayer.LayerID(layerPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
v1.Layers = append(v1.Layers, schema1.Layer{ID: layerID.String(), Path: layerPath})
|
||||
v2Container.Storage.Layers = append(v2Container.Storage.Layers, hcsschema.Layer{Id: layerID.String(), Path: layerPath})
|
||||
}
|
||||
}
|
||||
|
||||
// Add the mounts as mapped directories or mapped pipes
|
||||
// TODO: Mapped pipes to add in v2 schema.
|
||||
var (
|
||||
mdsv1 []schema1.MappedDir
|
||||
mpsv1 []schema1.MappedPipe
|
||||
mdsv2 []hcsschema.MappedDirectory
|
||||
mpsv2 []hcsschema.MappedPipe
|
||||
)
|
||||
for _, mount := range coi.Spec.Mounts {
|
||||
const pipePrefix = `\\.\pipe\`
|
||||
if mount.Type != "" {
|
||||
return nil, fmt.Errorf("invalid container spec - Mount.Type '%s' must not be set", mount.Type)
|
||||
}
|
||||
if strings.HasPrefix(strings.ToLower(mount.Destination), pipePrefix) {
|
||||
mpsv1 = append(mpsv1, schema1.MappedPipe{HostPath: mount.Source, ContainerPipeName: mount.Destination[len(pipePrefix):]})
|
||||
mpsv2 = append(mpsv2, hcsschema.MappedPipe{HostPath: mount.Source, ContainerPipeName: mount.Destination[len(pipePrefix):]})
|
||||
} else {
|
||||
readOnly := false
|
||||
for _, o := range mount.Options {
|
||||
if strings.ToLower(o) == "ro" {
|
||||
readOnly = true
|
||||
}
|
||||
}
|
||||
mdv1 := schema1.MappedDir{HostPath: mount.Source, ContainerPath: mount.Destination, ReadOnly: readOnly}
|
||||
mdv2 := hcsschema.MappedDirectory{ContainerPath: mount.Destination, ReadOnly: readOnly}
|
||||
if coi.HostingSystem == nil {
|
||||
mdv2.HostPath = mount.Source
|
||||
} else {
|
||||
uvmPath, err := coi.HostingSystem.GetVSMBUvmPath(mount.Source)
|
||||
if err != nil {
|
||||
if err == uvm.ErrNotAttached {
|
||||
// It could also be a scsi mount.
|
||||
uvmPath, err = coi.HostingSystem.GetScsiUvmPath(mount.Source)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
mdv2.HostPath = uvmPath
|
||||
}
|
||||
mdsv1 = append(mdsv1, mdv1)
|
||||
mdsv2 = append(mdsv2, mdv2)
|
||||
}
|
||||
}
|
||||
|
||||
v1.MappedDirectories = mdsv1
|
||||
v2Container.MappedDirectories = mdsv2
|
||||
if len(mpsv1) > 0 && osversion.Get().Build < osversion.RS3 {
|
||||
return nil, fmt.Errorf("named pipe mounts are not supported on this version of Windows")
|
||||
}
|
||||
v1.MappedPipes = mpsv1
|
||||
v2Container.MappedPipes = mpsv2
|
||||
|
||||
// Put the v2Container object as a HostedSystem for a Xenon, or directly in the schema for an Argon.
|
||||
if coi.HostingSystem == nil {
|
||||
v2.Container = v2Container
|
||||
} else {
|
||||
v2.HostingSystemId = coi.HostingSystem.ID()
|
||||
v2.HostedSystem = &hcsschema.HostedSystem{
|
||||
SchemaVersion: schemaversion.SchemaV21(),
|
||||
Container: v2Container,
|
||||
}
|
||||
}
|
||||
|
||||
if schemaversion.IsV10(coi.actualSchemaVersion) {
|
||||
return v1, nil
|
||||
}
|
||||
|
||||
return v2, nil
|
||||
}
|
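The mount-translation loop above maps OCI mounts whose destination begins with \\.\pipe\ to MappedPipe entries (for both v1 and v2 schemas) and everything else to MappedDirectory entries. Below is a minimal, self-contained sketch of just that destination check; the program, the classifyMount helper and the sample paths are illustrative only and are not part of hcsshim.

package main

import (
	"fmt"
	"strings"
)

// pipePrefix mirrors the prefix the mount-translation loop uses to decide
// whether an OCI mount destination is a named pipe rather than a directory.
const pipePrefix = `\\.\pipe\`

// classifyMount reports whether a destination is a named-pipe mount and, if
// so, the pipe name that would become the ContainerPipeName of a MappedPipe.
func classifyMount(destination string) (isPipe bool, pipeName string) {
	if strings.HasPrefix(strings.ToLower(destination), pipePrefix) {
		return true, destination[len(pipePrefix):]
	}
	return false, ""
}

func main() {
	for _, d := range []string{`\\.\pipe\docker_engine`, `C:\ContainerData`} {
		isPipe, name := classifyMount(d)
		fmt.Printf("%-26s pipe=%-5v name=%q\n", d, isPipe, name)
	}
}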
373
vendor/github.com/Microsoft/hcsshim/internal/hcsoci/layers.go
generated
vendored
373
vendor/github.com/Microsoft/hcsshim/internal/hcsoci/layers.go
generated
vendored
@ -1,373 +0,0 @@
|
||||
// +build windows
|
||||
|
||||
package hcsoci
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/Microsoft/hcsshim/internal/guestrequest"
|
||||
"github.com/Microsoft/hcsshim/internal/ospath"
|
||||
"github.com/Microsoft/hcsshim/internal/requesttype"
|
||||
"github.com/Microsoft/hcsshim/internal/schema2"
|
||||
"github.com/Microsoft/hcsshim/internal/uvm"
|
||||
"github.com/Microsoft/hcsshim/internal/wclayer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type lcowLayerEntry struct {
|
||||
hostPath string
|
||||
uvmPath string
|
||||
scsi bool
|
||||
}
|
||||
|
||||
const scratchPath = "scratch"
|
||||
|
||||
// MountContainerLayers is a helper for clients to hide all the complexity of layer mounting.
|
||||
// Layer folders are in order: base, [rolayer1..rolayern,] scratch
|
||||
//
|
||||
// v1/v2: Argon WCOW: Returns the mount path on the host as a volume GUID.
|
||||
// v1: Xenon WCOW: Done internally in HCS, so there is no point doing anything here.
|
||||
// v2: Xenon WCOW: Returns a guestrequest.CombinedLayers structure where ContainerRootPath is a folder
|
||||
// inside the utility VM which is a GUID mapping of the scratch folder. Each
|
||||
// of the Layers entries is the VSMB location where the corresponding read-only layer is mounted.
|
||||
//
|
||||
func MountContainerLayers(layerFolders []string, guestRoot string, uvm *uvm.UtilityVM) (interface{}, error) {
|
||||
logrus.Debugln("hcsshim::mountContainerLayers", layerFolders)
|
||||
|
||||
if uvm == nil {
|
||||
if len(layerFolders) < 2 {
|
||||
return nil, fmt.Errorf("need at least two layers - base and scratch")
|
||||
}
|
||||
path := layerFolders[len(layerFolders)-1]
|
||||
rest := layerFolders[:len(layerFolders)-1]
|
||||
logrus.Debugln("hcsshim::mountContainerLayers ActivateLayer", path)
|
||||
if err := wclayer.ActivateLayer(path); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
logrus.Debugln("hcsshim::mountContainerLayers Preparelayer", path, rest)
|
||||
if err := wclayer.PrepareLayer(path, rest); err != nil {
|
||||
if err2 := wclayer.DeactivateLayer(path); err2 != nil {
|
||||
logrus.Warnf("Failed to Deactivate %s: %s", path, err)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mountPath, err := wclayer.GetLayerMountPath(path)
|
||||
if err != nil {
|
||||
if err := wclayer.UnprepareLayer(path); err != nil {
|
||||
logrus.Warnf("Failed to Unprepare %s: %s", path, err)
|
||||
}
|
||||
if err2 := wclayer.DeactivateLayer(path); err2 != nil {
|
||||
logrus.Warnf("Failed to Deactivate %s: %s", path, err)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return mountPath, nil
|
||||
}
|
||||
|
||||
// V2 UVM
|
||||
logrus.Debugf("hcsshim::mountContainerLayers Is a %s V2 UVM", uvm.OS())
|
||||
|
||||
// Add each read-only layer. For Windows, this is a VSMB share with the ResourceUri ending in
|
||||
// a GUID based on the folder path. For Linux, this is a VPMEM device, except where it is over the
|
||||
// max size supported, where we put it on SCSI instead.
|
||||
//
|
||||
// Each layer is ref-counted so that multiple containers in the same utility VM can share them.
|
||||
var wcowLayersAdded []string
|
||||
var lcowlayersAdded []lcowLayerEntry
|
||||
attachedSCSIHostPath := ""
|
||||
|
||||
for _, layerPath := range layerFolders[:len(layerFolders)-1] {
|
||||
var err error
|
||||
if uvm.OS() == "windows" {
|
||||
options := &hcsschema.VirtualSmbShareOptions{
|
||||
ReadOnly: true,
|
||||
PseudoOplocks: true,
|
||||
TakeBackupPrivilege: true,
|
||||
CacheIo: true,
|
||||
ShareRead: true,
|
||||
}
|
||||
err = uvm.AddVSMB(layerPath, "", options)
|
||||
if err == nil {
|
||||
wcowLayersAdded = append(wcowLayersAdded, layerPath)
|
||||
}
|
||||
} else {
|
||||
uvmPath := ""
|
||||
hostPath := filepath.Join(layerPath, "layer.vhd")
|
||||
|
||||
var fi os.FileInfo
|
||||
fi, err = os.Stat(hostPath)
|
||||
if err == nil && uint64(fi.Size()) > uvm.PMemMaxSizeBytes() {
|
||||
// Too big for PMEM. Add on SCSI instead (at /tmp/S<C>/<L>).
|
||||
var (
|
||||
controller int
|
||||
lun int32
|
||||
)
|
||||
controller, lun, err = uvm.AddSCSILayer(hostPath)
|
||||
if err == nil {
|
||||
lcowlayersAdded = append(lcowlayersAdded,
|
||||
lcowLayerEntry{
|
||||
hostPath: hostPath,
|
||||
uvmPath: fmt.Sprintf("/tmp/S%d/%d", controller, lun),
|
||||
scsi: true,
|
||||
})
|
||||
}
|
||||
} else {
|
||||
_, uvmPath, err = uvm.AddVPMEM(hostPath, true) // UVM path is calculated. Will be /tmp/vN/
|
||||
if err == nil {
|
||||
lcowlayersAdded = append(lcowlayersAdded,
|
||||
lcowLayerEntry{
|
||||
hostPath: hostPath,
|
||||
uvmPath: uvmPath,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
cleanupOnMountFailure(uvm, wcowLayersAdded, lcowlayersAdded, attachedSCSIHostPath)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Add the scratch at an unused SCSI location. The container path inside the
|
||||
// utility VM will be C:\<ID>.
|
||||
hostPath := filepath.Join(layerFolders[len(layerFolders)-1], "sandbox.vhdx")
|
||||
|
||||
// BUGBUG Rename guestRoot to something better.
|
||||
containerScratchPathInUVM := ospath.Join(uvm.OS(), guestRoot, scratchPath)
|
||||
_, _, err := uvm.AddSCSI(hostPath, containerScratchPathInUVM, false)
|
||||
if err != nil {
|
||||
cleanupOnMountFailure(uvm, wcowLayersAdded, lcowlayersAdded, attachedSCSIHostPath)
|
||||
return nil, err
|
||||
}
|
||||
attachedSCSIHostPath = hostPath
|
||||
|
||||
if uvm.OS() == "windows" {
|
||||
// Load the filter at the C:\s<ID> location calculated above. We pass into this request each of the
|
||||
// read-only layer folders.
|
||||
layers, err := computeV2Layers(uvm, wcowLayersAdded)
|
||||
if err != nil {
|
||||
cleanupOnMountFailure(uvm, wcowLayersAdded, lcowlayersAdded, attachedSCSIHostPath)
|
||||
return nil, err
|
||||
}
|
||||
guestRequest := guestrequest.CombinedLayers{
|
||||
ContainerRootPath: containerScratchPathInUVM,
|
||||
Layers: layers,
|
||||
}
|
||||
combinedLayersModification := &hcsschema.ModifySettingRequest{
|
||||
GuestRequest: guestrequest.GuestRequest{
|
||||
Settings: guestRequest,
|
||||
ResourceType: guestrequest.ResourceTypeCombinedLayers,
|
||||
RequestType: requesttype.Add,
|
||||
},
|
||||
}
|
||||
if err := uvm.Modify(combinedLayersModification); err != nil {
|
||||
cleanupOnMountFailure(uvm, wcowLayersAdded, lcowlayersAdded, attachedSCSIHostPath)
|
||||
return nil, err
|
||||
}
|
||||
logrus.Debugln("hcsshim::mountContainerLayers Succeeded")
|
||||
return guestRequest, nil
|
||||
}
|
||||
|
||||
// This is the LCOW layout inside the utilityVM. NNN is the container "number"
|
||||
// which increments for each container created in a utility VM.
|
||||
//
|
||||
// /run/gcs/c/NNN/config.json
|
||||
// /run/gcs/c/NNN/rootfs
|
||||
// /run/gcs/c/NNN/scratch/upper
|
||||
// /run/gcs/c/NNN/scratch/work
|
||||
//
|
||||
// /dev/sda on /tmp/scratch type ext4 (rw,relatime,block_validity,delalloc,barrier,user_xattr,acl)
|
||||
// /dev/pmem0 on /tmp/v0 type ext4 (ro,relatime,block_validity,delalloc,norecovery,barrier,dax,user_xattr,acl)
|
||||
// /dev/sdb on /run/gcs/c/NNN/scratch type ext4 (rw,relatime,block_validity,delalloc,barrier,user_xattr,acl)
|
||||
// overlay on /run/gcs/c/NNN/rootfs type overlay (rw,relatime,lowerdir=/tmp/v0,upperdir=/run/gcs/c/NNN/scratch/upper,workdir=/run/gcs/c/NNN/scratch/work)
|
||||
//
|
||||
// Where /dev/sda is the scratch for the utility VM itself
|
||||
// /dev/pmemX are read-only layers for containers
|
||||
// /dev/sd(b...) are scratch spaces for each container
|
||||
|
||||
layers := []hcsschema.Layer{}
|
||||
for _, l := range lcowlayersAdded {
|
||||
layers = append(layers, hcsschema.Layer{Path: l.uvmPath})
|
||||
}
|
||||
guestRequest := guestrequest.CombinedLayers{
|
||||
ContainerRootPath: path.Join(guestRoot, rootfsPath),
|
||||
Layers: layers,
|
||||
ScratchPath: containerScratchPathInUVM,
|
||||
}
|
||||
combinedLayersModification := &hcsschema.ModifySettingRequest{
|
||||
GuestRequest: guestrequest.GuestRequest{
|
||||
ResourceType: guestrequest.ResourceTypeCombinedLayers,
|
||||
RequestType: requesttype.Add,
|
||||
Settings: guestRequest,
|
||||
},
|
||||
}
|
||||
if err := uvm.Modify(combinedLayersModification); err != nil {
|
||||
cleanupOnMountFailure(uvm, wcowLayersAdded, lcowlayersAdded, attachedSCSIHostPath)
|
||||
return nil, err
|
||||
}
|
||||
logrus.Debugln("hcsshim::mountContainerLayers Succeeded")
|
||||
return guestRequest, nil
|
||||
|
||||
}
|
||||
|
||||
// UnmountOperation is used when calling UnmountContainerLayers to determine what type of unmount is
|
||||
// required. In V1 schema, this must be UnmountOperationAll. In V2, clients can
|
||||
// be more selective and unmount only what they need, which can be a minor performance
|
||||
// improvement (eg if you know only one container is running in a utility VM, and
|
||||
// the UVM is about to be torn down, there's no need to unmount the VSMB shares,
|
||||
// just SCSI to have a consistent file system).
|
||||
type UnmountOperation uint
|
||||
|
||||
const (
|
||||
UnmountOperationSCSI UnmountOperation = 0x01
|
||||
UnmountOperationVSMB = 0x02
|
||||
UnmountOperationVPMEM = 0x04
|
||||
UnmountOperationAll = UnmountOperationSCSI | UnmountOperationVSMB | UnmountOperationVPMEM
|
||||
)
|
||||
|
||||
// UnmountContainerLayers is a helper for clients to hide all the complexity of layer unmounting
|
||||
func UnmountContainerLayers(layerFolders []string, guestRoot string, uvm *uvm.UtilityVM, op UnmountOperation) error {
|
||||
logrus.Debugln("hcsshim::unmountContainerLayers", layerFolders)
|
||||
if uvm == nil {
|
||||
// Must be an argon - folders are mounted on the host
|
||||
if op != UnmountOperationAll {
|
||||
return fmt.Errorf("only operation supported for host-mounted folders is unmountOperationAll")
|
||||
}
|
||||
if len(layerFolders) < 1 {
|
||||
return fmt.Errorf("need at least one layer for Unmount")
|
||||
}
|
||||
path := layerFolders[len(layerFolders)-1]
|
||||
logrus.Debugln("hcsshim::Unmount UnprepareLayer", path)
|
||||
if err := wclayer.UnprepareLayer(path); err != nil {
|
||||
return err
|
||||
}
|
||||
// TODO Should we try this anyway?
|
||||
logrus.Debugln("hcsshim::unmountContainerLayers DeactivateLayer", path)
|
||||
return wclayer.DeactivateLayer(path)
|
||||
}
|
||||
|
||||
// V2 Xenon
|
||||
|
||||
// Base+Scratch as a minimum. This is different to v1 which only requires the scratch
|
||||
if len(layerFolders) < 2 {
|
||||
return fmt.Errorf("at least two layers are required for unmount")
|
||||
}
|
||||
|
||||
var retError error
|
||||
|
||||
// Unload the storage filter followed by the SCSI scratch
|
||||
if (op & UnmountOperationSCSI) == UnmountOperationSCSI {
|
||||
containerScratchPathInUVM := ospath.Join(uvm.OS(), guestRoot, scratchPath)
|
||||
logrus.Debugf("hcsshim::unmountContainerLayers CombinedLayers %s", containerScratchPathInUVM)
|
||||
combinedLayersModification := &hcsschema.ModifySettingRequest{
|
||||
GuestRequest: guestrequest.GuestRequest{
|
||||
ResourceType: guestrequest.ResourceTypeCombinedLayers,
|
||||
RequestType: requesttype.Remove,
|
||||
Settings: guestrequest.CombinedLayers{ContainerRootPath: containerScratchPathInUVM},
|
||||
},
|
||||
}
|
||||
if err := uvm.Modify(combinedLayersModification); err != nil {
|
||||
logrus.Errorf(err.Error())
|
||||
}
|
||||
|
||||
// Hot remove the scratch from the SCSI controller
|
||||
hostScratchFile := filepath.Join(layerFolders[len(layerFolders)-1], "sandbox.vhdx")
|
||||
logrus.Debugf("hcsshim::unmountContainerLayers SCSI %s %s", containerScratchPathInUVM, hostScratchFile)
|
||||
if err := uvm.RemoveSCSI(hostScratchFile); err != nil {
|
||||
e := fmt.Errorf("failed to remove SCSI %s: %s", hostScratchFile, err)
|
||||
logrus.Debugln(e)
|
||||
if retError == nil {
|
||||
retError = e
|
||||
} else {
|
||||
retError = errors.Wrapf(retError, e.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Remove each of the read-only layers from VSMB. These are ref-counted and
|
||||
// only removed once the count drops to zero. This allows multiple containers
|
||||
// to share layers.
|
||||
if uvm.OS() == "windows" && len(layerFolders) > 1 && (op&UnmountOperationVSMB) == UnmountOperationVSMB {
|
||||
for _, layerPath := range layerFolders[:len(layerFolders)-1] {
|
||||
if e := uvm.RemoveVSMB(layerPath); e != nil {
|
||||
logrus.Debugln(e)
|
||||
if retError == nil {
|
||||
retError = e
|
||||
} else {
|
||||
retError = errors.Wrapf(retError, e.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Remove each of the read-only layers from VPMEM (or SCSI). These are ref-counted
|
||||
// and only removed once the count drops to zero. This allows multiple containers to
|
||||
// share layers. Note that SCSI is used on large layers.
|
||||
if uvm.OS() == "linux" && len(layerFolders) > 1 && (op&UnmountOperationVPMEM) == UnmountOperationVPMEM {
|
||||
for _, layerPath := range layerFolders[:len(layerFolders)-1] {
|
||||
hostPath := filepath.Join(layerPath, "layer.vhd")
|
||||
if fi, err := os.Stat(hostPath); err == nil {
|
||||
var e error
|
||||
if uint64(fi.Size()) > uvm.PMemMaxSizeBytes() {
|
||||
e = uvm.RemoveSCSI(hostPath)
|
||||
} else {
|
||||
e = uvm.RemoveVPMEM(hostPath)
|
||||
}
|
||||
if e != nil {
|
||||
logrus.Debugln(e)
|
||||
if retError == nil {
|
||||
retError = e
|
||||
} else {
|
||||
retError = errors.Wrapf(retError, e.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO (possibly) Consider deleting the container directory in the utility VM
|
||||
|
||||
return retError
|
||||
}
|
||||
|
||||
func cleanupOnMountFailure(uvm *uvm.UtilityVM, wcowLayers []string, lcowLayers []lcowLayerEntry, scratchHostPath string) {
|
||||
for _, wl := range wcowLayers {
|
||||
if err := uvm.RemoveVSMB(wl); err != nil {
|
||||
logrus.Warnf("Possibly leaked vsmbshare on error removal path: %s", err)
|
||||
}
|
||||
}
|
||||
for _, ll := range lcowLayers {
|
||||
if ll.scsi {
|
||||
if err := uvm.RemoveSCSI(ll.hostPath); err != nil {
|
||||
logrus.Warnf("Possibly leaked SCSI on error removal path: %s", err)
|
||||
}
|
||||
} else if err := uvm.RemoveVPMEM(ll.hostPath); err != nil {
|
||||
logrus.Warnf("Possibly leaked vpmemdevice on error removal path: %s", err)
|
||||
}
|
||||
}
|
||||
if scratchHostPath != "" {
|
||||
if err := uvm.RemoveSCSI(scratchHostPath); err != nil {
|
||||
logrus.Warnf("Possibly leaked SCSI disk on error removal path: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func computeV2Layers(vm *uvm.UtilityVM, paths []string) (layers []hcsschema.Layer, err error) {
|
||||
for _, path := range paths {
|
||||
uvmPath, err := vm.GetVSMBUvmPath(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
layerID, err := wclayer.LayerID(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
layers = append(layers, hcsschema.Layer{Id: layerID.String(), Path: uvmPath})
|
||||
}
|
||||
return layers, nil
|
||||
}
|
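The doc comment on MountContainerLayers notes that it returns either a host volume path (a string) for an Argon, or a guestrequest.CombinedLayers for a v2 Xenon. The sketch below, which would live alongside layers.go in package hcsoci, shows how a caller might disambiguate that interface{} result with a type switch; mountedRootPath is a hypothetical helper, not part of the package.

// +build windows

package hcsoci

import (
	"fmt"

	"github.com/Microsoft/hcsshim/internal/guestrequest"
	"github.com/Microsoft/hcsshim/internal/uvm"
)

// mountedRootPath is a hypothetical helper showing how the interface{} result
// of MountContainerLayers is typically interpreted: a volume path string for
// an Argon mounted on the host, or a CombinedLayers structure whose
// ContainerRootPath points inside the utility VM for a v2 Xenon.
func mountedRootPath(layerFolders []string, guestRoot string, vm *uvm.UtilityVM) (string, error) {
	mcl, err := MountContainerLayers(layerFolders, guestRoot, vm)
	if err != nil {
		return "", err
	}
	switch m := mcl.(type) {
	case string:
		// Argon: the volume GUID path on the host.
		return m, nil
	case guestrequest.CombinedLayers:
		// v2 Xenon: the GUID-mapped scratch folder inside the utility VM.
		return m.ContainerRootPath, nil
	default:
		return "", fmt.Errorf("unexpected result type %T from MountContainerLayers", mcl)
	}
}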
41
vendor/github.com/Microsoft/hcsshim/internal/hcsoci/network.go
generated
vendored
41
vendor/github.com/Microsoft/hcsshim/internal/hcsoci/network.go
generated
vendored
@ -1,41 +0,0 @@
|
||||
package hcsoci
|
||||
|
||||
import (
|
||||
"github.com/Microsoft/hcsshim/internal/hns"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func createNetworkNamespace(coi *createOptionsInternal, resources *Resources) error {
|
||||
netID, err := hns.CreateNamespace()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Infof("created network namespace %s for %s", netID, coi.ID)
|
||||
resources.netNS = netID
|
||||
resources.createdNetNS = true
|
||||
for _, endpointID := range coi.Spec.Windows.Network.EndpointList {
|
||||
err = hns.AddNamespaceEndpoint(netID, endpointID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Infof("added network endpoint %s to namespace %s", endpointID, netID)
|
||||
resources.networkEndpoints = append(resources.networkEndpoints, endpointID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getNamespaceEndpoints(netNS string) ([]*hns.HNSEndpoint, error) {
|
||||
ids, err := hns.GetNamespaceEndpoints(netNS)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var endpoints []*hns.HNSEndpoint
|
||||
for _, id := range ids {
|
||||
endpoint, err := hns.GetHNSEndpointByID(id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
endpoints = append(endpoints, endpoint)
|
||||
}
|
||||
return endpoints, nil
|
||||
}
|
127
vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources.go
generated
vendored
127
vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources.go
generated
vendored
@ -1,127 +0,0 @@
|
||||
package hcsoci
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/Microsoft/hcsshim/internal/hns"
|
||||
"github.com/Microsoft/hcsshim/internal/uvm"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// NetNS returns the network namespace for the container
|
||||
func (r *Resources) NetNS() string {
|
||||
return r.netNS
|
||||
}
|
||||
|
||||
// Resources is the structure returned as part of creating a container. It holds
|
||||
// nothing useful to clients, hence everything is lowercased. A client would use
|
||||
// it in a call to ReleaseResources to ensure everything is cleaned up when a
|
||||
// container exits.
|
||||
type Resources struct {
|
||||
// containerRootInUVM is the base path in a utility VM where elements relating
|
||||
// to a container are exposed. For example, the mounted filesystem; the runtime
|
||||
// spec (in the case of LCOW); overlay and scratch (in the case of LCOW).
|
||||
//
|
||||
// For WCOW, this will be under C:\c\N, and for LCOW this will
|
||||
// be under /run/gcs/c/N. N is an atomic counter for each container created
|
||||
// in that utility VM. For LCOW this is also the "OCI Bundle Path".
|
||||
containerRootInUVM string
|
||||
|
||||
// layers is an array of the layer folder paths which have been mounted either on
|
||||
// the host in the case of a WCOW Argon, or in a utility VM for WCOW Xenon and LCOW.
|
||||
layers []string
|
||||
|
||||
// vsmbMounts is an array of the host-paths mounted into a utility VM to support
|
||||
// (bind-)mounts into a WCOW v2 Xenon.
|
||||
vsmbMounts []string
|
||||
|
||||
// plan9Mounts is an array of all the host paths which have been added to
|
||||
// an LCOW utility VM
|
||||
plan9Mounts []string
|
||||
|
||||
// netNS is the network namespace
|
||||
netNS string
|
||||
|
||||
// networkEndpoints is the list of network endpoints used by the container
|
||||
networkEndpoints []string
|
||||
|
||||
// createNetNS indicates if the network namespace has been created
|
||||
createdNetNS bool
|
||||
|
||||
// addedNetNSToVM indicates if the network namespace has been added to the container's utility VM
|
||||
addedNetNSToVM bool
|
||||
|
||||
// scsiMounts is an array of the host-paths mounted into a utility VM to
|
||||
// support scsi device passthrough.
|
||||
scsiMounts []string
|
||||
}
|
||||
|
||||
// TODO: Method on the resources?
|
||||
func ReleaseResources(r *Resources, vm *uvm.UtilityVM, all bool) error {
|
||||
if vm != nil && r.addedNetNSToVM {
|
||||
err := vm.RemoveNetNS(r.netNS)
|
||||
if err != nil {
|
||||
logrus.Warn(err)
|
||||
}
|
||||
r.addedNetNSToVM = false
|
||||
}
|
||||
|
||||
if r.createdNetNS {
|
||||
for len(r.networkEndpoints) != 0 {
|
||||
endpoint := r.networkEndpoints[len(r.networkEndpoints)-1]
|
||||
err := hns.RemoveNamespaceEndpoint(r.netNS, endpoint)
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
logrus.Warnf("removing endpoint %s from namespace %s: does not exist", endpoint, r.NetNS())
|
||||
}
|
||||
r.networkEndpoints = r.networkEndpoints[:len(r.networkEndpoints)-1]
|
||||
}
|
||||
r.networkEndpoints = nil
|
||||
err := hns.RemoveNamespace(r.netNS)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
r.createdNetNS = false
|
||||
}
|
||||
|
||||
if len(r.layers) != 0 {
|
||||
op := UnmountOperationSCSI
|
||||
if vm == nil || all {
|
||||
op = UnmountOperationAll
|
||||
}
|
||||
err := UnmountContainerLayers(r.layers, r.containerRootInUVM, vm, op)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.layers = nil
|
||||
}
|
||||
|
||||
if all {
|
||||
for len(r.vsmbMounts) != 0 {
|
||||
mount := r.vsmbMounts[len(r.vsmbMounts)-1]
|
||||
if err := vm.RemoveVSMB(mount); err != nil {
|
||||
return err
|
||||
}
|
||||
r.vsmbMounts = r.vsmbMounts[:len(r.vsmbMounts)-1]
|
||||
}
|
||||
|
||||
for len(r.plan9Mounts) != 0 {
|
||||
mount := r.plan9Mounts[len(r.plan9Mounts)-1]
|
||||
if err := vm.RemovePlan9(mount); err != nil {
|
||||
return err
|
||||
}
|
||||
r.plan9Mounts = r.plan9Mounts[:len(r.plan9Mounts)-1]
|
||||
}
|
||||
|
||||
for _, path := range r.scsiMounts {
|
||||
if err := vm.RemoveSCSI(path); err != nil {
|
||||
return err
|
||||
}
|
||||
r.scsiMounts = nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
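The Resources comments above describe the intended client pattern: hold on to the Resources returned at container creation and hand it back to ReleaseResources once the container has exited. A minimal sketch of that call follows, assuming it would live in package hcsoci; releaseOnExit is a hypothetical wrapper and the logging policy is illustrative only.

// +build windows

package hcsoci

import (
	"github.com/Microsoft/hcsshim/internal/uvm"
	"github.com/sirupsen/logrus"
)

// releaseOnExit is a hypothetical wrapper around ReleaseResources. Passing
// all=true tears everything down (layers, VSMB, Plan9, SCSI, network);
// all=false limits layer unmounting to the container's SCSI scratch and
// leaves ref-counted VSMB/VPMEM layers and other shares in place.
func releaseOnExit(r *Resources, vm *uvm.UtilityVM, all bool) {
	if err := ReleaseResources(r, vm, all); err != nil {
		logrus.Warnf("failed to release container resources: %s", err)
	}
}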
104
vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_lcow.go
generated
vendored
104
vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_lcow.go
generated
vendored
@ -1,104 +0,0 @@
|
||||
// +build windows
|
||||
|
||||
package hcsoci
|
||||
|
||||
// Contains functions relating to a LCOW container, as opposed to a utility VM
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/Microsoft/hcsshim/internal/guestrequest"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const rootfsPath = "rootfs"
|
||||
const mountPathPrefix = "m"
|
||||
|
||||
func allocateLinuxResources(coi *createOptionsInternal, resources *Resources) error {
|
||||
if coi.Spec.Root == nil {
|
||||
coi.Spec.Root = &specs.Root{}
|
||||
}
|
||||
if coi.Spec.Root.Path == "" {
|
||||
logrus.Debugln("hcsshim::allocateLinuxResources mounting storage")
|
||||
mcl, err := MountContainerLayers(coi.Spec.Windows.LayerFolders, resources.containerRootInUVM, coi.HostingSystem)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to mount container storage: %s", err)
|
||||
}
|
||||
if coi.HostingSystem == nil {
|
||||
coi.Spec.Root.Path = mcl.(string) // Argon v1 or v2
|
||||
} else {
|
||||
coi.Spec.Root.Path = mcl.(guestrequest.CombinedLayers).ContainerRootPath // v2 Xenon LCOW
|
||||
}
|
||||
resources.layers = coi.Spec.Windows.LayerFolders
|
||||
} else {
|
||||
// This is the "Plan 9" root filesystem.
|
||||
// TODO: We need a test for this. Ask @jstarks how you can even lay this out on Windows.
|
||||
hostPath := coi.Spec.Root.Path
|
||||
uvmPathForContainersFileSystem := path.Join(resources.containerRootInUVM, rootfsPath)
|
||||
err := coi.HostingSystem.AddPlan9(hostPath, uvmPathForContainersFileSystem, coi.Spec.Root.Readonly)
|
||||
if err != nil {
|
||||
return fmt.Errorf("adding plan9 root: %s", err)
|
||||
}
|
||||
coi.Spec.Root.Path = uvmPathForContainersFileSystem
|
||||
resources.plan9Mounts = append(resources.plan9Mounts, hostPath)
|
||||
}
|
||||
|
||||
for i, mount := range coi.Spec.Mounts {
|
||||
switch mount.Type {
|
||||
case "bind":
|
||||
case "physical-disk":
|
||||
case "virtual-disk":
|
||||
default:
|
||||
// Unknown mount type
|
||||
continue
|
||||
}
|
||||
if mount.Destination == "" || mount.Source == "" {
|
||||
return fmt.Errorf("invalid OCI spec - a mount must have both source and a destination: %+v", mount)
|
||||
}
|
||||
|
||||
if coi.HostingSystem != nil {
|
||||
hostPath := mount.Source
|
||||
uvmPathForShare := path.Join(resources.containerRootInUVM, mountPathPrefix+strconv.Itoa(i))
|
||||
|
||||
readOnly := false
|
||||
for _, o := range mount.Options {
|
||||
if strings.ToLower(o) == "ro" {
|
||||
readOnly = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if mount.Type == "physical-disk" {
|
||||
logrus.Debugf("hcsshim::allocateLinuxResources Hot-adding SCSI physical disk for OCI mount %+v", mount)
|
||||
_, _, err := coi.HostingSystem.AddSCSIPhysicalDisk(hostPath, uvmPathForShare, readOnly)
|
||||
if err != nil {
|
||||
return fmt.Errorf("adding SCSI physical disk mount %+v: %s", mount, err)
|
||||
}
|
||||
resources.scsiMounts = append(resources.scsiMounts, hostPath)
|
||||
coi.Spec.Mounts[i].Type = "none"
|
||||
} else if mount.Type == "virtual-disk" {
|
||||
logrus.Debugf("hcsshim::allocateLinuxResources Hot-adding SCSI virtual disk for OCI mount %+v", mount)
|
||||
_, _, err := coi.HostingSystem.AddSCSI(hostPath, uvmPathForShare, readOnly)
|
||||
if err != nil {
|
||||
return fmt.Errorf("adding SCSI virtual disk mount %+v: %s", mount, err)
|
||||
}
|
||||
resources.scsiMounts = append(resources.scsiMounts, hostPath)
|
||||
coi.Spec.Mounts[i].Type = "none"
|
||||
} else {
|
||||
logrus.Debugf("hcsshim::allocateLinuxResources Hot-adding Plan9 for OCI mount %+v", mount)
|
||||
err := coi.HostingSystem.AddPlan9(hostPath, uvmPathForShare, readOnly)
|
||||
if err != nil {
|
||||
return fmt.Errorf("adding plan9 mount %+v: %s", mount, err)
|
||||
}
|
||||
resources.plan9Mounts = append(resources.plan9Mounts, hostPath)
|
||||
}
|
||||
coi.Spec.Mounts[i].Source = uvmPathForShare
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
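allocateLinuxResources above recognises three OCI mount types: "bind" (shared into the utility VM over Plan 9 when a hosting system is present), and "physical-disk" / "virtual-disk" (hot-added over SCSI). The standalone snippet below only illustrates what such mounts look like in an OCI spec; the paths are placeholders and the program does not exercise hcsshim.

package main

import (
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	// Placeholder mounts covering the three types handled by
	// allocateLinuxResources. "ro" marks the bind mount read-only.
	mounts := []specs.Mount{
		{Type: "bind", Source: `C:\ContainerData`, Destination: "/data", Options: []string{"ro"}},
		{Type: "virtual-disk", Source: `C:\disks\extra.vhdx`, Destination: "/vol"},
		{Type: "physical-disk", Source: `\\.\PhysicalDrive2`, Destination: "/disk"},
	}
	for _, m := range mounts {
		fmt.Printf("%-13s %-24s -> %-6s options=%v\n", m.Type, m.Source, m.Destination, m.Options)
	}
}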
127
vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_wcow.go
generated
vendored
127
vendor/github.com/Microsoft/hcsshim/internal/hcsoci/resources_wcow.go
generated
vendored
@ -1,127 +0,0 @@
|
||||
// +build windows
|
||||
|
||||
package hcsoci
|
||||
|
||||
// Contains functions relating to a WCOW container, as opposed to a utility VM
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/Microsoft/hcsshim/internal/guestrequest"
|
||||
"github.com/Microsoft/hcsshim/internal/schema2"
|
||||
"github.com/Microsoft/hcsshim/internal/schemaversion"
|
||||
"github.com/Microsoft/hcsshim/internal/wclayer"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func allocateWindowsResources(coi *createOptionsInternal, resources *Resources) error {
|
||||
if coi.Spec == nil || coi.Spec.Windows == nil || coi.Spec.Windows.LayerFolders == nil {
|
||||
return fmt.Errorf("field 'Spec.Windows.Layerfolders' is not populated")
|
||||
}
|
||||
|
||||
scratchFolder := coi.Spec.Windows.LayerFolders[len(coi.Spec.Windows.LayerFolders)-1]
|
||||
logrus.Debugf("hcsshim::allocateWindowsResources scratch folder: %s", scratchFolder)
|
||||
|
||||
// TODO: Remove this code for auto-creation. Make the caller responsible.
|
||||
// Create the directory for the RW scratch layer if it doesn't exist
|
||||
if _, err := os.Stat(scratchFolder); os.IsNotExist(err) {
|
||||
logrus.Debugf("hcsshim::allocateWindowsResources container scratch folder does not exist so creating: %s ", scratchFolder)
|
||||
if err := os.MkdirAll(scratchFolder, 0777); err != nil {
|
||||
return fmt.Errorf("failed to auto-create container scratch folder %s: %s", scratchFolder, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Create sandbox.vhdx if it doesn't exist in the scratch folder. It's called sandbox.vhdx
|
||||
// rather than scratch.vhdx because, as in the v1 schema, the name is hard-coded in HCS.
|
||||
if _, err := os.Stat(filepath.Join(scratchFolder, "sandbox.vhdx")); os.IsNotExist(err) {
|
||||
logrus.Debugf("hcsshim::allocateWindowsResources container sandbox.vhdx does not exist so creating in %s ", scratchFolder)
|
||||
if err := wclayer.CreateScratchLayer(scratchFolder, coi.Spec.Windows.LayerFolders[:len(coi.Spec.Windows.LayerFolders)-1]); err != nil {
|
||||
return fmt.Errorf("failed to CreateSandboxLayer %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
if coi.Spec.Root == nil {
|
||||
coi.Spec.Root = &specs.Root{}
|
||||
}
|
||||
|
||||
if coi.Spec.Root.Path == "" && (coi.HostingSystem != nil || coi.Spec.Windows.HyperV == nil) {
|
||||
logrus.Debugln("hcsshim::allocateWindowsResources mounting storage")
|
||||
mcl, err := MountContainerLayers(coi.Spec.Windows.LayerFolders, resources.containerRootInUVM, coi.HostingSystem)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to mount container storage: %s", err)
|
||||
}
|
||||
if coi.HostingSystem == nil {
|
||||
coi.Spec.Root.Path = mcl.(string) // Argon v1 or v2
|
||||
} else {
|
||||
coi.Spec.Root.Path = mcl.(guestrequest.CombinedLayers).ContainerRootPath // v2 Xenon WCOW
|
||||
}
|
||||
resources.layers = coi.Spec.Windows.LayerFolders
|
||||
}
|
||||
|
||||
// Validate each of the mounts. If this is a V2 Xenon, we have to add them as
|
||||
// VSMB shares to the utility VM. For V1 Xenon and Argons, there's nothing for
|
||||
// us to do as it's done by HCS.
|
||||
for i, mount := range coi.Spec.Mounts {
|
||||
if mount.Destination == "" || mount.Source == "" {
|
||||
return fmt.Errorf("invalid OCI spec - a mount must have both source and a destination: %+v", mount)
|
||||
}
|
||||
switch mount.Type {
|
||||
case "":
|
||||
case "physical-disk":
|
||||
case "virtual-disk":
|
||||
default:
|
||||
return fmt.Errorf("invalid OCI spec - Type '%s' not supported", mount.Type)
|
||||
}
|
||||
|
||||
if coi.HostingSystem != nil && schemaversion.IsV21(coi.actualSchemaVersion) {
|
||||
uvmPath := fmt.Sprintf("C:\\%s\\%d", coi.actualID, i)
|
||||
|
||||
readOnly := false
|
||||
for _, o := range mount.Options {
|
||||
if strings.ToLower(o) == "ro" {
|
||||
readOnly = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if mount.Type == "physical-disk" {
|
||||
logrus.Debugf("hcsshim::allocateWindowsResources Hot-adding SCSI physical disk for OCI mount %+v", mount)
|
||||
_, _, err := coi.HostingSystem.AddSCSIPhysicalDisk(mount.Source, uvmPath, readOnly)
|
||||
if err != nil {
|
||||
return fmt.Errorf("adding SCSI physical disk mount %+v: %s", mount, err)
|
||||
}
|
||||
coi.Spec.Mounts[i].Type = ""
|
||||
resources.scsiMounts = append(resources.scsiMounts, mount.Source)
|
||||
} else if mount.Type == "virtual-disk" {
|
||||
logrus.Debugf("hcsshim::allocateWindowsResources Hot-adding SCSI virtual disk for OCI mount %+v", mount)
|
||||
_, _, err := coi.HostingSystem.AddSCSI(mount.Source, uvmPath, readOnly)
|
||||
if err != nil {
|
||||
return fmt.Errorf("adding SCSI virtual disk mount %+v: %s", mount, err)
|
||||
}
|
||||
coi.Spec.Mounts[i].Type = ""
|
||||
resources.scsiMounts = append(resources.scsiMounts, mount.Source)
|
||||
} else {
|
||||
logrus.Debugf("hcsshim::allocateWindowsResources Hot-adding VSMB share for OCI mount %+v", mount)
|
||||
options := &hcsschema.VirtualSmbShareOptions{}
|
||||
if readOnly {
|
||||
options.ReadOnly = true
|
||||
options.CacheIo = true
|
||||
options.ShareRead = true
|
||||
options.ForceLevelIIOplocks = true
|
||||
|
||||
}
|
||||
|
||||
err := coi.HostingSystem.AddVSMB(mount.Source, "", options)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to add VSMB share to utility VM for mount %+v: %s", mount, err)
|
||||
}
|
||||
resources.vsmbMounts = append(resources.vsmbMounts, mount.Source)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
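Both resource allocators above, as well as the mount translation in the create path earlier, scan mount.Options case-insensitively for an "ro" entry. A hypothetical refactoring sketch of that check as a shared helper in package hcsoci follows; isReadOnlyMount is not an existing function in the package.

// +build windows

package hcsoci

import "strings"

// isReadOnlyMount reports whether an OCI mount's options request a read-only
// mount, mirroring the case-insensitive "ro" scan repeated above.
func isReadOnlyMount(options []string) bool {
	for _, o := range options {
		if strings.ToLower(o) == "ro" {
			return true
		}
	}
	return false
}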
260
vendor/github.com/Microsoft/hcsshim/internal/hcsoci/wcow_argon_test.go
generated
vendored
260
vendor/github.com/Microsoft/hcsshim/internal/hcsoci/wcow_argon_test.go
generated
vendored
@ -1,260 +0,0 @@
|
||||
// +build windows,functional
|
||||
|
||||
package hcsoci
|
||||
|
||||
//import (
|
||||
// "os"
|
||||
// "path/filepath"
|
||||
// "testing"
|
||||
|
||||
// "github.com/Microsoft/hcsshim/internal/schemaversion"
|
||||
// specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
//)
|
||||
|
||||
//// --------------------------------
|
||||
//// W C O W A R G O N V 1
|
||||
//// --------------------------------
|
||||
|
||||
//// A v1 Argon with a single base layer. It also validates hostname functionality is propagated.
|
||||
//func TestV1Argon(t *testing.T) {
|
||||
// t.Skip("fornow")
|
||||
// tempDir := createWCOWTempDirWithSandbox(t)
|
||||
// defer os.RemoveAll(tempDir)
|
||||
|
||||
// layers := append(layersNanoserver, tempDir)
|
||||
// mountPath, err := mountContainerLayers(layers, nil)
|
||||
// if err != nil {
|
||||
// t.Fatalf("failed to mount container storage: %s", err)
|
||||
// }
|
||||
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
|
||||
|
||||
// c, err := CreateContainer(&CreateOptions{
|
||||
// SchemaVersion: schemaversion.SchemaV10(),
|
||||
// Id: "TestV1Argon",
|
||||
// Owner: "unit-test",
|
||||
// Spec: &specs.Spec{
|
||||
// Hostname: "goofy",
|
||||
// Windows: &specs.Windows{LayerFolders: layers},
|
||||
// Root: &specs.Root{Path: mountPath.(string)},
|
||||
// },
|
||||
// })
|
||||
// if err != nil {
|
||||
// t.Fatalf("Failed create: %s", err)
|
||||
// }
|
||||
// startContainer(t, c)
|
||||
// runCommand(t, c, "cmd /s /c echo Hello", `c:\`, "Hello")
|
||||
// runCommand(t, c, "cmd /s /c hostname", `c:\`, "goofy")
|
||||
// stopContainer(t, c)
|
||||
// c.Terminate()
|
||||
//}
|
||||
|
||||
//// A v1 Argon with a single base layer which uses the auto-mount capability
|
||||
//func TestV1ArgonAutoMount(t *testing.T) {
|
||||
// t.Skip("fornow")
|
||||
// tempDir := createWCOWTempDirWithSandbox(t)
|
||||
// defer os.RemoveAll(tempDir)
|
||||
|
||||
// layers := append(layersBusybox, tempDir)
|
||||
// c, err := CreateContainer(&CreateOptions{
|
||||
// Id: "TestV1ArgonAutoMount",
|
||||
// SchemaVersion: schemaversion.SchemaV10(),
|
||||
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layers}},
|
||||
// })
|
||||
// if err != nil {
|
||||
// t.Fatalf("Failed create: %s", err)
|
||||
// }
|
||||
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
|
||||
// startContainer(t, c)
|
||||
// runCommand(t, c, "cmd /s /c echo Hello", `c:\`, "Hello")
|
||||
// stopContainer(t, c)
|
||||
// c.Terminate()
|
||||
//}
|
||||
|
||||
//// A v1 Argon with multiple layers which uses the auto-mount capability
|
||||
//func TestV1ArgonMultipleBaseLayersAutoMount(t *testing.T) {
|
||||
// t.Skip("fornow")
|
||||
|
||||
// // This is the important bit for this test. It's deleted here. We call the helper only to allocate a temporary directory
|
||||
// containerScratchDir := createTempDir(t)
|
||||
// os.RemoveAll(containerScratchDir)
|
||||
// defer os.RemoveAll(containerScratchDir) // As auto-created
|
||||
|
||||
// layers := append(layersBusybox, containerScratchDir)
|
||||
// c, err := CreateContainer(&CreateOptions{
|
||||
// Id: "TestV1ArgonMultipleBaseLayersAutoMount",
|
||||
// SchemaVersion: schemaversion.SchemaV10(),
|
||||
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layers}},
|
||||
// })
|
||||
// if err != nil {
|
||||
// t.Fatalf("Failed create: %s", err)
|
||||
// }
|
||||
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
|
||||
// startContainer(t, c)
|
||||
// runCommand(t, c, "cmd /s /c echo Hello", `c:\`, "Hello")
|
||||
// stopContainer(t, c)
|
||||
// c.Terminate()
|
||||
//}
|
||||
|
||||
//// A v1 Argon with a single mapped directory.
|
||||
//func TestV1ArgonSingleMappedDirectory(t *testing.T) {
|
||||
// t.Skip("fornow")
|
||||
// tempDir := createWCOWTempDirWithSandbox(t)
|
||||
// defer os.RemoveAll(tempDir)
|
||||
|
||||
// layers := append(layersNanoserver, tempDir)
|
||||
|
||||
// // Create a temp folder containing foo.txt which will be used for the bind-mount test.
|
||||
// source := createTempDir(t)
|
||||
// defer os.RemoveAll(source)
|
||||
// mount := specs.Mount{
|
||||
// Source: source,
|
||||
// Destination: `c:\foo`,
|
||||
// }
|
||||
// f, err := os.OpenFile(filepath.Join(source, "foo.txt"), os.O_RDWR|os.O_CREATE, 0755)
|
||||
// f.Close()
|
||||
|
||||
// c, err := CreateContainer(&CreateOptions{
|
||||
// SchemaVersion: schemaversion.SchemaV10(),
|
||||
// Spec: &specs.Spec{
|
||||
// Windows: &specs.Windows{LayerFolders: layers},
|
||||
// Mounts: []specs.Mount{mount},
|
||||
// },
|
||||
// })
|
||||
// if err != nil {
|
||||
// t.Fatalf("Failed create: %s", err)
|
||||
// }
|
||||
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
|
||||
|
||||
// startContainer(t, c)
|
||||
// runCommand(t, c, `cmd /s /c dir /b c:\foo`, `c:\`, "foo.txt")
|
||||
// stopContainer(t, c)
|
||||
// c.Terminate()
|
||||
//}
|
||||
|
||||
//// --------------------------------
|
||||
//// W C O W A R G O N V 2
|
||||
//// --------------------------------
|
||||
|
||||
//// A v2 Argon with a single base layer. It also validates hostname functionality is propagated.
|
||||
//// It also uses an auto-generated ID.
|
||||
//func TestV2Argon(t *testing.T) {
|
||||
// t.Skip("fornow")
|
||||
// tempDir := createWCOWTempDirWithSandbox(t)
|
||||
// defer os.RemoveAll(tempDir)
|
||||
|
||||
// layers := append(layersNanoserver, tempDir)
|
||||
// mountPath, err := mountContainerLayers(layers, nil)
|
||||
// if err != nil {
|
||||
// t.Fatalf("failed to mount container storage: %s", err)
|
||||
// }
|
||||
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
|
||||
|
||||
// c, err := CreateContainer(&CreateOptions{
|
||||
// SchemaVersion: schemaversion.SchemaV21(),
|
||||
// Spec: &specs.Spec{
|
||||
// Hostname: "mickey",
|
||||
// Windows: &specs.Windows{LayerFolders: layers},
|
||||
// Root: &specs.Root{Path: mountPath.(string)},
|
||||
// },
|
||||
// })
|
||||
// if err != nil {
|
||||
// t.Fatalf("Failed create: %s", err)
|
||||
// }
|
||||
// startContainer(t, c)
|
||||
// runCommand(t, c, "cmd /s /c echo Hello", `c:\`, "Hello")
|
||||
// runCommand(t, c, "cmd /s /c hostname", `c:\`, "mickey")
|
||||
// stopContainer(t, c)
|
||||
// c.Terminate()
|
||||
//}
|
||||
|
||||
//// A v2 Argon with multiple layers
|
||||
//func TestV2ArgonMultipleBaseLayers(t *testing.T) {
|
||||
// t.Skip("fornow")
|
||||
// tempDir := createWCOWTempDirWithSandbox(t)
|
||||
// defer os.RemoveAll(tempDir)
|
||||
|
||||
// layers := append(layersBusybox, tempDir)
|
||||
// mountPath, err := mountContainerLayers(layers, nil)
|
||||
// if err != nil {
|
||||
// t.Fatalf("failed to mount container storage: %s", err)
|
||||
// }
|
||||
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
|
||||
|
||||
// c, err := CreateContainer(&CreateOptions{
|
||||
// SchemaVersion: schemaversion.SchemaV21(),
|
||||
// Id: "TestV2ArgonMultipleBaseLayers",
|
||||
// Spec: &specs.Spec{
|
||||
// Windows: &specs.Windows{LayerFolders: layers},
|
||||
// Root: &specs.Root{Path: mountPath.(string)},
|
||||
// },
|
||||
// })
|
||||
// if err != nil {
|
||||
// t.Fatalf("Failed create: %s", err)
|
||||
// }
|
||||
// startContainer(t, c)
|
||||
// runCommand(t, c, "cmd /s /c echo Hello", `c:\`, "Hello")
|
||||
// stopContainer(t, c)
|
||||
// c.Terminate()
|
||||
//}
|
||||
|
||||
//// A v2 Argon with multiple layers which uses the auto-mount capability and auto-create
|
||||
//func TestV2ArgonAutoMountMultipleBaseLayers(t *testing.T) {
|
||||
// t.Skip("fornow")
|
||||
|
||||
// // This is the important bit for this test. It's deleted here. We call the helper only to allocate a temporary directory
|
||||
// containerScratchDir := createTempDir(t)
|
||||
// os.RemoveAll(containerScratchDir)
|
||||
// defer os.RemoveAll(containerScratchDir) // As auto-created
|
||||
|
||||
// layers := append(layersBusybox, containerScratchDir)
|
||||
|
||||
// c, err := CreateContainer(&CreateOptions{
|
||||
// SchemaVersion: schemaversion.SchemaV21(),
|
||||
// Id: "TestV2ArgonAutoMountMultipleBaseLayers",
|
||||
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layers}},
|
||||
// })
|
||||
// if err != nil {
|
||||
// t.Fatalf("Failed create: %s", err)
|
||||
// }
|
||||
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
|
||||
// startContainer(t, c)
|
||||
// runCommand(t, c, "cmd /s /c echo Hello", `c:\`, "Hello")
|
||||
// stopContainer(t, c)
|
||||
// c.Terminate()
|
||||
//}
|
||||
|
||||
//// A v2 Argon with a single mapped directory.
|
||||
//func TestV2ArgonSingleMappedDirectory(t *testing.T) {
|
||||
// t.Skip("fornow")
|
||||
// tempDir := createWCOWTempDirWithSandbox(t)
|
||||
// defer os.RemoveAll(tempDir)
|
||||
|
||||
// layers := append(layersNanoserver, tempDir)
|
||||
|
||||
// // Create a temp folder containing foo.txt which will be used for the bind-mount test.
|
||||
// source := createTempDir(t)
|
||||
// defer os.RemoveAll(source)
|
||||
// mount := specs.Mount{
|
||||
// Source: source,
|
||||
// Destination: `c:\foo`,
|
||||
// }
|
||||
// f, err := os.OpenFile(filepath.Join(source, "foo.txt"), os.O_RDWR|os.O_CREATE, 0755)
|
||||
// f.Close()
|
||||
|
||||
// c, err := CreateContainer(&CreateOptions{
|
||||
// SchemaVersion: schemaversion.SchemaV21(),
|
||||
// Spec: &specs.Spec{
|
||||
// Windows: &specs.Windows{LayerFolders: layers},
|
||||
// Mounts: []specs.Mount{mount},
|
||||
// },
|
||||
// })
|
||||
// if err != nil {
|
||||
// t.Fatalf("Failed create: %s", err)
|
||||
// }
|
||||
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
|
||||
|
||||
// startContainer(t, c)
|
||||
// runCommand(t, c, `cmd /s /c dir /b c:\foo`, `c:\`, "foo.txt")
|
||||
// stopContainer(t, c)
|
||||
// c.Terminate()
|
||||
//}
|
365
vendor/github.com/Microsoft/hcsshim/internal/hcsoci/wcow_xenon_test.go
generated
vendored
365
vendor/github.com/Microsoft/hcsshim/internal/hcsoci/wcow_xenon_test.go
generated
vendored
@ -1,365 +0,0 @@
|
||||
// +build windows,functional
|
||||
|
||||
package hcsoci
|
||||
|
||||
//import (
|
||||
// "fmt"
|
||||
// "os"
|
||||
// "path/filepath"
|
||||
// "testing"
|
||||
|
||||
// "github.com/Microsoft/hcsshim/internal/schemaversion"
|
||||
// specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
//)
|
||||
|
||||
//// --------------------------------
|
||||
//// W C O W X E N O N V 2
|
||||
//// --------------------------------
|
||||
|
||||
//// A single WCOW xenon. Note in this test, neither the UVM or the
|
||||
//// containers are supplied IDs - they will be autogenerated for us.
|
||||
//// This is the minimum set of parameters needed to create a V2 WCOW xenon.
|
||||
//func TestV2XenonWCOW(t *testing.T) {
|
||||
// t.Skip("Skipping for now")
|
||||
// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "", nil)
|
||||
// defer os.RemoveAll(uvmScratchDir)
|
||||
// defer uvm.Terminate()
|
||||
|
||||
// // Create the container hosted inside the utility VM
|
||||
// containerScratchDir := createWCOWTempDirWithSandbox(t)
|
||||
// defer os.RemoveAll(containerScratchDir)
|
||||
// layerFolders := append(layersNanoserver, containerScratchDir)
|
||||
// hostedContainer, err := CreateContainer(&CreateOptions{
|
||||
// HostingSystem: uvm,
|
||||
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layerFolders}},
|
||||
// })
|
||||
// if err != nil {
|
||||
// t.Fatalf("CreateContainer failed: %s", err)
|
||||
// }
|
||||
// defer unmountContainerLayers(layerFolders, uvm, unmountOperationAll)
|
||||
|
||||
// // Start/stop the container
|
||||
// startContainer(t, hostedContainer)
|
||||
// runCommand(t, hostedContainer, "cmd /s /c echo TestV2XenonWCOW", `c:\`, "TestV2XenonWCOW")
|
||||
// stopContainer(t, hostedContainer)
|
||||
// hostedContainer.Terminate()
|
||||
//}
|
||||
|
||||
//// TODO: Have a similar test where the UVM scratch folder does not exist.
|
||||
//// A single WCOW xenon but where the container sandbox folder is not pre-created by the client
|
||||
//func TestV2XenonWCOWContainerSandboxFolderDoesNotExist(t *testing.T) {
|
||||
// t.Skip("Skipping for now")
|
||||
// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "TestV2XenonWCOWContainerSandboxFolderDoesNotExist_UVM", nil)
|
||||
// defer os.RemoveAll(uvmScratchDir)
|
||||
// defer uvm.Terminate()
|
||||
|
||||
// // This is the important bit for this test. It's deleted here. We call the helper only to allocate a temporary directory
|
||||
// containerScratchDir := createTempDir(t)
|
||||
// os.RemoveAll(containerScratchDir)
|
||||
// defer os.RemoveAll(containerScratchDir) // As auto-created
|
||||
|
||||
// layerFolders := append(layersBusybox, containerScratchDir)
|
||||
// hostedContainer, err := CreateContainer(&CreateOptions{
|
||||
// Id: "container",
|
||||
// HostingSystem: uvm,
|
||||
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layerFolders}},
|
||||
// })
|
||||
// if err != nil {
|
||||
// t.Fatalf("CreateContainer failed: %s", err)
|
||||
// }
|
||||
// defer unmountContainerLayers(layerFolders, uvm, unmountOperationAll)
|
||||
|
||||
// // Start/stop the container
|
||||
// startContainer(t, hostedContainer)
|
||||
// runCommand(t, hostedContainer, "cmd /s /c echo TestV2XenonWCOW", `c:\`, "TestV2XenonWCOW")
|
||||
// stopContainer(t, hostedContainer)
|
||||
// hostedContainer.Terminate()
|
||||
//}
|
||||
|
||||
//// TODO What about mount. Test with the client doing the mount.
|
||||
//// TODO Test as above, but where sandbox for UVM is entirely created by a client to show how it's done.
|
||||
|
||||
//// Two v2 WCOW containers in the same UVM, each with a single base layer
|
||||
//func TestV2XenonWCOWTwoContainers(t *testing.T) {
|
||||
// t.Skip("Skipping for now")
|
||||
// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "TestV2XenonWCOWTwoContainers_UVM", nil)
|
||||
// defer os.RemoveAll(uvmScratchDir)
|
||||
// defer uvm.Terminate()
|
||||
|
||||
// // First hosted container
|
||||
// firstContainerScratchDir := createWCOWTempDirWithSandbox(t)
|
||||
// defer os.RemoveAll(firstContainerScratchDir)
|
||||
// firstLayerFolders := append(layersNanoserver, firstContainerScratchDir)
|
||||
// firstHostedContainer, err := CreateContainer(&CreateOptions{
|
||||
// Id: "FirstContainer",
|
||||
// HostingSystem: uvm,
|
||||
// SchemaVersion: schemaversion.SchemaV21(),
|
||||
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: firstLayerFolders}},
|
||||
// })
|
||||
// if err != nil {
|
||||
// t.Fatalf("CreateContainer failed: %s", err)
|
||||
// }
|
||||
// defer unmountContainerLayers(firstLayerFolders, uvm, unmountOperationAll)
|
||||
|
||||
// // Second hosted container
|
||||
// secondContainerScratchDir := createWCOWTempDirWithSandbox(t)
|
||||
// defer os.RemoveAll(firstContainerScratchDir)
|
||||
// secondLayerFolders := append(layersNanoserver, secondContainerScratchDir)
|
||||
// secondHostedContainer, err := CreateContainer(&CreateOptions{
|
||||
// Id: "SecondContainer",
|
||||
// HostingSystem: uvm,
|
||||
// SchemaVersion: schemaversion.SchemaV21(),
|
||||
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: secondLayerFolders}},
|
||||
// })
|
||||
// if err != nil {
|
||||
// t.Fatalf("CreateContainer failed: %s", err)
|
||||
// }
|
||||
// defer unmountContainerLayers(secondLayerFolders, uvm, unmountOperationAll)
|
||||
|
||||
// startContainer(t, firstHostedContainer)
|
||||
// runCommand(t, firstHostedContainer, "cmd /s /c echo FirstContainer", `c:\`, "FirstContainer")
|
||||
// startContainer(t, secondHostedContainer)
|
||||
// runCommand(t, secondHostedContainer, "cmd /s /c echo SecondContainer", `c:\`, "SecondContainer")
|
||||
// stopContainer(t, firstHostedContainer)
|
||||
// stopContainer(t, secondHostedContainer)
|
||||
// firstHostedContainer.Terminate()
|
||||
// secondHostedContainer.Terminate()
|
||||
//}
|
||||
|
||||
////// This verifies the container storage is unmounted correctly so that a second
|
||||
////// container can be started from the same storage.
|
||||
////func TestV2XenonWCOWWithRemount(t *testing.T) {
|
||||
////// //t.Skip("Skipping for now")
|
||||
//// uvmID := "Testv2XenonWCOWWithRestart_UVM"
|
||||
//// uvmScratchDir, err := ioutil.TempDir("", "uvmScratch")
|
||||
//// if err != nil {
|
||||
//// t.Fatalf("Failed create temporary directory: %s", err)
|
||||
//// }
|
||||
//// if err := CreateWCOWSandbox(layersNanoserver[0], uvmScratchDir, uvmID); err != nil {
|
||||
//// t.Fatalf("Failed create Windows UVM Sandbox: %s", err)
|
||||
//// }
|
||||
//// defer os.RemoveAll(uvmScratchDir)
|
||||
|
||||
//// uvm, err := CreateContainer(&CreateOptions{
|
||||
//// Id: uvmID,
|
||||
//// Owner: "unit-test",
|
||||
//// SchemaVersion: SchemaV21(),
|
||||
//// IsHostingSystem: true,
|
||||
//// Spec: &specs.Spec{
|
||||
//// Windows: &specs.Windows{
|
||||
//// LayerFolders: []string{uvmScratchDir},
|
||||
//// HyperV: &specs.WindowsHyperV{UtilityVMPath: filepath.Join(layersNanoserver[0], `UtilityVM\Files`)},
|
||||
//// },
|
||||
//// },
|
||||
//// })
|
||||
//// if err != nil {
|
||||
//// t.Fatalf("Failed create UVM: %s", err)
|
||||
//// }
|
||||
//// defer uvm.Terminate()
|
||||
//// if err := uvm.Start(); err != nil {
|
||||
//// t.Fatalf("Failed start utility VM: %s", err)
|
||||
//// }
|
||||
|
||||
//// // Mount the containers storage in the utility VM
|
||||
//// containerScratchDir := createWCOWTempDirWithSandbox(t)
|
||||
//// layerFolders := append(layersNanoserver, containerScratchDir)
|
||||
//// cls, err := Mount(layerFolders, uvm, SchemaV21())
|
||||
//// if err != nil {
|
||||
//// t.Fatalf("failed to mount container storage: %s", err)
|
||||
//// }
|
||||
//// combinedLayers := cls.(CombinedLayersV2)
|
||||
//// mountedLayers := &ContainersResourcesStorageV2{
|
||||
//// Layers: combinedLayers.Layers,
|
||||
//// Path: combinedLayers.ContainerRootPath,
|
||||
//// }
|
||||
//// defer func() {
|
||||
//// if err := Unmount(layerFolders, uvm, SchemaV21(), unmountOperationAll); err != nil {
|
||||
//// t.Fatalf("failed to unmount container storage: %s", err)
|
||||
//// }
|
||||
//// }()
|
||||
|
||||
//// // Create the first container
|
||||
//// defer os.RemoveAll(containerScratchDir)
|
||||
//// xenon, err := CreateContainer(&CreateOptions{
|
||||
//// Id: "container",
|
||||
//// Owner: "unit-test",
|
||||
//// HostingSystem: uvm,
|
||||
//// SchemaVersion: SchemaV21(),
|
||||
//// Spec: &specs.Spec{Windows: &specs.Windows{}}, // No layerfolders as we mounted them ourself.
|
||||
//// })
|
||||
//// if err != nil {
|
||||
//// t.Fatalf("CreateContainer failed: %s", err)
|
||||
//// }
|
||||
|
||||
//// // Start/stop the first container
|
||||
//// startContainer(t, xenon)
|
||||
//// runCommand(t, xenon, "cmd /s /c echo TestV2XenonWCOWFirstStart", `c:\`, "TestV2XenonWCOWFirstStart")
|
||||
//// stopContainer(t, xenon)
|
||||
//// xenon.Terminate()
|
||||
|
||||
//// // Now unmount and remount to exactly the same places
|
||||
//// if err := Unmount(layerFolders, uvm, SchemaV21(), unmountOperationAll); err != nil {
|
||||
//// t.Fatalf("failed to unmount container storage: %s", err)
|
||||
//// }
|
||||
//// if _, err = Mount(layerFolders, uvm, SchemaV21()); err != nil {
|
||||
//// t.Fatalf("failed to mount container storage: %s", err)
|
||||
//// }
|
||||
|
||||
//// // Create an identical second container and verify it works too.
|
||||
//// xenon2, err := CreateContainer(&CreateOptions{
|
||||
//// Id: "container",
|
||||
//// Owner: "unit-test",
|
||||
//// HostingSystem: uvm,
|
||||
//// SchemaVersion: SchemaV21(),
|
||||
//// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layerFolders}},
|
||||
//// MountedLayers: mountedLayers,
|
||||
//// })
|
||||
//// if err != nil {
|
||||
//// t.Fatalf("CreateContainer failed: %s", err)
|
||||
//// }
|
||||
//// startContainer(t, xenon2)
|
||||
//// runCommand(t, xenon2, "cmd /s /c echo TestV2XenonWCOWAfterRemount", `c:\`, "TestV2XenonWCOWAfterRemount")
|
||||
//// stopContainer(t, xenon2)
|
||||
//// xenon2.Terminate()
|
||||
////}
|
||||
|
||||
//// Lots of v2 WCOW containers in the same UVM, each with a single base layer. Containers aren't
|
||||
//// actually started, but it stresses the SCSI controller hot-add logic.
|
||||
//func TestV2XenonWCOWCreateLots(t *testing.T) {
|
||||
// t.Skip("Skipping for now")
|
||||
// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "TestV2XenonWCOWCreateLots", nil)
|
||||
// defer os.RemoveAll(uvmScratchDir)
|
||||
// defer uvm.Terminate()
|
||||
|
||||
// // 63 as 0:0 is already taken as the UVMs scratch. So that leaves us with 64-1 left for container scratches on SCSI
|
||||
// for i := 0; i < 63; i++ {
|
||||
// containerScratchDir := createWCOWTempDirWithSandbox(t)
|
||||
// defer os.RemoveAll(containerScratchDir)
|
||||
// layerFolders := append(layersNanoserver, containerScratchDir)
|
||||
// hostedContainer, err := CreateContainer(&CreateOptions{
|
||||
// Id: fmt.Sprintf("container%d", i),
|
||||
// HostingSystem: uvm,
|
||||
// SchemaVersion: schemaversion.SchemaV21(),
|
||||
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layerFolders}},
|
||||
// })
|
||||
// if err != nil {
|
||||
// t.Fatalf("CreateContainer failed: %s", err)
|
||||
// }
|
||||
// defer hostedContainer.Terminate()
|
||||
// defer unmountContainerLayers(layerFolders, uvm, unmountOperationAll)
|
||||
// }
|
||||
|
||||
// // TODO: Should check the internal structures here for VSMB and SCSI
|
||||
|
||||
// // TODO: Push it over 63 now and will get a failure.
|
||||
//}
|
||||
|
||||
//// Helper for the v2 Xenon tests to create a utility VM. Returns the UtilityVM
|
||||
//// object; folder used as its scratch
|
||||
//func createv2WCOWUVM(t *testing.T, uvmLayers []string, uvmId string, resources *specs.WindowsResources) (*UtilityVM, string) {
|
||||
// scratchDir := createTempDir(t)
|
||||
// uvm := UtilityVM{
|
||||
// OperatingSystem: "windows",
|
||||
// LayerFolders: append(uvmLayers, scratchDir),
|
||||
// Resources: resources,
|
||||
// }
|
||||
// if uvmId != "" {
|
||||
// uvm.Id = uvmId
|
||||
// }
|
||||
// if err := uvm.Create(); err != nil {
|
||||
// t.Fatalf("Failed create WCOW v2 UVM: %s", err)
|
||||
// }
|
||||
// if err := uvm.Start(); err != nil {
|
||||
// t.Fatalf("Failed start WCOW v2UVM: %s", err)
|
||||
|
||||
// }
|
||||
// return &uvm, scratchDir
|
||||
//}
|
||||
|
||||
//// TestV2XenonWCOWMultiLayer creates a V2 Xenon having multiple image layers
|
||||
//func TestV2XenonWCOWMultiLayer(t *testing.T) {
|
||||
// t.Skip("for now")
|
||||
|
||||
// uvmMemory := uint64(1 * 1024 * 1024 * 1024)
|
||||
// uvmCPUCount := uint64(2)
|
||||
// resources := &specs.WindowsResources{
|
||||
// Memory: &specs.WindowsMemoryResources{
|
||||
// Limit: &uvmMemory,
|
||||
// },
|
||||
// CPU: &specs.WindowsCPUResources{
|
||||
// Count: &uvmCPUCount,
|
||||
// },
|
||||
// }
|
||||
// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "TestV2XenonWCOWMultiLayer_UVM", resources)
|
||||
// defer os.RemoveAll(uvmScratchDir)
|
||||
// defer uvm.Terminate()
|
||||
|
||||
// // Create a sandbox for the hosted container
|
||||
// containerScratchDir := createWCOWTempDirWithSandbox(t)
|
||||
// defer os.RemoveAll(containerScratchDir)
|
||||
|
||||
// // Create the container. Note that this will auto-mount for us.
|
||||
// containerLayers := append(layersBusybox, containerScratchDir)
|
||||
// xenon, err := CreateContainer(&CreateOptions{
|
||||
// Id: "container",
|
||||
// HostingSystem: uvm,
|
||||
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: containerLayers}},
|
||||
// })
|
||||
// if err != nil {
|
||||
// t.Fatalf("CreateContainer failed: %s", err)
|
||||
// }
|
||||
|
||||
// // Start/stop the container
|
||||
// startContainer(t, xenon)
|
||||
// runCommand(t, xenon, "echo Container", `c:\`, "Container")
|
||||
// stopContainer(t, xenon)
|
||||
// xenon.Terminate()
|
||||
// // TODO Move this to a defer function to fail if it fails.
|
||||
// if err := unmountContainerLayers(containerLayers, uvm, unmountOperationAll); err != nil {
|
||||
// t.Fatalf("unmount failed: %s", err)
|
||||
// }
|
||||
|
||||
//}
|
||||
|
||||
//// TestV2XenonWCOWSingleMappedDirectory tests a V2 Xenon WCOW with a single mapped directory
|
||||
//func TestV2XenonWCOWSingleMappedDirectory(t *testing.T) {
|
||||
// t.Skip("Skipping for now")
|
||||
// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "", nil)
|
||||
// defer os.RemoveAll(uvmScratchDir)
|
||||
// defer uvm.Terminate()
|
||||
|
||||
// // Create the container hosted inside the utility VM
|
||||
// containerScratchDir := createWCOWTempDirWithSandbox(t)
|
||||
// defer os.RemoveAll(containerScratchDir)
|
||||
// layerFolders := append(layersNanoserver, containerScratchDir)
|
||||
|
||||
// // Create a temp folder containing foo.txt which will be used for the bind-mount test.
|
||||
// source := createTempDir(t)
|
||||
// defer os.RemoveAll(source)
|
||||
// mount := specs.Mount{
|
||||
// Source: source,
|
||||
// Destination: `c:\foo`,
|
||||
// }
|
||||
// f, err := os.OpenFile(filepath.Join(source, "foo.txt"), os.O_RDWR|os.O_CREATE, 0755)
|
||||
// f.Close()
|
||||
|
||||
// hostedContainer, err := CreateContainer(&CreateOptions{
|
||||
// HostingSystem: uvm,
|
||||
// Spec: &specs.Spec{
|
||||
// Windows: &specs.Windows{LayerFolders: layerFolders},
|
||||
// Mounts: []specs.Mount{mount},
|
||||
// },
|
||||
// })
|
||||
// if err != nil {
|
||||
// t.Fatalf("CreateContainer failed: %s", err)
|
||||
// }
|
||||
// defer unmountContainerLayers(layerFolders, uvm, unmountOperationAll)
|
||||
|
||||
// // TODO BUGBUG NEED TO UNMOUNT TO VSMB SHARE FOR THE CONTAINER
|
||||
|
||||
// // Start/stop the container
|
||||
// startContainer(t, hostedContainer)
|
||||
// runCommand(t, hostedContainer, `cmd /s /c dir /b c:\foo`, `c:\`, "foo.txt")
|
||||
// stopContainer(t, hostedContainer)
|
||||
// hostedContainer.Terminate()
|
||||
//}
|
9
vendor/github.com/Microsoft/hcsshim/internal/lcow/constants.go
generated
vendored
9
vendor/github.com/Microsoft/hcsshim/internal/lcow/constants.go
generated
vendored
@ -1,9 +0,0 @@
|
||||
package lcow
|
||||
|
||||
const (
|
||||
// DefaultScratchSizeGB is the size of the default LCOW scratch disk in GB
|
||||
DefaultScratchSizeGB = 20
|
||||
|
||||
// defaultVhdxBlockSizeMB is the block-size for the scratch VHDx's this package can create.
|
||||
defaultVhdxBlockSizeMB = 1
|
||||
)
|
55
vendor/github.com/Microsoft/hcsshim/internal/lcow/debug.go
generated
vendored
55
vendor/github.com/Microsoft/hcsshim/internal/lcow/debug.go
generated
vendored
@ -1,55 +0,0 @@
|
||||
package lcow
|
||||
|
||||
//func debugCommand(s string) string {
|
||||
// return fmt.Sprintf(`echo -e 'DEBUG COMMAND: %s\\n--------------\\n';%s;echo -e '\\n\\n';`, s, s)
|
||||
//}
|
||||
|
||||
// DebugLCOWGCS extracts logs from the GCS in LCOW. It's a useful hack for debugging,
|
||||
// but not necessarily optimal; however, it's all that is available to us in RS3.
|
||||
//func (container *container) DebugLCOWGCS() {
|
||||
// if logrus.GetLevel() < logrus.DebugLevel || len(os.Getenv("HCSSHIM_LCOW_DEBUG_ENABLE")) == 0 {
|
||||
// return
|
||||
// }
|
||||
|
||||
// var out bytes.Buffer
|
||||
// cmd := os.Getenv("HCSSHIM_LCOW_DEBUG_COMMAND")
|
||||
// if cmd == "" {
|
||||
// cmd = `sh -c "`
|
||||
// cmd += debugCommand("kill -10 `pidof gcs`") // SIGUSR1 for stackdump
|
||||
// cmd += debugCommand("ls -l /tmp")
|
||||
// cmd += debugCommand("cat /tmp/gcs.log")
|
||||
// cmd += debugCommand("cat /tmp/gcs/gcs-stacks*")
|
||||
// cmd += debugCommand("cat /tmp/gcs/paniclog*")
|
||||
// cmd += debugCommand("ls -l /tmp/gcs")
|
||||
// cmd += debugCommand("ls -l /tmp/gcs/*")
|
||||
// cmd += debugCommand("cat /tmp/gcs/*/config.json")
|
||||
// cmd += debugCommand("ls -lR /var/run/gcsrunc")
|
||||
// cmd += debugCommand("cat /tmp/gcs/global-runc.log")
|
||||
// cmd += debugCommand("cat /tmp/gcs/*/runc.log")
|
||||
// cmd += debugCommand("ps -ef")
|
||||
// cmd += `"`
|
||||
// }
|
||||
|
||||
// proc, _, err := container.CreateProcessEx(
|
||||
// &CreateProcessEx{
|
||||
// OCISpecification: &specs.Spec{
|
||||
// Process: &specs.Process{Args: []string{cmd}},
|
||||
// Linux: &specs.Linux{},
|
||||
// },
|
||||
// CreateInUtilityVm: true,
|
||||
// Stdout: &out,
|
||||
// })
|
||||
// defer func() {
|
||||
// if proc != nil {
|
||||
// proc.Kill()
|
||||
// proc.Close()
|
||||
// }
|
||||
// }()
|
||||
// if err != nil {
|
||||
// logrus.Debugln("benign failure getting gcs logs: ", err)
|
||||
// }
|
||||
// if proc != nil {
|
||||
// proc.WaitTimeout(time.Duration(int(time.Second) * 30))
|
||||
// }
|
||||
// logrus.Debugf("GCS Debugging:\n%s\n\nEnd GCS Debugging", strings.TrimSpace(out.String()))
|
||||
//}
|
161
vendor/github.com/Microsoft/hcsshim/internal/lcow/process.go
generated
vendored
161
vendor/github.com/Microsoft/hcsshim/internal/lcow/process.go
generated
vendored
@ -1,161 +0,0 @@
|
||||
package lcow
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Microsoft/hcsshim/internal/copywithtimeout"
|
||||
"github.com/Microsoft/hcsshim/internal/hcs"
|
||||
"github.com/Microsoft/hcsshim/internal/schema2"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ByteCounts are the number of bytes copied to/from standard handles. Note
|
||||
// this is int64 rather than uint64 to match the golang io.Copy() signature.
|
||||
type ByteCounts struct {
|
||||
In int64
|
||||
Out int64
|
||||
Err int64
|
||||
}
|
||||
|
||||
// ProcessOptions are the set of options which are passed to CreateProcess() to
|
||||
// create a process in a compute system or utility VM.
|
||||
type ProcessOptions struct {
|
||||
HCSSystem *hcs.System
|
||||
Process *specs.Process
|
||||
Stdin io.Reader // Optional reader for sending on to the processes stdin stream
|
||||
Stdout io.Writer // Optional writer for returning the processes stdout stream
|
||||
Stderr io.Writer // Optional writer for returning the processes stderr stream
|
||||
CopyTimeout time.Duration // Timeout for the copy
|
||||
CreateInUtilityVm bool // If the compute system is a utility VM
|
||||
ByteCounts ByteCounts // How much data to copy on each stream if they are supplied. 0 means to io.EOF.
|
||||
}
|
||||
|
||||
// CreateProcess creates a process either in an LCOW utility VM, or for starting
|
||||
// the init process. TODO: Potentially extend for exec'd processes.
|
||||
//
|
||||
// It's essentially a glorified wrapper around hcs.ComputeSystem CreateProcess used
|
||||
// for internal purposes.
|
||||
//
|
||||
// This is used on LCOW to run processes for remote filesystem commands, utilities,
|
||||
// and debugging.
|
||||
//
|
||||
// It optionally performs IO copies with a timeout between the pipes provided as input,
|
||||
// and the pipes in the process.
|
||||
//
|
||||
// In the ProcessOptions structure, if byte-counts are non-zero, a maximum of those
|
||||
// bytes are copied to the appropriate standard IO reader/writer. When zero,
|
||||
// it copies until EOF. It also returns byte-counts indicating how much data
|
||||
// was sent/received from the process.
|
||||
//
|
||||
// It is the responsibility of the caller to call Close() on the process returned.
|
||||
|
||||
func CreateProcess(opts *ProcessOptions) (*hcs.Process, *ByteCounts, error) {
|
||||
|
||||
var environment = make(map[string]string)
|
||||
copiedByteCounts := &ByteCounts{}
|
||||
|
||||
if opts == nil {
|
||||
return nil, nil, fmt.Errorf("no options supplied")
|
||||
}
|
||||
|
||||
if opts.HCSSystem == nil {
|
||||
return nil, nil, fmt.Errorf("no HCS system supplied")
|
||||
}
|
||||
|
||||
if opts.CreateInUtilityVm && opts.Process == nil {
|
||||
return nil, nil, fmt.Errorf("process must be supplied for UVM process")
|
||||
}
|
||||
|
||||
// Don't pass a process in if this is an LCOW container. This will start the init process.
|
||||
if opts.Process != nil {
|
||||
for _, v := range opts.Process.Env {
|
||||
s := strings.SplitN(v, "=", 2)
|
||||
if len(s) == 2 && len(s[1]) > 0 {
|
||||
environment[s[0]] = s[1]
|
||||
}
|
||||
}
|
||||
if _, ok := environment["PATH"]; !ok {
|
||||
environment["PATH"] = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:"
|
||||
}
|
||||
}
|
||||
|
||||
processConfig := &ProcessParameters{
|
||||
ProcessParameters: hcsschema.ProcessParameters{
|
||||
CreateStdInPipe: (opts.Stdin != nil),
|
||||
CreateStdOutPipe: (opts.Stdout != nil),
|
||||
CreateStdErrPipe: (opts.Stderr != nil),
|
||||
EmulateConsole: false,
|
||||
},
|
||||
CreateInUtilityVm: opts.CreateInUtilityVm,
|
||||
}
|
||||
|
||||
if opts.Process != nil {
|
||||
processConfig.Environment = environment
|
||||
processConfig.CommandLine = strings.Join(opts.Process.Args, " ")
|
||||
processConfig.WorkingDirectory = opts.Process.Cwd
|
||||
if processConfig.WorkingDirectory == "" {
|
||||
processConfig.WorkingDirectory = `/`
|
||||
}
|
||||
}
|
||||
|
||||
proc, err := opts.HCSSystem.CreateProcess(processConfig)
|
||||
if err != nil {
|
||||
logrus.Debugf("failed to create process: %s", err)
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
processStdin, processStdout, processStderr, err := proc.Stdio()
|
||||
if err != nil {
|
||||
proc.Kill() // Should this have a timeout?
|
||||
proc.Close()
|
||||
return nil, nil, fmt.Errorf("failed to get stdio pipes for process %+v: %s", processConfig, err)
|
||||
}
|
||||
|
||||
// Send the data into the process's stdin
|
||||
if opts.Stdin != nil {
|
||||
if copiedByteCounts.In, err = copywithtimeout.Copy(processStdin,
|
||||
opts.Stdin,
|
||||
opts.ByteCounts.In,
|
||||
"stdin",
|
||||
opts.CopyTimeout); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Don't need stdin now we've sent everything. This signals GCS that we are finished sending data.
|
||||
if err := proc.CloseStdin(); err != nil && !hcs.IsNotExist(err) && !hcs.IsAlreadyClosed(err) {
|
||||
// This error will occur if the compute system is currently shutting down
|
||||
if perr, ok := err.(*hcs.ProcessError); ok && perr.Err != hcs.ErrVmcomputeOperationInvalidState {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Copy the data back from stdout
|
||||
if opts.Stdout != nil {
|
||||
// Copy the data over to the writer.
|
||||
if copiedByteCounts.Out, err = copywithtimeout.Copy(opts.Stdout,
|
||||
processStdout,
|
||||
opts.ByteCounts.Out,
|
||||
"stdout",
|
||||
opts.CopyTimeout); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Copy the data back from stderr
|
||||
if opts.Stderr != nil {
|
||||
// Copy the data over to the writer.
|
||||
if copiedByteCounts.Err, err = copywithtimeout.Copy(opts.Stderr,
|
||||
processStderr,
|
||||
opts.ByteCounts.Err,
|
||||
"stderr",
|
||||
opts.CopyTimeout); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
return proc, copiedByteCounts, nil
|
||||
}
|
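
For orientation, here is a minimal, hypothetical sketch of how a caller was expected to drive the removed CreateProcess helper above. The compute system handle, command, and timeout are illustrative assumptions, not taken from this commit, and these internal packages are only importable from within the hcsshim module itself.

package main

import (
	"bytes"
	"log"
	"time"

	"github.com/Microsoft/hcsshim/internal/hcs"
	"github.com/Microsoft/hcsshim/internal/lcow"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// runInUVM runs a single command inside a running LCOW utility VM and captures
// its output, using the ProcessOptions/CreateProcess API shown in the diff above.
func runInUVM(system *hcs.System, args []string) error {
	var stdout, stderr bytes.Buffer
	proc, counts, err := lcow.CreateProcess(&lcow.ProcessOptions{
		HCSSystem:         system,
		CreateInUtilityVm: true, // target the utility VM itself, not a hosted container
		Process:           &specs.Process{Args: args},
		Stdout:            &stdout,
		Stderr:            &stderr,
		CopyTimeout:       30 * time.Second, // assumed timeout; pick to suit the command
	})
	if err != nil {
		return err
	}
	defer proc.Close()
	proc.WaitTimeout(30 * time.Second)
	log.Printf("stdout (%d bytes): %s", counts.Out, stdout.String())
	log.Printf("stderr (%d bytes): %s", counts.Err, stderr.String())
	return nil
}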
168
vendor/github.com/Microsoft/hcsshim/internal/lcow/scratch.go
generated
vendored
168
vendor/github.com/Microsoft/hcsshim/internal/lcow/scratch.go
generated
vendored
@ -1,168 +0,0 @@
|
||||
package lcow
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Microsoft/go-winio/vhd"
|
||||
"github.com/Microsoft/hcsshim/internal/copyfile"
|
||||
"github.com/Microsoft/hcsshim/internal/timeout"
|
||||
"github.com/Microsoft/hcsshim/internal/uvm"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// CreateScratch uses a utility VM to create an empty scratch disk of a requested size.
|
||||
// It has a caching capability. If the cacheFile exists, and the request is for a default
|
||||
// size, a copy of that is made to the target. If the size is non-default, or the cache file
|
||||
// does not exist, it uses a utility VM to create the target. It is the responsibility of the
|
||||
// caller to synchronise simultaneous attempts to create the cache file.
|
||||
func CreateScratch(lcowUVM *uvm.UtilityVM, destFile string, sizeGB uint32, cacheFile string, vmID string) error {
|
||||
|
||||
if lcowUVM == nil {
|
||||
return fmt.Errorf("no uvm")
|
||||
}
|
||||
|
||||
if lcowUVM.OS() != "linux" {
|
||||
return fmt.Errorf("CreateLCOWScratch requires a linux utility VM to operate!")
|
||||
}
|
||||
|
||||
// Smallest we can accept is the default scratch size as we can't size down, only expand.
|
||||
if sizeGB < DefaultScratchSizeGB {
|
||||
sizeGB = DefaultScratchSizeGB
|
||||
}
|
||||
|
||||
logrus.Debugf("hcsshim::CreateLCOWScratch: Dest:%s size:%dGB cache:%s", destFile, sizeGB, cacheFile)
|
||||
|
||||
// Retrieve from cache if the default size and already on disk
|
||||
if cacheFile != "" && sizeGB == DefaultScratchSizeGB {
|
||||
if _, err := os.Stat(cacheFile); err == nil {
|
||||
if err := copyfile.CopyFile(cacheFile, destFile, false); err != nil {
|
||||
return fmt.Errorf("failed to copy cached file '%s' to '%s': %s", cacheFile, destFile, err)
|
||||
}
|
||||
logrus.Debugf("hcsshim::CreateLCOWScratch: %s fulfilled from cache (%s)", destFile, cacheFile)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Create the VHDX
|
||||
if err := vhd.CreateVhdx(destFile, sizeGB, defaultVhdxBlockSizeMB); err != nil {
|
||||
return fmt.Errorf("failed to create VHDx %s: %s", destFile, err)
|
||||
}
|
||||
|
||||
controller, lun, err := lcowUVM.AddSCSI(destFile, "", false) // No destination as not formatted
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Debugf("hcsshim::CreateLCOWScratch: %s at C=%d L=%d", destFile, controller, lun)
|
||||
|
||||
// Validate /sys/bus/scsi/devices/C:0:0:L exists as a directory
|
||||
|
||||
startTime := time.Now()
|
||||
for {
|
||||
testdCommand := []string{"test", "-d", fmt.Sprintf("/sys/bus/scsi/devices/%d:0:0:%d", controller, lun)}
|
||||
testdProc, _, err := CreateProcess(&ProcessOptions{
|
||||
HCSSystem: lcowUVM.ComputeSystem(),
|
||||
CreateInUtilityVm: true,
|
||||
CopyTimeout: timeout.ExternalCommandToStart,
|
||||
Process: &specs.Process{Args: testdCommand},
|
||||
})
|
||||
if err != nil {
|
||||
lcowUVM.RemoveSCSI(destFile)
|
||||
return fmt.Errorf("failed to run %+v following hot-add %s to utility VM: %s", testdCommand, destFile, err)
|
||||
}
|
||||
defer testdProc.Close()
|
||||
|
||||
testdProc.WaitTimeout(timeout.ExternalCommandToComplete)
|
||||
testdExitCode, err := testdProc.ExitCode()
|
||||
if err != nil {
|
||||
lcowUVM.RemoveSCSI(destFile)
|
||||
return fmt.Errorf("failed to get exit code from from %+v following hot-add %s to utility VM: %s", testdCommand, destFile, err)
|
||||
}
|
||||
if testdExitCode != 0 {
|
||||
currentTime := time.Now()
|
||||
elapsedTime := currentTime.Sub(startTime)
|
||||
if elapsedTime > timeout.TestDRetryLoop {
|
||||
lcowUVM.RemoveSCSI(destFile)
|
||||
return fmt.Errorf("`%+v` returned a non-zero exit code (%d) following hot-add %s to utility VM", testdCommand, testdExitCode, destFile)
|
||||
}
|
||||
} else {
|
||||
break
|
||||
}
|
||||
time.Sleep(time.Millisecond * 10)
|
||||
}
|
||||
|
||||
// Get the device from under the block subdirectory by doing a simple ls. This will come back as (eg) `sda`
|
||||
var lsOutput bytes.Buffer
|
||||
lsCommand := []string{"ls", fmt.Sprintf("/sys/bus/scsi/devices/%d:0:0:%d/block", controller, lun)}
|
||||
lsProc, _, err := CreateProcess(&ProcessOptions{
|
||||
HCSSystem: lcowUVM.ComputeSystem(),
|
||||
CreateInUtilityVm: true,
|
||||
CopyTimeout: timeout.ExternalCommandToStart,
|
||||
Process: &specs.Process{Args: lsCommand},
|
||||
Stdout: &lsOutput,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
lcowUVM.RemoveSCSI(destFile)
|
||||
return fmt.Errorf("failed to `%+v` following hot-add %s to utility VM: %s", lsCommand, destFile, err)
|
||||
}
|
||||
defer lsProc.Close()
|
||||
lsProc.WaitTimeout(timeout.ExternalCommandToComplete)
|
||||
lsExitCode, err := lsProc.ExitCode()
|
||||
if err != nil {
|
||||
lcowUVM.RemoveSCSI(destFile)
|
||||
return fmt.Errorf("failed to get exit code from `%+v` following hot-add %s to utility VM: %s", lsCommand, destFile, err)
|
||||
}
|
||||
if lsExitCode != 0 {
|
||||
lcowUVM.RemoveSCSI(destFile)
|
||||
return fmt.Errorf("`%+v` returned a non-zero exit code (%d) following hot-add %s to utility VM", lsCommand, lsExitCode, destFile)
|
||||
}
|
||||
device := fmt.Sprintf(`/dev/%s`, strings.TrimSpace(lsOutput.String()))
|
||||
logrus.Debugf("hcsshim: CreateExt4Vhdx: %s: device at %s", destFile, device)
|
||||
|
||||
// Format it ext4
|
||||
mkfsCommand := []string{"mkfs.ext4", "-q", "-E", "lazy_itable_init=1", "-O", `^has_journal,sparse_super2,uninit_bg,^resize_inode`, device}
|
||||
var mkfsStderr bytes.Buffer
|
||||
mkfsProc, _, err := CreateProcess(&ProcessOptions{
|
||||
HCSSystem: lcowUVM.ComputeSystem(),
|
||||
CreateInUtilityVm: true,
|
||||
CopyTimeout: timeout.ExternalCommandToStart,
|
||||
Process: &specs.Process{Args: mkfsCommand},
|
||||
Stderr: &mkfsStderr,
|
||||
})
|
||||
if err != nil {
|
||||
lcowUVM.RemoveSCSI(destFile)
|
||||
return fmt.Errorf("failed to `%+v` following hot-add %s to utility VM: %s", mkfsCommand, destFile, err)
|
||||
}
|
||||
defer mkfsProc.Close()
|
||||
mkfsProc.WaitTimeout(timeout.ExternalCommandToComplete)
|
||||
mkfsExitCode, err := mkfsProc.ExitCode()
|
||||
if err != nil {
|
||||
lcowUVM.RemoveSCSI(destFile)
|
||||
return fmt.Errorf("failed to get exit code from `%+v` following hot-add %s to utility VM: %s", mkfsCommand, destFile, err)
|
||||
}
|
||||
if mkfsExitCode != 0 {
|
||||
lcowUVM.RemoveSCSI(destFile)
|
||||
return fmt.Errorf("`%+v` returned a non-zero exit code (%d) following hot-add %s to utility VM: %s", mkfsCommand, mkfsExitCode, destFile, strings.TrimSpace(mkfsStderr.String()))
|
||||
}
|
||||
|
||||
// Hot-Remove before we copy it
|
||||
if err := lcowUVM.RemoveSCSI(destFile); err != nil {
|
||||
return fmt.Errorf("failed to hot-remove: %s", err)
|
||||
}
|
||||
|
||||
// Populate the cache.
|
||||
if cacheFile != "" && (sizeGB == DefaultScratchSizeGB) {
|
||||
if err := copyfile.CopyFile(destFile, cacheFile, true); err != nil {
|
||||
return fmt.Errorf("failed to seed cache '%s' from '%s': %s", destFile, cacheFile, err)
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Debugf("hcsshim::CreateLCOWScratch: %s created (non-cache)", destFile)
|
||||
return nil
|
||||
}
|
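
A hedged usage sketch for the removed CreateScratch helper: per the doc comment, the shared cache file only short-circuits the in-UVM work when the requested size is the default. The utility VM variable and file paths below are assumptions for illustration.

package main

import (
	"github.com/Microsoft/hcsshim/internal/lcow"
	"github.com/Microsoft/hcsshim/internal/uvm"
)

// createContainerScratch is a hypothetical wrapper around the removed
// lcow.CreateScratch. Requesting lcow.DefaultScratchSizeGB lets an existing
// cache file satisfy the request with a plain file copy instead of creating
// and formatting a new VHDX inside the utility VM.
func createContainerScratch(vm *uvm.UtilityVM, scratchPath, cachePath string) error {
	return lcow.CreateScratch(vm, scratchPath, lcow.DefaultScratchSizeGB, cachePath, vm.ID())
}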
46
vendor/github.com/Microsoft/hcsshim/internal/lcow/tar2vhd.go
generated
vendored
46
vendor/github.com/Microsoft/hcsshim/internal/lcow/tar2vhd.go
generated
vendored
@ -1,46 +0,0 @@
|
||||
package lcow
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/Microsoft/hcsshim/internal/uvm"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// TarToVhd streams a tarstream contained in an io.Reader to a fixed vhd file
|
||||
func TarToVhd(lcowUVM *uvm.UtilityVM, targetVHDFile string, reader io.Reader) (int64, error) {
|
||||
logrus.Debugf("hcsshim: TarToVhd: %s", targetVHDFile)
|
||||
|
||||
if lcowUVM == nil {
|
||||
return 0, fmt.Errorf("no utility VM passed")
|
||||
}
|
||||
|
||||
//defer uvm.DebugLCOWGCS()
|
||||
|
||||
outFile, err := os.Create(targetVHDFile)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("tar2vhd failed to create %s: %s", targetVHDFile, err)
|
||||
}
|
||||
defer outFile.Close()
|
||||
// BUGBUG Delete the file on failure
|
||||
|
||||
tar2vhd, byteCounts, err := CreateProcess(&ProcessOptions{
|
||||
HCSSystem: lcowUVM.ComputeSystem(),
|
||||
Process: &specs.Process{Args: []string{"tar2vhd"}},
|
||||
CreateInUtilityVm: true,
|
||||
Stdin: reader,
|
||||
Stdout: outFile,
|
||||
CopyTimeout: 2 * time.Minute,
|
||||
})
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to start tar2vhd for %s: %s", targetVHDFile, err)
|
||||
}
|
||||
defer tar2vhd.Close()
|
||||
|
||||
logrus.Debugf("hcsshim: TarToVhd: %s created, %d bytes", targetVHDFile, byteCounts.Out)
|
||||
return byteCounts.Out, err
|
||||
}
|
11
vendor/github.com/Microsoft/hcsshim/internal/lcow/types.go
generated
vendored
11
vendor/github.com/Microsoft/hcsshim/internal/lcow/types.go
generated
vendored
@ -1,11 +0,0 @@
|
||||
package lcow
|
||||
|
||||
import "github.com/Microsoft/hcsshim/internal/schema2"
|
||||
|
||||
// Additional fields to hcsschema.ProcessParameters used by LCOW
|
||||
type ProcessParameters struct {
|
||||
hcsschema.ProcessParameters
|
||||
|
||||
CreateInUtilityVm bool `json:",omitempty"`
|
||||
OCIProcess interface{} `json:"OciProcess,omitempty"`
|
||||
}
|
75
vendor/github.com/Microsoft/hcsshim/internal/lcow/vhd2tar.go
generated
vendored
75
vendor/github.com/Microsoft/hcsshim/internal/lcow/vhd2tar.go
generated
vendored
@ -1,75 +0,0 @@
|
||||
package lcow
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
// "os"
|
||||
|
||||
"github.com/Microsoft/hcsshim/internal/uvm"
|
||||
// specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
// "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// VhdToTar does what it says - it exports a VHD in a specified
|
||||
// folder (either a read-only layer.vhd, or a read-write scratch vhdx) to a
|
||||
// ReadCloser containing a tar-stream of the layers contents.
|
||||
func VhdToTar(lcowUVM *uvm.UtilityVM, vhdFile string, uvmMountPath string, isContainerScratch bool, vhdSize int64) (io.ReadCloser, error) {
|
||||
return nil, fmt.Errorf("not implemented yet")
|
||||
// logrus.Debugf("hcsshim: VhdToTar: %s isScratch: %t", vhdFile, isContainerScratch)
|
||||
|
||||
// if lcowUVM == nil {
|
||||
// return nil, fmt.Errorf("cannot VhdToTar as no utility VM is in configuration")
|
||||
// }
|
||||
|
||||
// //defer uvm.DebugLCOWGCS()
|
||||
|
||||
// vhdHandle, err := os.Open(vhdFile)
|
||||
// if err != nil {
|
||||
// return nil, fmt.Errorf("hcsshim: VhdToTar: failed to open %s: %s", vhdFile, err)
|
||||
// }
|
||||
// defer vhdHandle.Close()
|
||||
// logrus.Debugf("hcsshim: VhdToTar: exporting %s, size %d, isScratch %t", vhdHandle.Name(), vhdSize, isContainerScratch)
|
||||
|
||||
// // Different binary depending on whether a RO layer or a RW scratch
|
||||
// command := "vhd2tar"
|
||||
// if isContainerScratch {
|
||||
// command = fmt.Sprintf("exportSandbox -path %s", uvmMountPath)
|
||||
// }
|
||||
|
||||
// // tar2vhd, byteCounts, err := lcowUVM.CreateProcess(&uvm.ProcessOptions{
|
||||
// // Process: &specs.Process{Args: []string{"tar2vhd"}},
|
||||
// // Stdin: reader,
|
||||
// // Stdout: outFile,
|
||||
// // })
|
||||
|
||||
// // Start the binary in the utility VM
|
||||
// proc, stdin, stdout, _, err := config.createLCOWUVMProcess(command)
|
||||
// if err != nil {
|
||||
// return nil, fmt.Errorf("hcsshim: VhdToTar: %s: failed to create utils process %s: %s", vhdHandle.Name(), command, err)
|
||||
// }
|
||||
|
||||
// if !isContainerScratch {
|
||||
// // Send the VHD contents to the utility VM processes stdin handle if not a container scratch
|
||||
// logrus.Debugf("hcsshim: VhdToTar: copying the layer VHD into the utility VM")
|
||||
// if _, err = copyWithTimeout(stdin, vhdHandle, vhdSize, processOperationTimeoutSeconds, fmt.Sprintf("vhdtotarstream: sending %s to %s", vhdHandle.Name(), command)); err != nil {
|
||||
// proc.Close()
|
||||
// return nil, fmt.Errorf("hcsshim: VhdToTar: %s: failed to copyWithTimeout on the stdin pipe (to utility VM): %s", vhdHandle.Name(), err)
|
||||
// }
|
||||
// }
|
||||
|
||||
// // Start a goroutine which copies the stdout (ie the tar stream)
|
||||
// reader, writer := io.Pipe()
|
||||
// go func() {
|
||||
// defer writer.Close()
|
||||
// defer proc.Close()
|
||||
// logrus.Debugf("hcsshim: VhdToTar: copying tar stream back from the utility VM")
|
||||
// bytes, err := copyWithTimeout(writer, stdout, vhdSize, processOperationTimeoutSeconds, fmt.Sprintf("vhdtotarstream: copy tarstream from %s", command))
|
||||
// if err != nil {
|
||||
// logrus.Errorf("hcsshim: VhdToTar: %s: copyWithTimeout on the stdout pipe (from utility VM) failed: %s", vhdHandle.Name(), err)
|
||||
// }
|
||||
// logrus.Debugf("hcsshim: VhdToTar: copied %d bytes of the tarstream of %s from the utility VM", bytes, vhdHandle.Name())
|
||||
// }()
|
||||
|
||||
// // Return the read-side of the pipe connected to the goroutine which is reading from the stdout of the process in the utility VM
|
||||
// return reader, nil
|
||||
}
|
5
vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go
generated
vendored
5
vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go
generated
vendored
@ -26,11 +26,6 @@ const (
|
||||
Uint32 = "uint32"
|
||||
Uint64 = "uint64"
|
||||
|
||||
// HCS
|
||||
|
||||
HCSOperation = "hcs-op"
|
||||
HCSOperationResult = "hcs-op-result"
|
||||
|
||||
// runhcs
|
||||
|
||||
VMShimOperation = "vmshim-op"
|
||||
|
79
vendor/github.com/Microsoft/hcsshim/internal/ociwclayer/export.go
generated
vendored
79
vendor/github.com/Microsoft/hcsshim/internal/ociwclayer/export.go
generated
vendored
@ -1,79 +0,0 @@
|
||||
// Package ociwclayer provides functions for importing and exporting Windows
|
||||
// container layers from and to their OCI tar representation.
|
||||
package ociwclayer
|
||||
|
||||
import (
|
||||
"io"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/Microsoft/go-winio/archive/tar"
|
||||
"github.com/Microsoft/go-winio/backuptar"
|
||||
"github.com/Microsoft/hcsshim"
|
||||
)
|
||||
|
||||
var driverInfo = hcsshim.DriverInfo{}
|
||||
|
||||
// ExportLayer writes an OCI layer tar stream from the provided on-disk layer.
|
||||
// The caller must specify the parent layers, if any, ordered from lowest to
|
||||
// highest layer.
|
||||
//
|
||||
// The layer will be mounted for this process, so the caller should ensure that
|
||||
// it is not currently mounted.
|
||||
func ExportLayer(w io.Writer, path string, parentLayerPaths []string) error {
|
||||
err := hcsshim.ActivateLayer(driverInfo, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer hcsshim.DeactivateLayer(driverInfo, path)
|
||||
|
||||
// Prepare and unprepare the layer to ensure that it has been initialized.
|
||||
err = hcsshim.PrepareLayer(driverInfo, path, parentLayerPaths)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = hcsshim.UnprepareLayer(driverInfo, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r, err := hcsshim.NewLayerReader(driverInfo, path, parentLayerPaths)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = writeTarFromLayer(r, w)
|
||||
cerr := r.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cerr
|
||||
}
|
||||
|
||||
func writeTarFromLayer(r hcsshim.LayerReader, w io.Writer) error {
|
||||
t := tar.NewWriter(w)
|
||||
for {
|
||||
name, size, fileInfo, err := r.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if fileInfo == nil {
|
||||
// Write a whiteout file.
|
||||
hdr := &tar.Header{
|
||||
Name: filepath.ToSlash(filepath.Join(filepath.Dir(name), whiteoutPrefix+filepath.Base(name))),
|
||||
}
|
||||
err := t.WriteHeader(hdr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
err = backuptar.WriteTarFileFromBackupStream(t, r, name, size, fileInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return t.Close()
|
||||
}
|
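
As a hedged illustration of the export path described above (parent layers ordered lowest to highest, layer not already mounted), a caller might stream a layer straight into a tar file. The file names here are assumptions.

package main

import (
	"os"

	"github.com/Microsoft/hcsshim/internal/ociwclayer"
)

// exportLayerToTar writes the OCI tar representation of an on-disk Windows
// layer to dest. parents must be ordered from lowest to highest layer.
func exportLayerToTar(layerPath string, parents []string, dest string) error {
	f, err := os.Create(dest)
	if err != nil {
		return err
	}
	defer f.Close()
	// ExportLayer activates and prepares the layer itself, so the caller must
	// ensure it is not currently mounted elsewhere.
	return ociwclayer.ExportLayer(f, layerPath, parents)
}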
141
vendor/github.com/Microsoft/hcsshim/internal/ociwclayer/import.go
generated
vendored
141
vendor/github.com/Microsoft/hcsshim/internal/ociwclayer/import.go
generated
vendored
@ -1,141 +0,0 @@
|
||||
package ociwclayer
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
winio "github.com/Microsoft/go-winio"
|
||||
"github.com/Microsoft/go-winio/archive/tar"
|
||||
"github.com/Microsoft/go-winio/backuptar"
|
||||
"github.com/Microsoft/hcsshim"
|
||||
)
|
||||
|
||||
const whiteoutPrefix = ".wh."
|
||||
|
||||
var (
|
||||
// mutatedFiles is a list of files that are mutated by the import process
|
||||
// and must be backed up and restored.
|
||||
mutatedFiles = map[string]string{
|
||||
"UtilityVM/Files/EFI/Microsoft/Boot/BCD": "bcd.bak",
|
||||
"UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG": "bcd.log.bak",
|
||||
"UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG1": "bcd.log1.bak",
|
||||
"UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG2": "bcd.log2.bak",
|
||||
}
|
||||
)
|
||||
|
||||
// ImportLayer reads a layer from an OCI layer tar stream and extracts it to the
|
||||
// specified path. The caller must specify the parent layers, if any, ordered
|
||||
// from lowest to highest layer.
|
||||
//
|
||||
// The caller must ensure that the thread or process has acquired backup and
|
||||
// restore privileges.
|
||||
//
|
||||
// This function returns the total size of the layer's files, in bytes.
|
||||
func ImportLayer(r io.Reader, path string, parentLayerPaths []string) (int64, error) {
|
||||
err := os.MkdirAll(path, 0)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
w, err := hcsshim.NewLayerWriter(hcsshim.DriverInfo{}, path, parentLayerPaths)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n, err := writeLayerFromTar(r, w, path)
|
||||
cerr := w.Close()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if cerr != nil {
|
||||
return 0, cerr
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64, error) {
|
||||
t := tar.NewReader(r)
|
||||
hdr, err := t.Next()
|
||||
totalSize := int64(0)
|
||||
buf := bufio.NewWriter(nil)
|
||||
for err == nil {
|
||||
base := path.Base(hdr.Name)
|
||||
if strings.HasPrefix(base, whiteoutPrefix) {
|
||||
name := path.Join(path.Dir(hdr.Name), base[len(whiteoutPrefix):])
|
||||
err = w.Remove(filepath.FromSlash(name))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
hdr, err = t.Next()
|
||||
} else if hdr.Typeflag == tar.TypeLink {
|
||||
err = w.AddLink(filepath.FromSlash(hdr.Name), filepath.FromSlash(hdr.Linkname))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
hdr, err = t.Next()
|
||||
} else {
|
||||
var (
|
||||
name string
|
||||
size int64
|
||||
fileInfo *winio.FileBasicInfo
|
||||
)
|
||||
name, size, fileInfo, err = backuptar.FileInfoFromHeader(hdr)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
err = w.Add(filepath.FromSlash(name), fileInfo)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
hdr, err = writeBackupStreamFromTarAndSaveMutatedFiles(buf, w, t, hdr, root)
|
||||
totalSize += size
|
||||
}
|
||||
}
|
||||
if err != io.EOF {
|
||||
return 0, err
|
||||
}
|
||||
return totalSize, nil
|
||||
}
|
||||
|
||||
// writeBackupStreamFromTarAndSaveMutatedFiles reads data from a tar stream and
|
||||
// writes it to a backup stream, and also saves any files that will be mutated
|
||||
// by the import layer process to a backup location.
|
||||
func writeBackupStreamFromTarAndSaveMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) {
|
||||
var bcdBackup *os.File
|
||||
var bcdBackupWriter *winio.BackupFileWriter
|
||||
if backupPath, ok := mutatedFiles[hdr.Name]; ok {
|
||||
bcdBackup, err = os.Create(filepath.Join(root, backupPath))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
cerr := bcdBackup.Close()
|
||||
if err == nil {
|
||||
err = cerr
|
||||
}
|
||||
}()
|
||||
|
||||
bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false)
|
||||
defer func() {
|
||||
cerr := bcdBackupWriter.Close()
|
||||
if err == nil {
|
||||
err = cerr
|
||||
}
|
||||
}()
|
||||
|
||||
buf.Reset(io.MultiWriter(w, bcdBackupWriter))
|
||||
} else {
|
||||
buf.Reset(w)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
ferr := buf.Flush()
|
||||
if err == nil {
|
||||
err = ferr
|
||||
}
|
||||
}()
|
||||
|
||||
return backuptar.WriteBackupStreamFromTarFile(buf, t, hdr)
|
||||
}
|
14
vendor/github.com/Microsoft/hcsshim/internal/ospath/join.go
generated
vendored
14
vendor/github.com/Microsoft/hcsshim/internal/ospath/join.go
generated
vendored
@ -1,14 +0,0 @@
|
||||
package ospath
|
||||
|
||||
import (
|
||||
"path"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// Join joins paths using the target OS's path separator.
|
||||
func Join(os string, elem ...string) string {
|
||||
if os == "windows" {
|
||||
return filepath.Join(elem...)
|
||||
}
|
||||
return path.Join(elem...)
|
||||
}
|
185
vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate_test.go
generated
vendored
185
vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate_test.go
generated
vendored
@ -1,185 +0,0 @@
|
||||
package regstate
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var testKey = "runhcs-test-test-key"
|
||||
|
||||
func prepTest(t *testing.T) {
|
||||
err := RemoveAll(testKey, true)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLifetime(t *testing.T) {
|
||||
prepTest(t)
|
||||
k, err := Open(testKey, true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ids, err := k.Enumerate()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(ids) != 0 {
|
||||
t.Fatal("wrong count", len(ids))
|
||||
}
|
||||
|
||||
id := "a/b/c"
|
||||
key := "key"
|
||||
err = k.Set(id, key, 1)
|
||||
if err == nil {
|
||||
t.Fatal("expected error")
|
||||
}
|
||||
|
||||
var i int
|
||||
err = k.Get(id, key, &i)
|
||||
if err == nil {
|
||||
t.Fatal("expected error")
|
||||
}
|
||||
|
||||
err = k.Create(id, key, 2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ids, err = k.Enumerate()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(ids) != 1 {
|
||||
t.Fatal("wrong count", len(ids))
|
||||
}
|
||||
if ids[0] != id {
|
||||
t.Fatal("wrong value", ids[0])
|
||||
}
|
||||
|
||||
err = k.Get(id, key, &i)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if i != 2 {
|
||||
t.Fatal("got wrong value", i)
|
||||
}
|
||||
|
||||
err = k.Set(id, key, 3)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = k.Get(id, key, &i)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if i != 3 {
|
||||
t.Fatal("got wrong value", i)
|
||||
}
|
||||
|
||||
err = k.Remove(id)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = k.Remove(id)
|
||||
if err == nil {
|
||||
t.Fatal("expected error")
|
||||
}
|
||||
|
||||
ids, err = k.Enumerate()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(ids) != 0 {
|
||||
t.Fatal("wrong count", len(ids))
|
||||
}
|
||||
}
|
||||
|
||||
func TestBool(t *testing.T) {
|
||||
prepTest(t)
|
||||
k, err := Open(testKey, true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
id := "x"
|
||||
key := "y"
|
||||
err = k.Create(id, key, true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
b := false
|
||||
err = k.Get(id, key, &b)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !b {
|
||||
t.Fatal("value did not marshal correctly")
|
||||
}
|
||||
}
|
||||
|
||||
func TestInt(t *testing.T) {
|
||||
prepTest(t)
|
||||
k, err := Open(testKey, true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
id := "x"
|
||||
key := "y"
|
||||
err = k.Create(id, key, 10)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
v := 0
|
||||
err = k.Get(id, key, &v)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if v != 10 {
|
||||
t.Fatal("value did not marshal correctly")
|
||||
}
|
||||
}
|
||||
|
||||
func TestString(t *testing.T) {
|
||||
prepTest(t)
|
||||
k, err := Open(testKey, true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
id := "x"
|
||||
key := "y"
|
||||
err = k.Create(id, key, "blah")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
v := ""
|
||||
err = k.Get(id, key, &v)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if v != "blah" {
|
||||
t.Fatal("value did not marshal correctly")
|
||||
}
|
||||
}
|
||||
|
||||
func TestJson(t *testing.T) {
|
||||
prepTest(t)
|
||||
k, err := Open(testKey, true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
id := "x"
|
||||
key := "y"
|
||||
v := struct{ X int }{5}
|
||||
err = k.Create(id, key, &v)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
v.X = 0
|
||||
err = k.Get(id, key, &v)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if v.X != 5 {
|
||||
t.Fatal("value did not marshal correctly: ", v)
|
||||
}
|
||||
}
|
10
vendor/github.com/Microsoft/hcsshim/internal/requesttype/types.go
generated
vendored
10
vendor/github.com/Microsoft/hcsshim/internal/requesttype/types.go
generated
vendored
@ -1,10 +0,0 @@
|
||||
package requesttype
|
||||
|
||||
// These are constants for v2 schema modify requests.
|
||||
|
||||
// RequestType const
|
||||
const (
|
||||
Add = "Add"
|
||||
Remove = "Remove"
|
||||
PreAdd = "PreAdd" // For networking
|
||||
)
|
17
vendor/github.com/Microsoft/hcsshim/internal/runhcs/util_test.go
generated
vendored
17
vendor/github.com/Microsoft/hcsshim/internal/runhcs/util_test.go
generated
vendored
@ -1,17 +0,0 @@
|
||||
package runhcs
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func Test_SafePipePath(t *testing.T) {
|
||||
tests := []string{"test", "test with spaces", "test/with\\\\.\\slashes", "test.with..dots..."}
|
||||
expected := []string{"test", "test%20with%20spaces", "test%2Fwith%5C%5C.%5Cslashes", "test.with..dots..."}
|
||||
for i, test := range tests {
|
||||
actual := SafePipePath(test)
|
||||
e := SafePipePrefix + expected[i]
|
||||
if actual != e {
|
||||
t.Fatalf("SafePipePath: actual '%s' != '%s'", actual, expected[i])
|
||||
}
|
||||
}
|
||||
}
|
2
vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go
generated
vendored
2
vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go
generated
vendored
@ -87,7 +87,7 @@ func OpenRoot(path string) (*os.File, error) {
|
||||
|
||||
func ntRelativePath(path string) ([]uint16, error) {
|
||||
path = filepath.Clean(path)
|
||||
if strings.Contains(":", path) {
|
||||
if strings.Contains(path, ":") {
|
||||
// Since alternate data streams must follow the file they
|
||||
// are attached to, finding one here (out of order) is invalid.
|
||||
return nil, errors.New("path contains invalid character `:`")
|
||||
|
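
The one-line change above fixes a swapped-argument bug: strings.Contains(s, substr) searches s for substr, so the old call asked whether ":" contains the whole path and effectively never matched, letting alternate-data-stream paths through. A minimal, standalone illustration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Buggy order from the removed line: is the path contained in ":"? Almost never.
	fmt.Println(strings.Contains(":", `foo:stream`)) // false
	// Fixed order: does the path contain ":"? Correctly flags the ADS separator.
	fmt.Println(strings.Contains(`foo:stream`, ":")) // true
}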
125
vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen_admin_test.go
generated
vendored
125
vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen_admin_test.go
generated
vendored
@ -1,125 +0,0 @@
|
||||
// +build admin
|
||||
|
||||
package safefile
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestOpenRelative(t *testing.T) {
|
||||
badroot, err := tempRoot()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(badroot.Name())
|
||||
defer badroot.Close()
|
||||
|
||||
root, err := tempRoot()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(root.Name())
|
||||
defer root.Close()
|
||||
|
||||
// Create a file
|
||||
f, err := OpenRelative("foo", root, 0, syscall.FILE_SHARE_READ, FILE_CREATE, 0)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
f.Close()
|
||||
|
||||
// Create a directory
|
||||
err = MkdirRelative("dir", root)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create a file in the bad root
|
||||
f, err = os.Create(filepath.Join(badroot.Name(), "badfile"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
f.Close()
|
||||
|
||||
// Create a directory symlink to the bad root
|
||||
err = os.Symlink(badroot.Name(), filepath.Join(root.Name(), "dsymlink"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create a file symlink to the bad file
|
||||
err = os.Symlink(filepath.Join(badroot.Name(), "badfile"), filepath.Join(root.Name(), "symlink"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Make sure opens cannot happen through the symlink
|
||||
f, err = OpenRelative("dsymlink/foo", root, 0, syscall.FILE_SHARE_READ, FILE_CREATE, 0)
|
||||
if err == nil {
|
||||
f.Close()
|
||||
t.Fatal("created file in wrong tree!")
|
||||
}
|
||||
t.Log(err)
|
||||
|
||||
// Check again using EnsureNotReparsePointRelative
|
||||
err = EnsureNotReparsePointRelative("dsymlink", root)
|
||||
if err == nil {
|
||||
t.Fatal("reparse check should have failed")
|
||||
}
|
||||
t.Log(err)
|
||||
|
||||
// Make sure links work
|
||||
err = LinkRelative("foo", root, "hardlink", root)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Even inside directories
|
||||
err = LinkRelative("foo", root, "dir/bar", root)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Make sure links cannot happen through the symlink
|
||||
err = LinkRelative("foo", root, "dsymlink/hardlink", root)
|
||||
if err == nil {
|
||||
f.Close()
|
||||
t.Fatal("created link in wrong tree!")
|
||||
}
|
||||
t.Log(err)
|
||||
|
||||
// In either direction
|
||||
err = LinkRelative("dsymlink/badfile", root, "bar", root)
|
||||
if err == nil {
|
||||
f.Close()
|
||||
t.Fatal("created link in wrong tree!")
|
||||
}
|
||||
t.Log(err)
|
||||
|
||||
// Make sure remove cannot happen through the symlink
|
||||
err = RemoveRelative("symlink/badfile", root)
|
||||
if err == nil {
|
||||
t.Fatal("remove in wrong tree!")
|
||||
}
|
||||
|
||||
// Remove the symlink
|
||||
err = RemoveAllRelative("symlink", root)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Make sure it's not possible to escape with .. (NT doesn't support .. at the kernel level)
|
||||
f, err = OpenRelative("..", root, syscall.GENERIC_READ, syscall.FILE_SHARE_READ, FILE_OPEN, 0)
|
||||
if err == nil {
|
||||
t.Fatal("escaped the directory")
|
||||
}
|
||||
t.Log(err)
|
||||
|
||||
// Should not have touched the other directory
|
||||
if _, err = os.Lstat(filepath.Join(badroot.Name(), "badfile")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
53
vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen_test.go
generated
vendored
53
vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen_test.go
generated
vendored
@ -1,53 +0,0 @@
|
||||
package safefile
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
"testing"
|
||||
|
||||
winio "github.com/Microsoft/go-winio"
|
||||
)
|
||||
|
||||
func tempRoot() (*os.File, error) {
|
||||
name, err := ioutil.TempDir("", "hcsshim-test")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f, err := OpenRoot(name)
|
||||
if err != nil {
|
||||
os.Remove(name)
|
||||
return nil, err
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func TestRemoveRelativeReadOnly(t *testing.T) {
|
||||
root, err := tempRoot()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(root.Name())
|
||||
defer root.Close()
|
||||
|
||||
p := filepath.Join(root.Name(), "foo")
|
||||
f, err := os.Create(p)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
bi := winio.FileBasicInfo{}
|
||||
bi.FileAttributes = syscall.FILE_ATTRIBUTE_READONLY
|
||||
err = winio.SetFileBasicInfo(f, &bi)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
f.Close()
|
||||
|
||||
err = RemoveRelative("foo", root)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
81
vendor/github.com/Microsoft/hcsshim/internal/schemaversion/schemaversion.go
generated
vendored
81
vendor/github.com/Microsoft/hcsshim/internal/schemaversion/schemaversion.go
generated
vendored
@ -1,81 +0,0 @@
|
||||
// +build windows
|
||||
|
||||
package schemaversion
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/Microsoft/hcsshim/internal/schema2"
|
||||
"github.com/Microsoft/hcsshim/osversion"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// SchemaV10 makes it easy for callers to get a v1.0 schema version object
|
||||
func SchemaV10() *hcsschema.Version {
|
||||
return &hcsschema.Version{Major: 1, Minor: 0}
|
||||
}
|
||||
|
||||
// SchemaV21 makes it easy for callers to get a v2.1 schema version object
|
||||
func SchemaV21() *hcsschema.Version {
|
||||
return &hcsschema.Version{Major: 2, Minor: 1}
|
||||
}
|
||||
|
||||
// IsSupported determines if a given schema version is supported
|
||||
func IsSupported(sv *hcsschema.Version) error {
|
||||
if IsV10(sv) {
|
||||
return nil
|
||||
}
|
||||
if IsV21(sv) {
|
||||
if osversion.Get().Build < osversion.RS5 {
|
||||
return fmt.Errorf("unsupported on this Windows build")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("unknown schema version %s", String(sv))
|
||||
}
|
||||
|
||||
// IsV10 determines if a given schema version object is 1.0. This was the only thing
|
||||
// supported in RS1..3. It lives on in RS5, but will be deprecated in a future release.
|
||||
func IsV10(sv *hcsschema.Version) bool {
|
||||
if sv.Major == 1 && sv.Minor == 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsV21 determines if a given schema version object is 2.1. This was introduced in
|
||||
// RS4, but not fully implemented. Recommended for applications using HCS in RS5
|
||||
// onwards.
|
||||
func IsV21(sv *hcsschema.Version) bool {
|
||||
if sv.Major == 2 && sv.Minor == 1 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// String returns a JSON encoding of a schema version object
|
||||
func String(sv *hcsschema.Version) string {
|
||||
b, err := json.Marshal(sv)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return string(b[:])
|
||||
}
|
||||
|
||||
// DetermineSchemaVersion works out what schema version to use based on build and
|
||||
// requested option.
|
||||
func DetermineSchemaVersion(requestedSV *hcsschema.Version) *hcsschema.Version {
|
||||
sv := SchemaV10()
|
||||
if osversion.Get().Build >= osversion.RS5 {
|
||||
sv = SchemaV21()
|
||||
}
|
||||
if requestedSV != nil {
|
||||
if err := IsSupported(requestedSV); err == nil {
|
||||
sv = requestedSV
|
||||
} else {
|
||||
logrus.Warnf("Ignoring unsupported requested schema version %+v", requestedSV)
|
||||
}
|
||||
}
|
||||
return sv
|
||||
}
|
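
A hedged sketch of how these helpers were typically combined when building an HCS document; passing nil asks for the best schema version the current Windows build supports, and the logging here is illustrative.

package main

import (
	"log"

	"github.com/Microsoft/hcsshim/internal/schemaversion"
)

// pickSchemaVersion chooses the schema version for a new compute system.
func pickSchemaVersion() {
	sv := schemaversion.DetermineSchemaVersion(nil) // nil == best for this build
	if schemaversion.IsV21(sv) {
		log.Printf("using v2.1 schema: %s", schemaversion.String(sv))
	} else {
		log.Printf("falling back to v1.0 schema: %s", schemaversion.String(sv))
	}
}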
63
vendor/github.com/Microsoft/hcsshim/internal/schemaversion/schemaversion_test.go
generated
vendored
63
vendor/github.com/Microsoft/hcsshim/internal/schemaversion/schemaversion_test.go
generated
vendored
@ -1,63 +0,0 @@
|
||||
package schemaversion
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
"github.com/Microsoft/hcsshim/internal/schema2"
|
||||
"github.com/Microsoft/hcsshim/osversion"
|
||||
_ "github.com/Microsoft/hcsshim/test/functional/manifest"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
logrus.SetOutput(ioutil.Discard)
|
||||
}
|
||||
|
||||
func TestDetermineSchemaVersion(t *testing.T) {
|
||||
osv := osversion.Get()
|
||||
|
||||
if osv.Build >= osversion.RS5 {
|
||||
if sv := DetermineSchemaVersion(nil); !IsV21(sv) {
|
||||
t.Fatalf("expected v2")
|
||||
}
|
||||
if sv := DetermineSchemaVersion(SchemaV21()); !IsV21(sv) {
|
||||
t.Fatalf("expected requested v2")
|
||||
}
|
||||
if sv := DetermineSchemaVersion(SchemaV10()); !IsV10(sv) {
|
||||
t.Fatalf("expected requested v1")
|
||||
}
|
||||
if sv := DetermineSchemaVersion(&hcsschema.Version{}); !IsV21(sv) {
|
||||
t.Fatalf("expected requested v2")
|
||||
}
|
||||
|
||||
if err := IsSupported(SchemaV21()); err != nil {
|
||||
t.Fatalf("v2 expected to be supported")
|
||||
}
|
||||
if err := IsSupported(SchemaV10()); err != nil {
|
||||
t.Fatalf("v1 expected to be supported")
|
||||
}
|
||||
|
||||
} else {
|
||||
if sv := DetermineSchemaVersion(nil); !IsV10(sv) {
|
||||
t.Fatalf("expected v1")
|
||||
}
|
||||
// Pre RS5 will downgrade to v1 even if request v2
|
||||
if sv := DetermineSchemaVersion(SchemaV21()); !IsV10(sv) {
|
||||
t.Fatalf("expected requested v1")
|
||||
}
|
||||
if sv := DetermineSchemaVersion(SchemaV10()); !IsV10(sv) {
|
||||
t.Fatalf("expected requested v1")
|
||||
}
|
||||
if sv := DetermineSchemaVersion(&hcsschema.Version{}); !IsV10(sv) {
|
||||
t.Fatalf("expected requested v1")
|
||||
}
|
||||
|
||||
if err := IsSupported(SchemaV21()); err == nil {
|
||||
t.Fatalf("didn't expect v2 to be supported")
|
||||
}
|
||||
if err := IsSupported(SchemaV10()); err != nil {
|
||||
t.Fatalf("v1 expected to be supported")
|
||||
}
|
||||
}
|
||||
}
|
19
vendor/github.com/Microsoft/hcsshim/internal/uvm/constants.go
generated
vendored
19
vendor/github.com/Microsoft/hcsshim/internal/uvm/constants.go
generated
vendored
@ -1,19 +0,0 @@
|
||||
package uvm
|
||||
|
||||
import "fmt"
|
||||
|
||||
const (
|
||||
// MaxVPMEMCount is the maximum number of VPMem devices that may be added to an LCOW
|
||||
// utility VM
|
||||
MaxVPMEMCount = 128
|
||||
|
||||
// DefaultVPMEMCount is the default number of VPMem devices that may be added to an LCOW
|
||||
// utility VM if the create request doesn't specify how many.
|
||||
DefaultVPMEMCount = 64
|
||||
|
||||
// DefaultVPMemSizeBytes is the default size of a VPMem device if the create request
|
||||
// doesn't specify.
|
||||
DefaultVPMemSizeBytes = 4 * 1024 * 1024 * 1024 // 4GB
|
||||
)
|
||||
|
||||
var errNotSupported = fmt.Errorf("not supported")
|
11
vendor/github.com/Microsoft/hcsshim/internal/uvm/counter.go
generated
vendored
11
vendor/github.com/Microsoft/hcsshim/internal/uvm/counter.go
generated
vendored
@ -1,11 +0,0 @@
|
||||
package uvm
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// ContainerCounter is used for where we layout things for a container in
|
||||
// a utility VM. For WCOW it'll be C:\c\N\. For LCOW it'll be /run/gcs/c/N/.
|
||||
func (uvm *UtilityVM) ContainerCounter() uint64 {
|
||||
return atomic.AddUint64(&uvm.containerCounter, 1)
|
||||
}
|
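
A hypothetical helper showing how this counter pairs with ospath.Join (earlier in this diff) to lay out the per-container directory mentioned in the comment; the base paths come from that comment, everything else is assumed.

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/internal/ospath"
	"github.com/Microsoft/hcsshim/internal/uvm"
)

// containerDir returns the next per-container directory inside the utility VM:
// C:\c\N for WCOW, /run/gcs/c/N for LCOW.
func containerDir(vm *uvm.UtilityVM) string {
	n := vm.ContainerCounter()
	if vm.OS() == "windows" {
		return ospath.Join("windows", `C:\c`, fmt.Sprintf("%d", n))
	}
	return ospath.Join("linux", "/run/gcs/c", fmt.Sprintf("%d", n))
}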
62
vendor/github.com/Microsoft/hcsshim/internal/uvm/create.go
generated
vendored
62
vendor/github.com/Microsoft/hcsshim/internal/uvm/create.go
generated
vendored
@ -1,62 +0,0 @@
|
||||
package uvm
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// Options are the set of options passed to Create() to create a utility vm.
|
||||
type Options struct {
|
||||
ID string // Identifier for the uvm. Defaults to generated GUID.
|
||||
Owner string // Specifies the owner. Defaults to executable name.
|
||||
AdditionHCSDocumentJSON string // Optional additional JSON to merge into the HCS document prior to creation.
|
||||
|
||||
// MemorySizeInMB sets the UVM memory. If `0` will default to platform
|
||||
// default.
|
||||
MemorySizeInMB int32
|
||||
|
||||
// Memory for UVM. Defaults to true. For physical backed memory, set to
|
||||
// false.
|
||||
AllowOvercommit bool
|
||||
|
||||
// Memory for UVM. Defaults to false. For virtual memory with deferred
|
||||
// commit, set to true.
|
||||
EnableDeferredCommit bool
|
||||
|
||||
// ProcessorCount sets the number of vCPU's. If `0` will default to platform
|
||||
// default.
|
||||
ProcessorCount int32
|
||||
}
|
||||
|
||||
// ID returns the ID of the VM's compute system.
|
||||
func (uvm *UtilityVM) ID() string {
|
||||
return uvm.hcsSystem.ID()
|
||||
}
|
||||
|
||||
// OS returns the operating system of the utility VM.
|
||||
func (uvm *UtilityVM) OS() string {
|
||||
return uvm.operatingSystem
|
||||
}
|
||||
|
||||
// Close terminates and releases resources associated with the utility VM.
|
||||
func (uvm *UtilityVM) Close() error {
|
||||
uvm.Terminate()
|
||||
|
||||
// outputListener will only be nil for a Create -> Stop without a Start. In
|
||||
// this case we have no goroutine processing output so its safe to close the
|
||||
// channel here.
|
||||
if uvm.outputListener != nil {
|
||||
close(uvm.outputProcessingDone)
|
||||
uvm.outputListener.Close()
|
||||
uvm.outputListener = nil
|
||||
}
|
||||
err := uvm.hcsSystem.Close()
|
||||
uvm.hcsSystem = nil
|
||||
return err
|
||||
}
|
||||
|
||||
func defaultProcessorCount() int32 {
|
||||
if runtime.NumCPU() == 1 {
|
||||
return 1
|
||||
}
|
||||
return 2
|
||||
}
|
361
vendor/github.com/Microsoft/hcsshim/internal/uvm/create_lcow.go
generated
vendored
361
vendor/github.com/Microsoft/hcsshim/internal/uvm/create_lcow.go
generated
vendored
@ -1,361 +0,0 @@
|
||||
package uvm
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/Microsoft/hcsshim/internal/guid"
|
||||
"github.com/Microsoft/hcsshim/internal/hcs"
|
||||
"github.com/Microsoft/hcsshim/internal/mergemaps"
|
||||
"github.com/Microsoft/hcsshim/internal/schema2"
|
||||
"github.com/Microsoft/hcsshim/internal/schemaversion"
|
||||
"github.com/Microsoft/hcsshim/internal/wclayer"
|
||||
"github.com/Microsoft/hcsshim/osversion"
|
||||
"github.com/linuxkit/virtsock/pkg/hvsock"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type PreferredRootFSType int
|
||||
|
||||
const (
|
||||
PreferredRootFSTypeInitRd PreferredRootFSType = iota
|
||||
PreferredRootFSTypeVHD
|
||||
)
|
||||
|
||||
// OutputHandler is used to process the output from the program run in the UVM.
|
||||
type OutputHandler func(io.Reader)
|
||||
|
||||
const (
|
||||
// InitrdFile is the default file name for an initrd.img used to boot LCOW.
|
||||
InitrdFile = "initrd.img"
|
||||
// VhdFile is the default file name for a rootfs.vhd used to boot LCOW.
|
||||
VhdFile = "rootfs.vhd"
|
||||
)
|
||||
|
||||
// OptionsLCOW are the set of options passed to CreateLCOW() to create a utility vm.
|
||||
type OptionsLCOW struct {
|
||||
*Options
|
||||
|
||||
BootFilesPath string // Folder in which kernel and root file system reside. Defaults to \Program Files\Linux Containers
|
||||
KernelFile string // Filename under `BootFilesPath` for the kernel. Defaults to `kernel`
|
||||
KernelDirect bool // Skip UEFI and boot directly to `kernel`
|
||||
RootFSFile string // Filename under `BootFilesPath` for the UVM's root file system. Defaults to `InitrdFile`
|
||||
KernelBootOptions string // Additional boot options for the kernel
|
||||
EnableGraphicsConsole bool // If true, enable a graphics console for the utility VM
|
||||
ConsolePipe string // The named pipe path to use for the serial console. eg \\.\pipe\vmpipe
|
||||
SCSIControllerCount uint32 // The number of SCSI controllers. Defaults to 1. Currently we only support 0 or 1.
|
||||
UseGuestConnection bool // Whether the HCS should connect to the UVM's GCS. Defaults to true
|
||||
ExecCommandLine string // The command line to exec from init. Defaults to GCS
|
||||
ForwardStdout bool // Whether stdout will be forwarded from the executed program. Defaults to false
|
||||
ForwardStderr bool // Whether stderr will be forwarded from the executed program. Defaults to true
|
||||
OutputHandler OutputHandler `json:"-"` // Controls how output received over HVSocket from the UVM is handled. Defaults to parsing output as logrus messages
|
||||
VPMemDeviceCount uint32 // Number of VPMem devices. Defaults to `DefaultVPMEMCount`. Limit at 128. If booting UVM from VHD, device 0 is taken.
|
||||
VPMemSizeBytes uint64 // Size of the VPMem devices. Defaults to `DefaultVPMemSizeBytes`.
|
||||
PreferredRootFSType PreferredRootFSType // If `KernelFile` is `InitrdFile` use `PreferredRootFSTypeInitRd`. If `KernelFile` is `VhdFile` use `PreferredRootFSTypeVHD`
|
||||
}
|
||||
|
||||
// NewDefaultOptionsLCOW creates the default options for a bootable version of
|
||||
// LCOW.
|
||||
//
|
||||
// `id` the ID of the compute system. If not passed will generate a new GUID.
|
||||
//
|
||||
// `owner` the owner of the compute system. If not passed will use the
|
||||
// executable files name.
|
||||
func NewDefaultOptionsLCOW(id, owner string) *OptionsLCOW {
|
||||
opts := &OptionsLCOW{
|
||||
Options: &Options{
|
||||
ID: id,
|
||||
Owner: owner,
|
||||
MemorySizeInMB: 1024,
|
||||
AllowOvercommit: true,
|
||||
EnableDeferredCommit: false,
|
||||
ProcessorCount: defaultProcessorCount(),
|
||||
},
|
||||
BootFilesPath: filepath.Join(os.Getenv("ProgramFiles"), "Linux Containers"),
|
||||
KernelFile: "kernel",
|
||||
KernelDirect: osversion.Get().Build >= 18286, // Use KernelDirect boot by default on all builds that support it.
|
||||
RootFSFile: InitrdFile,
|
||||
KernelBootOptions: "",
|
||||
EnableGraphicsConsole: false,
|
||||
ConsolePipe: "",
|
||||
SCSIControllerCount: 1,
|
||||
UseGuestConnection: true,
|
||||
ExecCommandLine: fmt.Sprintf("/bin/gcs -log-format json -loglevel %s", logrus.StandardLogger().Level.String()),
|
||||
ForwardStdout: false,
|
||||
ForwardStderr: true,
|
||||
OutputHandler: parseLogrus,
|
||||
VPMemDeviceCount: DefaultVPMEMCount,
|
||||
VPMemSizeBytes: DefaultVPMemSizeBytes,
|
||||
PreferredRootFSType: PreferredRootFSTypeInitRd,
|
||||
}
|
||||
|
||||
if opts.ID == "" {
|
||||
opts.ID = guid.New().String()
|
||||
}
|
||||
if opts.Owner == "" {
|
||||
opts.Owner = filepath.Base(os.Args[0])
|
||||
}
|
||||
|
||||
if _, err := os.Stat(filepath.Join(opts.BootFilesPath, VhdFile)); err == nil {
|
||||
// We have a rootfs.vhd in the boot files path. Use it over an initrd.img
|
||||
opts.RootFSFile = VhdFile
|
||||
opts.PreferredRootFSType = PreferredRootFSTypeVHD
|
||||
}
|
||||
return opts
|
||||
}
|
||||
|
||||
const linuxLogVsockPort = 109
|
||||
|
||||
// CreateLCOW creates an HCS compute system representing a utility VM.
|
||||
func CreateLCOW(opts *OptionsLCOW) (_ *UtilityVM, err error) {
|
||||
logrus.Debugf("uvm::CreateLCOW %+v", opts)
|
||||
|
||||
// We dont serialize OutputHandler so if it is missing we need to put it back to the default.
|
||||
if opts.OutputHandler == nil {
|
||||
opts.OutputHandler = parseLogrus
|
||||
}
|
||||
|
||||
uvm := &UtilityVM{
|
||||
id: opts.ID,
|
||||
owner: opts.Owner,
|
||||
operatingSystem: "linux",
|
||||
scsiControllerCount: opts.SCSIControllerCount,
|
||||
vpmemMaxCount: opts.VPMemDeviceCount,
|
||||
vpmemMaxSizeBytes: opts.VPMemSizeBytes,
|
||||
}
|
||||
|
||||
kernelFullPath := filepath.Join(opts.BootFilesPath, opts.KernelFile)
|
||||
if _, err := os.Stat(kernelFullPath); os.IsNotExist(err) {
|
||||
return nil, fmt.Errorf("kernel: '%s' not found", kernelFullPath)
|
||||
}
|
||||
rootfsFullPath := filepath.Join(opts.BootFilesPath, opts.RootFSFile)
|
||||
if _, err := os.Stat(rootfsFullPath); os.IsNotExist(err) {
|
||||
return nil, fmt.Errorf("boot file: '%s' not found", rootfsFullPath)
|
||||
}
|
||||
|
||||
if opts.SCSIControllerCount > 1 {
|
||||
return nil, fmt.Errorf("SCSI controller count must be 0 or 1") // Future extension here for up to 4
|
||||
}
|
||||
if opts.VPMemDeviceCount > MaxVPMEMCount {
|
||||
return nil, fmt.Errorf("vpmem device count cannot be greater than %d", MaxVPMEMCount)
|
||||
}
|
||||
if uvm.vpmemMaxCount > 0 {
|
||||
if opts.VPMemSizeBytes%4096 != 0 {
|
||||
return nil, fmt.Errorf("opts.VPMemSizeBytes must be a multiple of 4096")
|
||||
}
|
||||
} else {
|
||||
if opts.PreferredRootFSType == PreferredRootFSTypeVHD {
|
||||
return nil, fmt.Errorf("PreferredRootFSTypeVHD requires at least one VPMem device")
|
||||
}
|
||||
}
|
||||
if opts.KernelDirect && osversion.Get().Build < 18286 {
|
||||
return nil, fmt.Errorf("KernelDirectBoot is not support on builds older than 18286")
|
||||
}
|
||||
|
||||
doc := &hcsschema.ComputeSystem{
|
||||
Owner: uvm.owner,
|
||||
SchemaVersion: schemaversion.SchemaV21(),
|
||||
ShouldTerminateOnLastHandleClosed: true,
|
||||
VirtualMachine: &hcsschema.VirtualMachine{
|
||||
StopOnReset: true,
|
||||
Chipset: &hcsschema.Chipset{},
|
||||
ComputeTopology: &hcsschema.Topology{
|
||||
Memory: &hcsschema.Memory2{
|
||||
SizeInMB: opts.MemorySizeInMB,
|
||||
AllowOvercommit: opts.AllowOvercommit,
|
||||
EnableDeferredCommit: opts.EnableDeferredCommit,
|
||||
},
|
||||
Processor: &hcsschema.Processor2{
|
||||
Count: opts.ProcessorCount,
|
||||
},
|
||||
},
|
||||
Devices: &hcsschema.Devices{
|
||||
HvSocket: &hcsschema.HvSocket2{
|
||||
HvSocketConfig: &hcsschema.HvSocketSystemConfig{
|
||||
// Allow administrators and SYSTEM to bind to vsock sockets
|
||||
// so that we can create a GCS log socket.
|
||||
DefaultBindSecurityDescriptor: "D:P(A;;FA;;;SY)(A;;FA;;;BA)",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if opts.UseGuestConnection {
|
||||
doc.VirtualMachine.GuestConnection = &hcsschema.GuestConnection{
|
||||
UseVsock: true,
|
||||
UseConnectedSuspend: true,
|
||||
}
|
||||
}
|
||||
|
||||
if uvm.scsiControllerCount > 0 {
|
||||
// TODO: JTERRY75 - this should enumerate scsicount and add an entry per value.
|
||||
doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{
|
||||
"0": {
|
||||
Attachments: make(map[string]hcsschema.Attachment),
|
||||
},
|
||||
}
|
||||
}
|
||||
if uvm.vpmemMaxCount > 0 {
|
||||
doc.VirtualMachine.Devices.VirtualPMem = &hcsschema.VirtualPMemController{
|
||||
MaximumCount: uvm.vpmemMaxCount,
|
||||
MaximumSizeBytes: uvm.vpmemMaxSizeBytes,
|
||||
}
|
||||
}
|
||||
|
||||
var kernelArgs string
|
||||
switch opts.PreferredRootFSType {
|
||||
case PreferredRootFSTypeInitRd:
|
||||
if !opts.KernelDirect {
|
||||
kernelArgs = "initrd=/" + opts.RootFSFile
|
||||
}
|
||||
case PreferredRootFSTypeVHD:
|
||||
// Support for VPMem VHD(X) booting rather than initrd..
|
||||
kernelArgs = "root=/dev/pmem0 ro init=/init"
|
||||
imageFormat := "Vhd1"
|
||||
if strings.ToLower(filepath.Ext(opts.RootFSFile)) == "vhdx" {
|
||||
imageFormat = "Vhdx"
|
||||
}
|
||||
doc.VirtualMachine.Devices.VirtualPMem.Devices = map[string]hcsschema.VirtualPMemDevice{
|
||||
"0": {
|
||||
HostPath: rootfsFullPath,
|
||||
ReadOnly: true,
|
||||
ImageFormat: imageFormat,
|
||||
},
|
||||
}
|
||||
if err := wclayer.GrantVmAccess(uvm.id, rootfsFullPath); err != nil {
|
||||
return nil, fmt.Errorf("failed to grantvmaccess to %s: %s", rootfsFullPath, err)
|
||||
}
|
||||
// Add to our internal structure
|
||||
uvm.vpmemDevices[0] = vpmemInfo{
|
||||
hostPath: opts.RootFSFile,
|
||||
uvmPath: "/",
|
||||
refCount: 1,
|
||||
}
|
||||
}
|
||||
|
||||
vmDebugging := false
|
||||
if opts.ConsolePipe != "" {
|
||||
vmDebugging = true
|
||||
kernelArgs += " 8250_core.nr_uarts=1 8250_core.skip_txen_test=1 console=ttyS0,115200"
|
||||
doc.VirtualMachine.Devices.ComPorts = map[string]hcsschema.ComPort{
|
||||
"0": { // Which is actually COM1
|
||||
NamedPipe: opts.ConsolePipe,
|
||||
},
|
||||
}
|
||||
} else {
|
||||
kernelArgs += " 8250_core.nr_uarts=0"
|
||||
}
|
||||
|
||||
if opts.EnableGraphicsConsole {
|
||||
vmDebugging = true
|
||||
kernelArgs += " console=tty"
|
||||
doc.VirtualMachine.Devices.Keyboard = &hcsschema.Keyboard{}
|
||||
doc.VirtualMachine.Devices.EnhancedModeVideo = &hcsschema.EnhancedModeVideo{}
|
||||
doc.VirtualMachine.Devices.VideoMonitor = &hcsschema.VideoMonitor{}
|
||||
}
|
||||
|
||||
if !vmDebugging {
|
||||
// Terminate the VM if there is a kernel panic.
|
||||
kernelArgs += " panic=-1 quiet"
|
||||
}
|
||||
|
||||
if opts.KernelBootOptions != "" {
|
||||
kernelArgs += " " + opts.KernelBootOptions
|
||||
}
|
||||
|
||||
// With default options, run GCS with stderr pointing to the vsock port
|
||||
// created below in order to forward guest logs to logrus.
|
||||
initArgs := "/bin/vsockexec"
|
||||
|
||||
if opts.ForwardStdout {
|
||||
initArgs += fmt.Sprintf(" -o %d", linuxLogVsockPort)
|
||||
}
|
||||
|
||||
if opts.ForwardStderr {
|
||||
initArgs += fmt.Sprintf(" -e %d", linuxLogVsockPort)
|
||||
}
|
||||
|
||||
initArgs += " " + opts.ExecCommandLine
|
||||
|
||||
if vmDebugging {
|
||||
// Launch a shell on the console.
|
||||
initArgs = `sh -c "` + initArgs + ` & exec sh"`
|
||||
}
|
||||
|
||||
kernelArgs += ` pci=off brd.rd_nr=0 pmtmr=0 -- ` + initArgs
|
||||
|
||||
if !opts.KernelDirect {
|
||||
doc.VirtualMachine.Chipset.Uefi = &hcsschema.Uefi{
|
||||
BootThis: &hcsschema.UefiBootEntry{
|
||||
DevicePath: `\` + opts.KernelFile,
|
||||
DeviceType: "VmbFs",
|
||||
VmbFsRootPath: opts.BootFilesPath,
|
||||
OptionalData: kernelArgs,
|
||||
},
|
||||
}
|
||||
} else {
|
||||
doc.VirtualMachine.Chipset.LinuxKernelDirect = &hcsschema.LinuxKernelDirect{
|
||||
KernelFilePath: kernelFullPath,
|
||||
KernelCmdLine: kernelArgs,
|
||||
}
|
||||
if opts.PreferredRootFSType == PreferredRootFSTypeInitRd {
|
||||
doc.VirtualMachine.Chipset.LinuxKernelDirect.InitRdPath = rootfsFullPath
|
||||
}
|
||||
}
|
||||
|
||||
fullDoc, err := mergemaps.MergeJSON(doc, ([]byte)(opts.AdditionHCSDocumentJSON))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to merge additional JSON '%s': %s", opts.AdditionHCSDocumentJSON, err)
|
||||
}
|
||||
|
||||
hcsSystem, err := hcs.CreateComputeSystem(uvm.id, fullDoc)
|
||||
if err != nil {
|
||||
logrus.Debugln("failed to create UVM: ", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
uvm.hcsSystem = hcsSystem
|
||||
defer func() {
|
||||
if err != nil {
|
||||
uvm.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
// Create a socket that the executed program can send to. This is usually
|
||||
// used by GCS to send log data.
|
||||
if opts.ForwardStdout || opts.ForwardStderr {
|
||||
uvm.outputHandler = opts.OutputHandler
|
||||
uvm.outputProcessingDone = make(chan struct{})
|
||||
uvm.outputListener, err = uvm.listenVsock(linuxLogVsockPort)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return uvm, nil
|
||||
}
|
||||
|
||||
func (uvm *UtilityVM) listenVsock(port uint32) (net.Listener, error) {
|
||||
properties, err := uvm.hcsSystem.Properties()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vmID, err := hvsock.GUIDFromString(properties.RuntimeID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
serviceID, _ := hvsock.GUIDFromString("00000000-facb-11e6-bd58-64006a7986d3")
|
||||
binary.LittleEndian.PutUint32(serviceID[0:4], port)
|
||||
return hvsock.Listen(hvsock.Addr{VMID: vmID, ServiceID: serviceID})
|
||||
}
|
||||
|
||||
// PMemMaxSizeBytes returns the maximum size of a PMEM layer (LCOW)
|
||||
func (uvm *UtilityVM) PMemMaxSizeBytes() uint64 {
|
||||
return uvm.vpmemMaxSizeBytes
|
||||
}
|
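For orientation while reading the removal above, here is a minimal caller-side sketch of how this API was typically driven. It is an assumption for illustration only, not code from this repository: the function name, the ID string, and the use of the standard log package are invented, and it presumes the caller sits inside the hcsshim module so it can import the internal uvm package.

// Hypothetical usage sketch (not part of the vendored sources).
func runExampleLCOW() error {
    // Defaults: 1024MB, 1 SCSI controller, guest connection enabled, initrd boot
    // unless rootfs.vhd is found under the boot files path.
    opts := uvm.NewDefaultOptionsLCOW("example-lcow-uvm", "") // illustrative ID; owner defaults to the executable name
    vm, err := uvm.CreateLCOW(opts)
    if err != nil {
        return err
    }
    defer vm.Close()
    if err := vm.Start(); err != nil {
        return err
    }
    log.Printf("utility VM %s started", opts.ID)
    return vm.Terminate()
}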
25
vendor/github.com/Microsoft/hcsshim/internal/uvm/create_test.go
generated
vendored
@ -1,25 +0,0 @@
package uvm

import (
    "testing"
)

// Unit tests for negative testing of input to uvm.Create()

func TestCreateBadBootFilesPath(t *testing.T) {
    opts := NewDefaultOptionsLCOW(t.Name(), "")
    opts.BootFilesPath = `c:\does\not\exist\I\hope`

    _, err := CreateLCOW(opts)
    if err == nil || err.Error() != `kernel: 'c:\does\not\exist\I\hope\kernel' not found` {
        t.Fatal(err)
    }
}

func TestCreateWCOWBadLayerFolders(t *testing.T) {
    opts := NewDefaultOptionsWCOW(t.Name(), "")
    _, err := CreateWCOW(opts)
    if err == nil || (err != nil && err.Error() != `at least 2 LayerFolders must be supplied`) {
        t.Fatal(err)
    }
}
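A further negative test along the same lines could cover the SCSI controller count check in CreateLCOW. This is a sketch only, not part of the original file, and unlike the tests above it assumes the default LCOW boot files are installed on the test machine so that validation reaches the controller-count check:

// Hypothetical additional test (not in the original vendored file); requires the default
// LCOW boot files so CreateLCOW gets past the kernel/rootfs existence checks.
func TestCreateLCOWBadSCSIControllerCount(t *testing.T) {
    opts := NewDefaultOptionsLCOW(t.Name(), "")
    opts.SCSIControllerCount = 2 // only 0 or 1 is currently supported

    _, err := CreateLCOW(opts)
    if err == nil || err.Error() != `SCSI controller count must be 0 or 1` {
        t.Fatal(err)
    }
}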
186
vendor/github.com/Microsoft/hcsshim/internal/uvm/create_wcow.go
generated
vendored
@ -1,186 +0,0 @@
package uvm

import (
    "fmt"
    "os"
    "path/filepath"

    "github.com/Microsoft/hcsshim/internal/guid"
    "github.com/Microsoft/hcsshim/internal/hcs"
    "github.com/Microsoft/hcsshim/internal/mergemaps"
    "github.com/Microsoft/hcsshim/internal/schema2"
    "github.com/Microsoft/hcsshim/internal/schemaversion"
    "github.com/Microsoft/hcsshim/internal/uvmfolder"
    "github.com/Microsoft/hcsshim/internal/wcow"
    "github.com/sirupsen/logrus"
)

// OptionsWCOW are the set of options passed to CreateWCOW() to create a utility vm.
type OptionsWCOW struct {
    *Options

    LayerFolders []string // Set of folders for base layers and scratch. Ordered from top most read-only through base read-only layer, followed by scratch
}

// NewDefaultOptionsWCOW creates the default options for a bootable version of
// WCOW. The caller `MUST` set the `LayerFolders` path on the returned value.
//
// `id` the ID of the compute system. If not passed will generate a new GUID.
//
// `owner` the owner of the compute system. If not passed will use the
// executable file's name.
func NewDefaultOptionsWCOW(id, owner string) *OptionsWCOW {
    opts := &OptionsWCOW{
        Options: &Options{
            ID:                   id,
            Owner:                owner,
            MemorySizeInMB:       1024,
            AllowOvercommit:      true,
            EnableDeferredCommit: false,
            ProcessorCount:       defaultProcessorCount(),
        },
    }

    if opts.ID == "" {
        opts.ID = guid.New().String()
    }
    if opts.Owner == "" {
        opts.Owner = filepath.Base(os.Args[0])
    }

    return opts
}

// CreateWCOW creates an HCS compute system representing a utility VM.
//
// WCOW Notes:
//   - The scratch is always attached to SCSI 0:0
//
func CreateWCOW(opts *OptionsWCOW) (_ *UtilityVM, err error) {
    logrus.Debugf("uvm::CreateWCOW %+v", opts)

    if opts.Options == nil {
        opts.Options = &Options{}
    }

    uvm := &UtilityVM{
        id:                  opts.ID,
        owner:               opts.Owner,
        operatingSystem:     "windows",
        scsiControllerCount: 1,
        vsmbShares:          make(map[string]*vsmbShare),
    }

    if len(opts.LayerFolders) < 2 {
        return nil, fmt.Errorf("at least 2 LayerFolders must be supplied")
    }
    uvmFolder, err := uvmfolder.LocateUVMFolder(opts.LayerFolders)
    if err != nil {
        return nil, fmt.Errorf("failed to locate utility VM folder from layer folders: %s", err)
    }

    // TODO: BUGBUG Remove this. @jhowardmsft
    // It should be the responsibility of the caller to do the creation and population.
    // - Update runhcs too (vm.go).
    // - Remove comment in function header
    // - Update tests that rely on this current behaviour.
    // Create the RW scratch in the top-most layer folder, creating the folder if it doesn't already exist.
    scratchFolder := opts.LayerFolders[len(opts.LayerFolders)-1]
    logrus.Debugf("uvm::CreateWCOW scratch folder: %s", scratchFolder)

    // Create the directory if it doesn't exist
    if _, err := os.Stat(scratchFolder); os.IsNotExist(err) {
        logrus.Debugf("uvm::CreateWCOW creating folder: %s ", scratchFolder)
        if err := os.MkdirAll(scratchFolder, 0777); err != nil {
            return nil, fmt.Errorf("failed to create utility VM scratch folder: %s", err)
        }
    }

    // Create sandbox.vhdx in the scratch folder based on the template, granting the correct permissions to it
    scratchPath := filepath.Join(scratchFolder, "sandbox.vhdx")
    if _, err := os.Stat(scratchPath); os.IsNotExist(err) {
        if err := wcow.CreateUVMScratch(uvmFolder, scratchFolder, uvm.id); err != nil {
            return nil, fmt.Errorf("failed to create scratch: %s", err)
        }
    }

    doc := &hcsschema.ComputeSystem{
        Owner:                             uvm.owner,
        SchemaVersion:                     schemaversion.SchemaV21(),
        ShouldTerminateOnLastHandleClosed: true,
        VirtualMachine: &hcsschema.VirtualMachine{
            StopOnReset: true,
            Chipset: &hcsschema.Chipset{
                Uefi: &hcsschema.Uefi{
                    BootThis: &hcsschema.UefiBootEntry{
                        DevicePath: `\EFI\Microsoft\Boot\bootmgfw.efi`,
                        DeviceType: "VmbFs",
                    },
                },
            },
            ComputeTopology: &hcsschema.Topology{
                Memory: &hcsschema.Memory2{
                    SizeInMB:        opts.MemorySizeInMB,
                    AllowOvercommit: opts.AllowOvercommit,
                    // EnableHotHint is not compatible with physical.
                    EnableHotHint:        opts.AllowOvercommit,
                    EnableDeferredCommit: opts.EnableDeferredCommit,
                },
                Processor: &hcsschema.Processor2{
                    Count: defaultProcessorCount(),
                },
            },
            GuestConnection: &hcsschema.GuestConnection{},
            Devices: &hcsschema.Devices{
                Scsi: map[string]hcsschema.Scsi{
                    "0": {
                        Attachments: map[string]hcsschema.Attachment{
                            "0": {
                                Path:  scratchPath,
                                Type_: "VirtualDisk",
                            },
                        },
                    },
                },
                HvSocket: &hcsschema.HvSocket2{
                    HvSocketConfig: &hcsschema.HvSocketSystemConfig{
                        // Allow administrators and SYSTEM to bind to vsock sockets
                        // so that we can create a GCS log socket.
                        DefaultBindSecurityDescriptor: "D:P(A;;FA;;;SY)(A;;FA;;;BA)",
                    },
                },
                VirtualSmb: &hcsschema.VirtualSmb{
                    DirectFileMappingInMB: 1024, // Sensible default, but could be a tuning parameter somewhere
                    Shares: []hcsschema.VirtualSmbShare{
                        {
                            Name: "os",
                            Path: filepath.Join(uvmFolder, `UtilityVM\Files`),
                            Options: &hcsschema.VirtualSmbShareOptions{
                                ReadOnly:            true,
                                PseudoOplocks:       true,
                                TakeBackupPrivilege: true,
                                CacheIo:             true,
                                ShareRead:           true,
                            },
                        },
                    },
                },
            },
        },
    }

    uvm.scsiLocations[0][0].hostPath = doc.VirtualMachine.Devices.Scsi["0"].Attachments["0"].Path

    fullDoc, err := mergemaps.MergeJSON(doc, ([]byte)(opts.AdditionHCSDocumentJSON))
    if err != nil {
        return nil, fmt.Errorf("failed to merge additional JSON '%s': %s", opts.AdditionHCSDocumentJSON, err)
    }

    hcsSystem, err := hcs.CreateComputeSystem(uvm.id, fullDoc)
    if err != nil {
        logrus.Debugln("failed to create UVM: ", err)
        return nil, err
    }
    uvm.hcsSystem = hcsSystem
    return uvm, nil
}
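As with LCOW, a hedged caller-side sketch; the helper name and layer folder paths below are illustrative assumptions, not values from the diff.

// Hypothetical usage sketch (not part of the vendored sources). CreateWCOW requires the
// caller to set LayerFolders: read-only layers first, scratch folder last.
func runExampleWCOW() error {
    opts := uvm.NewDefaultOptionsWCOW("example-wcow-uvm", "")
    opts.LayerFolders = []string{
        `C:\layers\base`,    // read-only base layer (illustrative path)
        `C:\layers\scratch`, // scratch; sandbox.vhdx is created here if missing
    }
    vm, err := uvm.CreateWCOW(opts)
    if err != nil {
        return err
    }
    defer vm.Close()
    return vm.Start()
}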
6
vendor/github.com/Microsoft/hcsshim/internal/uvm/modify.go
generated
vendored
@ -1,6 +0,0 @@
package uvm

// Modify modifies the compute system by sending a request to HCS.
func (uvm *UtilityVM) Modify(hcsModificationDocument interface{}) error {
    return uvm.hcsSystem.Modify(hcsModificationDocument)
}
251
vendor/github.com/Microsoft/hcsshim/internal/uvm/network.go
generated
vendored
@ -1,251 +0,0 @@
|
||||
package uvm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
|
||||
"github.com/Microsoft/hcsshim/hcn"
|
||||
"github.com/Microsoft/hcsshim/internal/guestrequest"
|
||||
"github.com/Microsoft/hcsshim/internal/guid"
|
||||
"github.com/Microsoft/hcsshim/internal/hns"
|
||||
"github.com/Microsoft/hcsshim/internal/requesttype"
|
||||
"github.com/Microsoft/hcsshim/internal/schema1"
|
||||
"github.com/Microsoft/hcsshim/internal/schema2"
|
||||
"github.com/Microsoft/hcsshim/osversion"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// AddNetNS adds network namespace inside the guest & adds endpoints to the guest on that namepace
|
||||
func (uvm *UtilityVM) AddNetNS(id string, endpoints []*hns.HNSEndpoint) (err error) {
|
||||
uvm.m.Lock()
|
||||
defer uvm.m.Unlock()
|
||||
ns := uvm.namespaces[id]
|
||||
if ns == nil {
|
||||
ns = &namespaceInfo{}
|
||||
|
||||
if uvm.isNetworkNamespaceSupported() {
|
||||
// Add a Guest Network namespace. On LCOW we add the adapters
|
||||
// dynamically.
|
||||
if uvm.operatingSystem == "windows" {
|
||||
hcnNamespace, err := hcn.GetNamespaceByID(id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
guestNamespace := hcsschema.ModifySettingRequest{
|
||||
GuestRequest: guestrequest.GuestRequest{
|
||||
ResourceType: guestrequest.ResourceTypeNetworkNamespace,
|
||||
RequestType: requesttype.Add,
|
||||
Settings: hcnNamespace,
|
||||
},
|
||||
}
|
||||
if err := uvm.Modify(&guestNamespace); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err != nil {
|
||||
if e := uvm.removeNamespaceNICs(ns); e != nil {
|
||||
logrus.Warnf("failed to undo NIC add: %v", e)
|
||||
}
|
||||
}
|
||||
}()
|
||||
for _, endpoint := range endpoints {
|
||||
nicID := guid.New()
|
||||
err = uvm.addNIC(nicID, endpoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ns.nics = append(ns.nics, nicInfo{nicID, endpoint})
|
||||
}
|
||||
if uvm.namespaces == nil {
|
||||
uvm.namespaces = make(map[string]*namespaceInfo)
|
||||
}
|
||||
uvm.namespaces[id] = ns
|
||||
}
|
||||
ns.refCount++
|
||||
return nil
|
||||
}
|
||||
|
||||
//RemoveNetNS removes the namespace information
|
||||
func (uvm *UtilityVM) RemoveNetNS(id string) error {
|
||||
uvm.m.Lock()
|
||||
defer uvm.m.Unlock()
|
||||
ns := uvm.namespaces[id]
|
||||
if ns == nil || ns.refCount <= 0 {
|
||||
panic(fmt.Errorf("removed a namespace that was not added: %s", id))
|
||||
}
|
||||
|
||||
ns.refCount--
|
||||
|
||||
// Remove the Guest Network namespace
|
||||
if uvm.isNetworkNamespaceSupported() {
|
||||
if uvm.operatingSystem == "windows" {
|
||||
hcnNamespace, err := hcn.GetNamespaceByID(id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
guestNamespace := hcsschema.ModifySettingRequest{
|
||||
GuestRequest: guestrequest.GuestRequest{
|
||||
ResourceType: guestrequest.ResourceTypeNetworkNamespace,
|
||||
RequestType: requesttype.Remove,
|
||||
Settings: hcnNamespace,
|
||||
},
|
||||
}
|
||||
if err := uvm.Modify(&guestNamespace); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var err error
|
||||
if ns.refCount == 0 {
|
||||
err = uvm.removeNamespaceNICs(ns)
|
||||
delete(uvm.namespaces, id)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// IsNetworkNamespaceSupported returns bool value specifying if network namespace is supported inside the guest
|
||||
func (uvm *UtilityVM) isNetworkNamespaceSupported() bool {
|
||||
p, err := uvm.ComputeSystem().Properties(schema1.PropertyTypeGuestConnection)
|
||||
if err == nil {
|
||||
return p.GuestConnectionInfo.GuestDefinedCapabilities.NamespaceAddRequestSupported
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (uvm *UtilityVM) removeNamespaceNICs(ns *namespaceInfo) error {
|
||||
for len(ns.nics) != 0 {
|
||||
nic := ns.nics[len(ns.nics)-1]
|
||||
err := uvm.removeNIC(nic.ID, nic.Endpoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ns.nics = ns.nics[:len(ns.nics)-1]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getNetworkModifyRequest(adapterID string, requestType string, settings interface{}) interface{} {
|
||||
if osversion.Get().Build >= osversion.RS5 {
|
||||
return guestrequest.NetworkModifyRequest{
|
||||
AdapterId: adapterID,
|
||||
RequestType: requestType,
|
||||
Settings: settings,
|
||||
}
|
||||
}
|
||||
return guestrequest.RS4NetworkModifyRequest{
|
||||
AdapterInstanceId: adapterID,
|
||||
RequestType: requestType,
|
||||
Settings: settings,
|
||||
}
|
||||
}
|
||||
|
||||
func (uvm *UtilityVM) addNIC(id guid.GUID, endpoint *hns.HNSEndpoint) error {
|
||||
|
||||
// First a pre-add. This is a guest-only request and is only done on Windows.
|
||||
if uvm.operatingSystem == "windows" {
|
||||
preAddRequest := hcsschema.ModifySettingRequest{
|
||||
GuestRequest: guestrequest.GuestRequest{
|
||||
ResourceType: guestrequest.ResourceTypeNetwork,
|
||||
RequestType: requesttype.Add,
|
||||
Settings: getNetworkModifyRequest(
|
||||
id.String(),
|
||||
requesttype.PreAdd,
|
||||
endpoint),
|
||||
},
|
||||
}
|
||||
if err := uvm.Modify(&preAddRequest); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Then the Add itself
|
||||
request := hcsschema.ModifySettingRequest{
|
||||
RequestType: requesttype.Add,
|
||||
ResourcePath: path.Join("VirtualMachine/Devices/NetworkAdapters", id.String()),
|
||||
Settings: hcsschema.NetworkAdapter{
|
||||
EndpointId: endpoint.Id,
|
||||
MacAddress: endpoint.MacAddress,
|
||||
},
|
||||
}
|
||||
|
||||
if uvm.operatingSystem == "windows" {
|
||||
request.GuestRequest = guestrequest.GuestRequest{
|
||||
ResourceType: guestrequest.ResourceTypeNetwork,
|
||||
RequestType: requesttype.Add,
|
||||
Settings: getNetworkModifyRequest(
|
||||
id.String(),
|
||||
requesttype.Add,
|
||||
nil),
|
||||
}
|
||||
} else {
|
||||
// Verify this version of LCOW supports Network HotAdd
|
||||
if uvm.isNetworkNamespaceSupported() {
|
||||
request.GuestRequest = guestrequest.GuestRequest{
|
||||
ResourceType: guestrequest.ResourceTypeNetwork,
|
||||
RequestType: requesttype.Add,
|
||||
Settings: &guestrequest.LCOWNetworkAdapter{
|
||||
NamespaceID: endpoint.Namespace.ID,
|
||||
ID: id.String(),
|
||||
MacAddress: endpoint.MacAddress,
|
||||
IPAddress: endpoint.IPAddress.String(),
|
||||
PrefixLength: endpoint.PrefixLength,
|
||||
GatewayAddress: endpoint.GatewayAddress,
|
||||
DNSSuffix: endpoint.DNSSuffix,
|
||||
DNSServerList: endpoint.DNSServerList,
|
||||
EnableLowMetric: endpoint.EnableLowMetric,
|
||||
EncapOverhead: endpoint.EncapOverhead,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := uvm.Modify(&request); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (uvm *UtilityVM) removeNIC(id guid.GUID, endpoint *hns.HNSEndpoint) error {
|
||||
request := hcsschema.ModifySettingRequest{
|
||||
RequestType: requesttype.Remove,
|
||||
ResourcePath: path.Join("VirtualMachine/Devices/NetworkAdapters", id.String()),
|
||||
Settings: hcsschema.NetworkAdapter{
|
||||
EndpointId: endpoint.Id,
|
||||
MacAddress: endpoint.MacAddress,
|
||||
},
|
||||
}
|
||||
|
||||
if uvm.operatingSystem == "windows" {
|
||||
request.GuestRequest = hcsschema.ModifySettingRequest{
|
||||
RequestType: requesttype.Remove,
|
||||
Settings: getNetworkModifyRequest(
|
||||
id.String(),
|
||||
requesttype.Remove,
|
||||
nil),
|
||||
}
|
||||
} else {
|
||||
// Verify this version of LCOW supports Network HotRemove
|
||||
if uvm.isNetworkNamespaceSupported() {
|
||||
request.GuestRequest = guestrequest.GuestRequest{
|
||||
ResourceType: guestrequest.ResourceTypeNetwork,
|
||||
RequestType: requesttype.Remove,
|
||||
Settings: &guestrequest.LCOWNetworkAdapter{
|
||||
NamespaceID: endpoint.Namespace.ID,
|
||||
ID: endpoint.Id,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := uvm.Modify(&request); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
133
vendor/github.com/Microsoft/hcsshim/internal/uvm/plan9.go
generated
vendored
@ -1,133 +0,0 @@
package uvm

import (
    "fmt"

    "github.com/Microsoft/hcsshim/internal/guestrequest"
    "github.com/Microsoft/hcsshim/internal/logfields"
    "github.com/Microsoft/hcsshim/internal/requesttype"
    "github.com/Microsoft/hcsshim/internal/schema2"
    "github.com/sirupsen/logrus"
)

// AddPlan9 adds a Plan9 share to a utility VM. Each Plan9 share is ref-counted and
// only added if it isn't already.
func (uvm *UtilityVM) AddPlan9(hostPath string, uvmPath string, readOnly bool) error {
    logrus.WithFields(logrus.Fields{
        logfields.UVMID: uvm.id,
        "host-path":     hostPath,
        "uvm-path":      uvmPath,
        "readOnly":      readOnly,
    }).Debug("uvm::AddPlan9")

    if uvm.operatingSystem != "linux" {
        return errNotSupported
    }
    if uvmPath == "" {
        return fmt.Errorf("uvmPath must be passed to AddPlan9")
    }

    // TODO: JTERRY75 - These are marked private in the schema. For now use them
    // but when there are public variants we need to switch to them.
    const (
        shareFlagsReadOnly      int32 = 0x00000001
        shareFlagsLinuxMetadata int32 = 0x00000004
        shareFlagsCaseSensitive int32 = 0x00000008
    )

    flags := shareFlagsLinuxMetadata | shareFlagsCaseSensitive
    if readOnly {
        flags |= shareFlagsReadOnly
    }

    uvm.m.Lock()
    defer uvm.m.Unlock()
    if uvm.plan9Shares == nil {
        uvm.plan9Shares = make(map[string]*plan9Info)
    }
    if _, ok := uvm.plan9Shares[hostPath]; !ok {
        uvm.plan9Counter++

        modification := &hcsschema.ModifySettingRequest{
            RequestType: requesttype.Add,
            Settings: hcsschema.Plan9Share{
                Name:  fmt.Sprintf("%d", uvm.plan9Counter),
                Path:  hostPath,
                Port:  int32(uvm.plan9Counter), // TODO: Temporary. Will all use a single port (9999)
                Flags: flags,
            },
            ResourcePath: "VirtualMachine/Devices/Plan9/Shares",
            GuestRequest: guestrequest.GuestRequest{
                ResourceType: guestrequest.ResourceTypeMappedDirectory,
                RequestType:  requesttype.Add,
                Settings: guestrequest.LCOWMappedDirectory{
                    MountPath: uvmPath,
                    Port:      int32(uvm.plan9Counter), // TODO: Temporary. Will all use a single port (9999)
                    ReadOnly:  readOnly,
                },
            },
        }

        if err := uvm.Modify(modification); err != nil {
            return err
        }
        uvm.plan9Shares[hostPath] = &plan9Info{
            refCount:  1,
            uvmPath:   uvmPath,
            idCounter: uvm.plan9Counter,
            port:      int32(uvm.plan9Counter), // TODO: Temporary. Will all use a single port (9999)
        }
    } else {
        uvm.plan9Shares[hostPath].refCount++
    }
    logrus.Debugf("hcsshim::AddPlan9 Success %s: refcount=%d %+v", hostPath, uvm.plan9Shares[hostPath].refCount, uvm.plan9Shares[hostPath])
    return nil
}

// RemovePlan9 removes a Plan9 share from a utility VM. Each Plan9 share is ref-counted
// and only actually removed when the ref-count drops to zero.
func (uvm *UtilityVM) RemovePlan9(hostPath string) error {
    if uvm.operatingSystem != "linux" {
        return errNotSupported
    }
    logrus.Debugf("uvm::RemovePlan9 %s id:%s", hostPath, uvm.id)
    uvm.m.Lock()
    defer uvm.m.Unlock()
    if _, ok := uvm.plan9Shares[hostPath]; !ok {
        return fmt.Errorf("%s is not present as a Plan9 share in %s, cannot remove", hostPath, uvm.id)
    }
    return uvm.removePlan9(hostPath, uvm.plan9Shares[hostPath].uvmPath)
}

// removePlan9 is the internally callable "unsafe" version of RemovePlan9. The mutex
// MUST be held when calling this function.
func (uvm *UtilityVM) removePlan9(hostPath, uvmPath string) error {
    uvm.plan9Shares[hostPath].refCount--
    if uvm.plan9Shares[hostPath].refCount > 0 {
        logrus.Debugf("uvm::RemovePlan9 Success %s id:%s Ref-count now %d. It is still present in the utility VM", hostPath, uvm.id, uvm.plan9Shares[hostPath].refCount)
        return nil
    }
    logrus.Debugf("uvm::RemovePlan9 Zero ref-count, removing. %s id:%s", hostPath, uvm.id)
    modification := &hcsschema.ModifySettingRequest{
        RequestType: requesttype.Remove,
        Settings: hcsschema.Plan9Share{
            Name: fmt.Sprintf("%d", uvm.plan9Shares[hostPath].idCounter),
            Port: uvm.plan9Shares[hostPath].port,
        },
        ResourcePath: "VirtualMachine/Devices/Plan9/Shares",
        GuestRequest: guestrequest.GuestRequest{
            ResourceType: guestrequest.ResourceTypeMappedDirectory,
            RequestType:  requesttype.Remove,
            Settings: guestrequest.LCOWMappedDirectory{
                MountPath: uvm.plan9Shares[hostPath].uvmPath,
                Port:      uvm.plan9Shares[hostPath].port,
            },
        },
    }
    if err := uvm.Modify(modification); err != nil {
        return fmt.Errorf("failed to remove plan9 share %s from %s: %+v: %s", hostPath, uvm.id, modification, err)
    }
    delete(uvm.plan9Shares, hostPath)
    logrus.Debugf("uvm::RemovePlan9 Success %s id:%s successfully removed from utility VM", hostPath, uvm.id)
    return nil
}
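A short hedged sketch of the ref-counted Plan9 API above; the host and guest paths and the helper name are illustrative assumptions:

// Hypothetical usage sketch (not part of the vendored sources): share a host directory
// read-only into a running LCOW utility VM, then drop the reference again.
func sharePlan9Example(vm *uvm.UtilityVM) error {
    hostPath := `C:\shared\config` // illustrative
    if err := vm.AddPlan9(hostPath, "/run/config", true); err != nil {
        return err
    }
    defer vm.RemovePlan9(hostPath) // decrements the ref-count; removed when it reaches zero
    // ... use /run/config inside the guest ...
    return nil
}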
318
vendor/github.com/Microsoft/hcsshim/internal/uvm/scsi.go
generated
vendored
@ -1,318 +0,0 @@
|
||||
package uvm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/Microsoft/hcsshim/internal/guestrequest"
|
||||
"github.com/Microsoft/hcsshim/internal/logfields"
|
||||
"github.com/Microsoft/hcsshim/internal/requesttype"
|
||||
"github.com/Microsoft/hcsshim/internal/schema2"
|
||||
"github.com/Microsoft/hcsshim/internal/wclayer"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrNoAvailableLocation = fmt.Errorf("no available location")
|
||||
ErrNotAttached = fmt.Errorf("not attached")
|
||||
ErrAlreadyAttached = fmt.Errorf("already attached")
|
||||
ErrNoSCSIControllers = fmt.Errorf("no SCSI controllers configured for this utility VM")
|
||||
ErrTooManyAttachments = fmt.Errorf("too many SCSI attachments")
|
||||
ErrSCSILayerWCOWUnsupported = fmt.Errorf("SCSI attached layers are not supported for WCOW")
|
||||
)
|
||||
|
||||
// allocateSCSI finds the next available slot on the
|
||||
// SCSI controllers associated with a utility VM to use.
|
||||
// Lock must be held when calling this function
|
||||
func (uvm *UtilityVM) allocateSCSI(hostPath string, uvmPath string, isLayer bool) (int, int32, error) {
|
||||
for controller, luns := range uvm.scsiLocations {
|
||||
for lun, si := range luns {
|
||||
if si.hostPath == "" {
|
||||
uvm.scsiLocations[controller][lun].hostPath = hostPath
|
||||
uvm.scsiLocations[controller][lun].uvmPath = uvmPath
|
||||
uvm.scsiLocations[controller][lun].isLayer = isLayer
|
||||
if isLayer {
|
||||
uvm.scsiLocations[controller][lun].refCount = 1
|
||||
}
|
||||
logrus.Debugf("uvm::allocateSCSI %d:%d %q %q", controller, lun, hostPath, uvmPath)
|
||||
return controller, int32(lun), nil
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
return -1, -1, ErrNoAvailableLocation
|
||||
}
|
||||
|
||||
func (uvm *UtilityVM) deallocateSCSI(controller int, lun int32) error {
|
||||
uvm.m.Lock()
|
||||
defer uvm.m.Unlock()
|
||||
logrus.Debugf("uvm::deallocateSCSI %d:%d %+v", controller, lun, uvm.scsiLocations[controller][lun])
|
||||
uvm.scsiLocations[controller][lun] = scsiInfo{}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Lock must be held when calling this function.
|
||||
func (uvm *UtilityVM) findSCSIAttachment(findThisHostPath string) (int, int32, string, error) {
|
||||
for controller, luns := range uvm.scsiLocations {
|
||||
for lun, si := range luns {
|
||||
if si.hostPath == findThisHostPath {
|
||||
logrus.Debugf("uvm::findSCSIAttachment %d:%d %+v", controller, lun, si)
|
||||
return controller, int32(lun), si.uvmPath, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return -1, -1, "", ErrNotAttached
|
||||
}
|
||||
|
||||
// AddSCSI adds a SCSI disk to a utility VM at the next available location. This
|
||||
// function should be called for a RW/scratch layer or a passthrough vhd/vhdx.
|
||||
// For read-only layers on LCOW as an alternate to PMEM for large layers, use
|
||||
// AddSCSILayer instead.
|
||||
//
|
||||
// `hostPath` is required and must point to a vhd/vhdx path.
|
||||
//
|
||||
// `uvmPath` is optional.
|
||||
//
|
||||
// `readOnly` set to `true` if the vhd/vhdx should be attached read only.
|
||||
func (uvm *UtilityVM) AddSCSI(hostPath string, uvmPath string, readOnly bool) (int, int32, error) {
|
||||
logrus.WithFields(logrus.Fields{
|
||||
logfields.UVMID: uvm.id,
|
||||
"host-path": hostPath,
|
||||
"uvm-path": uvmPath,
|
||||
"readOnly": readOnly,
|
||||
}).Debug("uvm::AddSCSI")
|
||||
|
||||
return uvm.addSCSIActual(hostPath, uvmPath, "VirtualDisk", false, readOnly)
|
||||
}
|
||||
|
||||
// AddSCSIPhysicalDisk attaches a physical disk from the host directly to the
|
||||
// Utility VM at the next available location.
|
||||
//
|
||||
// `hostPath` is required and `likely` start's with `\\.\PHYSICALDRIVE`.
|
||||
//
|
||||
// `uvmPath` is optional if a guest mount is not requested.
|
||||
//
|
||||
// `readOnly` set to `true` if the physical disk should be attached read only.
|
||||
func (uvm *UtilityVM) AddSCSIPhysicalDisk(hostPath, uvmPath string, readOnly bool) (int, int32, error) {
|
||||
logrus.WithFields(logrus.Fields{
|
||||
logfields.UVMID: uvm.id,
|
||||
"host-path": hostPath,
|
||||
"uvm-path": uvmPath,
|
||||
"readOnly": readOnly,
|
||||
}).Debug("uvm::AddSCSIPhysicalDisk")
|
||||
|
||||
return uvm.addSCSIActual(hostPath, uvmPath, "PassThru", false, readOnly)
|
||||
}
|
||||
|
||||
// AddSCSILayer adds a read-only layer disk to a utility VM at the next available
|
||||
// location. This function is used by LCOW as an alternate to PMEM for large layers.
|
||||
// The UVMPath will always be /tmp/S<controller>/<lun>.
|
||||
func (uvm *UtilityVM) AddSCSILayer(hostPath string) (int, int32, error) {
|
||||
logrus.WithFields(logrus.Fields{
|
||||
logfields.UVMID: uvm.id,
|
||||
"host-path": hostPath,
|
||||
}).Debug("uvm::AddSCSILayer")
|
||||
|
||||
if uvm.operatingSystem == "windows" {
|
||||
return -1, -1, ErrSCSILayerWCOWUnsupported
|
||||
}
|
||||
|
||||
return uvm.addSCSIActual(hostPath, "", "VirtualDisk", true, true)
|
||||
}
|
||||
|
||||
// addSCSIActual is the implementation behind the external functions AddSCSI and
|
||||
// AddSCSILayer.
|
||||
//
|
||||
// We are in control of everything ourselves. Hence we have ref- counting and
|
||||
// so-on tracking what SCSI locations are available or used.
|
||||
//
|
||||
// `hostPath` is required and may be a vhd/vhdx or physical disk path.
|
||||
//
|
||||
// `uvmPath` is optional, and `must` be empty for layers. If `!isLayer` and
|
||||
// `uvmPath` is empty no guest modify will take place.
|
||||
//
|
||||
// `attachmentType` is required and `must` be `VirtualDisk` for vhd/vhdx
|
||||
// attachments and `PassThru` for physical disk.
|
||||
//
|
||||
// `isLayer` indicates that this is a read-only (LCOW) layer VHD. This parameter
|
||||
// `must not` be used for Windows.
|
||||
//
|
||||
// `readOnly` indicates the attachment should be added read only.
|
||||
//
|
||||
// Returns the controller ID (0..3) and LUN (0..63) where the disk is attached.
|
||||
func (uvm *UtilityVM) addSCSIActual(hostPath, uvmPath, attachmentType string, isLayer, readOnly bool) (int, int32, error) {
|
||||
if uvm.scsiControllerCount == 0 {
|
||||
return -1, -1, ErrNoSCSIControllers
|
||||
}
|
||||
|
||||
// Ensure the utility VM has access
|
||||
if err := wclayer.GrantVmAccess(uvm.ID(), hostPath); err != nil {
|
||||
return -1, -1, err
|
||||
}
|
||||
|
||||
// We must hold the lock throughout the lookup (findSCSIAttachment) until
|
||||
// after the possible allocation (allocateSCSI) has been completed to ensure
|
||||
// there isn't a race condition for it being attached by another thread between
|
||||
// these two operations. All failure paths between these two must release
|
||||
// the lock.
|
||||
uvm.m.Lock()
|
||||
if controller, lun, _, err := uvm.findSCSIAttachment(hostPath); err == nil {
|
||||
// So is attached
|
||||
if isLayer {
|
||||
// Increment the refcount
|
||||
uvm.scsiLocations[controller][lun].refCount++
|
||||
logrus.Debugf("uvm::AddSCSI id:%s hostPath:%s refCount now %d", uvm.id, hostPath, uvm.scsiLocations[controller][lun].refCount)
|
||||
uvm.m.Unlock()
|
||||
return controller, int32(lun), nil
|
||||
}
|
||||
|
||||
uvm.m.Unlock()
|
||||
return -1, -1, ErrAlreadyAttached
|
||||
}
|
||||
|
||||
// At this point, we know it's not attached, regardless of whether it's a
|
||||
// ref-counted layer VHD, or not.
|
||||
controller, lun, err := uvm.allocateSCSI(hostPath, uvmPath, isLayer)
|
||||
if err != nil {
|
||||
uvm.m.Unlock()
|
||||
return -1, -1, err
|
||||
}
|
||||
|
||||
// Auto-generate the UVM path for LCOW layers
|
||||
if isLayer {
|
||||
uvmPath = fmt.Sprintf("/tmp/S%d/%d", controller, lun)
|
||||
}
|
||||
|
||||
// See comment higher up. Now safe to release the lock.
|
||||
uvm.m.Unlock()
|
||||
|
||||
// Note: Can remove this check post-RS5 if multiple controllers are supported
|
||||
if controller > 0 {
|
||||
uvm.deallocateSCSI(controller, lun)
|
||||
return -1, -1, ErrTooManyAttachments
|
||||
}
|
||||
|
||||
SCSIModification := &hcsschema.ModifySettingRequest{
|
||||
RequestType: requesttype.Add,
|
||||
Settings: hcsschema.Attachment{
|
||||
Path: hostPath,
|
||||
Type_: attachmentType,
|
||||
ReadOnly: readOnly,
|
||||
},
|
||||
ResourcePath: fmt.Sprintf("VirtualMachine/Devices/Scsi/%d/Attachments/%d", controller, lun),
|
||||
}
|
||||
|
||||
if uvmPath != "" {
|
||||
if uvm.operatingSystem == "windows" {
|
||||
SCSIModification.GuestRequest = guestrequest.GuestRequest{
|
||||
ResourceType: guestrequest.ResourceTypeMappedVirtualDisk,
|
||||
RequestType: requesttype.Add,
|
||||
Settings: guestrequest.WCOWMappedVirtualDisk{
|
||||
ContainerPath: uvmPath,
|
||||
Lun: lun,
|
||||
},
|
||||
}
|
||||
} else {
|
||||
SCSIModification.GuestRequest = guestrequest.GuestRequest{
|
||||
ResourceType: guestrequest.ResourceTypeMappedVirtualDisk,
|
||||
RequestType: requesttype.Add,
|
||||
Settings: guestrequest.LCOWMappedVirtualDisk{
|
||||
MountPath: uvmPath,
|
||||
Lun: uint8(lun),
|
||||
Controller: uint8(controller),
|
||||
ReadOnly: readOnly,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := uvm.Modify(SCSIModification); err != nil {
|
||||
uvm.deallocateSCSI(controller, lun)
|
||||
return -1, -1, fmt.Errorf("uvm::AddSCSI: failed to modify utility VM configuration: %s", err)
|
||||
}
|
||||
logrus.Debugf("uvm::AddSCSI id:%s hostPath:%s added at %d:%d", uvm.id, hostPath, controller, lun)
|
||||
return controller, int32(lun), nil
|
||||
|
||||
}
|
||||
|
||||
// RemoveSCSI removes a SCSI disk from a utility VM. As an external API, it
|
||||
// is "safe". Internal use can call removeSCSI.
|
||||
func (uvm *UtilityVM) RemoveSCSI(hostPath string) error {
|
||||
uvm.m.Lock()
|
||||
defer uvm.m.Unlock()
|
||||
|
||||
if uvm.scsiControllerCount == 0 {
|
||||
return ErrNoSCSIControllers
|
||||
}
|
||||
|
||||
// Make sure is actually attached
|
||||
controller, lun, uvmPath, err := uvm.findSCSIAttachment(hostPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if uvm.scsiLocations[controller][lun].isLayer {
|
||||
uvm.scsiLocations[controller][lun].refCount--
|
||||
if uvm.scsiLocations[controller][lun].refCount > 0 {
|
||||
logrus.Debugf("uvm::RemoveSCSI: refCount now %d: %s %s %d:%d", uvm.scsiLocations[controller][lun].refCount, hostPath, uvm.id, controller, lun)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
if err := uvm.removeSCSI(hostPath, uvmPath, controller, lun); err != nil {
|
||||
return fmt.Errorf("failed to remove SCSI disk %s from container %s: %s", hostPath, uvm.id, err)
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// removeSCSI is the internally callable "unsafe" version of RemoveSCSI. The mutex
|
||||
// MUST be held when calling this function.
|
||||
func (uvm *UtilityVM) removeSCSI(hostPath string, uvmPath string, controller int, lun int32) error {
|
||||
logrus.Debugf("uvm::RemoveSCSI id:%s hostPath:%s", uvm.id, hostPath)
|
||||
scsiModification := &hcsschema.ModifySettingRequest{
|
||||
RequestType: requesttype.Remove,
|
||||
ResourcePath: fmt.Sprintf("VirtualMachine/Devices/Scsi/%d/Attachments/%d", controller, lun),
|
||||
}
|
||||
|
||||
// Include the GuestRequest so that the GCS ejects the disk cleanly if the disk was attached/mounted
|
||||
if uvmPath != "" {
|
||||
if uvm.operatingSystem == "windows" {
|
||||
scsiModification.GuestRequest = guestrequest.GuestRequest{
|
||||
ResourceType: guestrequest.ResourceTypeMappedVirtualDisk,
|
||||
RequestType: requesttype.Remove,
|
||||
Settings: guestrequest.WCOWMappedVirtualDisk{
|
||||
ContainerPath: uvmPath,
|
||||
Lun: lun,
|
||||
},
|
||||
}
|
||||
} else {
|
||||
scsiModification.GuestRequest = guestrequest.GuestRequest{
|
||||
ResourceType: guestrequest.ResourceTypeMappedVirtualDisk,
|
||||
RequestType: requesttype.Remove,
|
||||
Settings: guestrequest.LCOWMappedVirtualDisk{
|
||||
MountPath: uvmPath, // May be blank in attach-only
|
||||
Lun: uint8(lun),
|
||||
Controller: uint8(controller),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := uvm.Modify(scsiModification); err != nil {
|
||||
return err
|
||||
}
|
||||
uvm.scsiLocations[controller][lun] = scsiInfo{}
|
||||
logrus.Debugf("uvm::RemoveSCSI: Success %s removed from %s %d:%d", hostPath, uvm.id, controller, lun)
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetScsiUvmPath returns the guest mounted path of a SCSI drive.
|
||||
//
|
||||
// If `hostPath` is not mounted returns `ErrNotAttached`.
|
||||
func (uvm *UtilityVM) GetScsiUvmPath(hostPath string) (string, error) {
|
||||
uvm.m.Lock()
|
||||
defer uvm.m.Unlock()
|
||||
|
||||
_, _, uvmPath, err := uvm.findSCSIAttachment(hostPath)
|
||||
return uvmPath, err
|
||||
}
|
98
vendor/github.com/Microsoft/hcsshim/internal/uvm/start.go
generated
vendored
@ -1,98 +0,0 @@
package uvm

import (
    "context"
    "encoding/json"
    "io"
    "io/ioutil"
    "net"
    "syscall"

    "github.com/sirupsen/logrus"
)

const _ERROR_CONNECTION_ABORTED syscall.Errno = 1236

var _ = (OutputHandler)(parseLogrus)

func parseLogrus(r io.Reader) {
    j := json.NewDecoder(r)
    logger := logrus.StandardLogger()
    for {
        e := logrus.Entry{Logger: logger}
        err := j.Decode(&e.Data)
        if err == io.EOF || err == _ERROR_CONNECTION_ABORTED {
            break
        }
        if err != nil {
            // Something went wrong. Read the rest of the data as a single
            // string and log it at once -- it's probably a GCS panic stack.
            logrus.Error("gcs log read: ", err)
            rest, _ := ioutil.ReadAll(io.MultiReader(j.Buffered(), r))
            if len(rest) != 0 {
                logrus.Error("gcs stderr: ", string(rest))
            }
            break
        }
        msg := e.Data["msg"]
        delete(e.Data, "msg")
        lvl := e.Data["level"]
        delete(e.Data, "level")
        e.Data["vm.time"] = e.Data["time"]
        delete(e.Data, "time")
        switch lvl {
        case "debug":
            e.Debug(msg)
        case "info":
            e.Info(msg)
        case "warning":
            e.Warning(msg)
        case "error", "fatal":
            e.Error(msg)
        default:
            e.Info(msg)
        }
    }
}

type acceptResult struct {
    c   net.Conn
    err error
}

func processOutput(ctx context.Context, l net.Listener, doneChan chan struct{}, handler OutputHandler) {
    defer close(doneChan)

    ch := make(chan acceptResult)
    go func() {
        c, err := l.Accept()
        ch <- acceptResult{c, err}
    }()

    select {
    case <-ctx.Done():
        l.Close()
        return
    case ar := <-ch:
        c, err := ar.c, ar.err
        l.Close()
        if err != nil {
            logrus.Error("accepting log socket: ", err)
            return
        }
        defer c.Close()

        handler(c)
    }
}

// Start synchronously starts the utility VM.
func (uvm *UtilityVM) Start() error {
    if uvm.outputListener != nil {
        ctx, cancel := context.WithCancel(context.Background())
        go processOutput(ctx, uvm.outputListener, uvm.outputProcessingDone, uvm.outputHandler)
        uvm.outputProcessingCancel = cancel
        uvm.outputListener = nil
    }
    return uvm.hcsSystem.Start()
}
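parseLogrus above is the default OutputHandler. A caller can substitute its own handler through OptionsLCOW.OutputHandler; the variant below is a hedged sketch (it is not in the vendored code) that mirrors the raw guest stream instead of decoding JSON entries, and assumes the bufio, io and logrus imports:

// Hypothetical alternative OutputHandler (not part of the vendored sources): logs each
// raw line received over the log vsock connection instead of parsing logrus JSON.
func rawOutputHandler(r io.Reader) {
    s := bufio.NewScanner(r)
    for s.Scan() {
        logrus.Info("gcs raw: ", s.Text())
    }
    if err := s.Err(); err != nil {
        logrus.Error("gcs raw read: ", err)
    }
}

// Wiring it in (sketch): opts := NewDefaultOptionsLCOW("demo", ""); opts.OutputHandler = rawOutputHandler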
7
vendor/github.com/Microsoft/hcsshim/internal/uvm/system.go
generated
vendored
@ -1,7 +0,0 @@
package uvm

import "github.com/Microsoft/hcsshim/internal/hcs"

func (uvm *UtilityVM) ComputeSystem() *hcs.System {
    return uvm.hcsSystem
}
7
vendor/github.com/Microsoft/hcsshim/internal/uvm/terminate.go
generated
vendored
@ -1,7 +0,0 @@
package uvm

// Terminate requests a utility VM terminate. If IsPending() on the error returned is true,
// it may not actually be shut down until Wait() succeeds.
func (uvm *UtilityVM) Terminate() error {
    return uvm.hcsSystem.Terminate()
}
105
vendor/github.com/Microsoft/hcsshim/internal/uvm/types.go
generated
vendored
@ -1,105 +0,0 @@
package uvm

// This package describes the external interface for utility VMs.

import (
    "context"
    "net"
    "sync"

    "github.com/Microsoft/hcsshim/internal/guid"
    "github.com/Microsoft/hcsshim/internal/hcs"
    "github.com/Microsoft/hcsshim/internal/hns"
)

//                    | WCOW | LCOW
// Container scratch  | SCSI | SCSI
// Scratch space      | ---- | SCSI  // For file system utilities. /tmp/scratch
// Read-Only Layer    | VSMB | VPMEM
// Mapped Directory   | VSMB | PLAN9

// vsmbShare is an internal structure used for ref-counting VSMB shares mapped to a Windows utility VM.
type vsmbShare struct {
    refCount     uint32
    name         string
    guestRequest interface{}
}

// scsiInfo is an internal structure used for determining what is mapped to a utility VM.
// hostPath is required. uvmPath may be blank.
type scsiInfo struct {
    hostPath string
    uvmPath  string

    // While most VHDs attached to SCSI are scratch spaces, in the case of LCOW
    // when the size is over the size possible to attach to PMEM, we use SCSI for
    // read-only layers. As RO layers are shared, we perform ref-counting.
    isLayer  bool
    refCount uint32
}

// vpmemInfo is an internal structure used for determining VPMem devices mapped to
// a Linux utility VM.
type vpmemInfo struct {
    hostPath string
    uvmPath  string
    refCount uint32
}

// plan9Info is an internal structure used for ref-counting Plan9 shares mapped to a Linux utility VM.
type plan9Info struct {
    refCount  uint32
    idCounter uint64
    uvmPath   string
    port      int32 // Temporary. TODO Remove
}

type nicInfo struct {
    ID       guid.GUID
    Endpoint *hns.HNSEndpoint
}

type namespaceInfo struct {
    nics     []nicInfo
    refCount int
}

// UtilityVM is the object used by clients representing a utility VM
type UtilityVM struct {
    id              string      // Identifier for the utility VM (user supplied or generated)
    owner           string      // Owner for the utility VM (user supplied or generated)
    operatingSystem string      // "windows" or "linux"
    hcsSystem       *hcs.System // The handle to the compute system
    m               sync.Mutex  // Lock for adding/removing devices

    // containerCounter is the current number of containers that have been
    // created. This is never decremented in the life of the UVM.
    //
    // NOTE: All accesses to this MUST be done atomically.
    containerCounter uint64

    // VSMB shares that are mapped into a Windows UVM. These are used for read-only
    // layers and mapped directories
    vsmbShares  map[string]*vsmbShare
    vsmbCounter uint64 // Counter to generate a unique share name for each VSMB share.

    // VPMEM devices that are mapped into a Linux UVM. These are used for read-only layers, or for
    // booting from VHD.
    vpmemDevices      [MaxVPMEMCount]vpmemInfo // Limited by ACPI size.
    vpmemMaxCount     uint32                   // Actual number of VPMem devices
    vpmemMaxSizeBytes uint64                   // Actual size of VPMem devices

    // SCSI devices that are mapped into a Windows or Linux utility VM
    scsiLocations       [4][64]scsiInfo // Hyper-V supports 4 controllers, 64 slots per controller. Limited to 1 controller for now though.
    scsiControllerCount uint32          // Number of SCSI controllers in the utility VM

    // Plan9 are directories mapped into a Linux utility VM
    plan9Shares  map[string]*plan9Info
    plan9Counter uint64 // Each newly-added plan9 share has a counter used as its ID in the ResourceURI and for the name

    namespaces map[string]*namespaceInfo

    outputListener         net.Listener
    outputProcessingDone   chan struct{}
    outputHandler          OutputHandler
    outputProcessingCancel context.CancelFunc
}
170
vendor/github.com/Microsoft/hcsshim/internal/uvm/vpmem.go
generated
vendored
@ -1,170 +0,0 @@
|
||||
package uvm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/Microsoft/hcsshim/internal/guestrequest"
|
||||
"github.com/Microsoft/hcsshim/internal/requesttype"
|
||||
"github.com/Microsoft/hcsshim/internal/schema2"
|
||||
"github.com/Microsoft/hcsshim/internal/wclayer"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// allocateVPMEM finds the next available VPMem slot. The lock MUST be held
|
||||
// when calling this function.
|
||||
func (uvm *UtilityVM) allocateVPMEM(hostPath string) (uint32, error) {
|
||||
for index, vi := range uvm.vpmemDevices {
|
||||
if vi.hostPath == "" {
|
||||
vi.hostPath = hostPath
|
||||
logrus.Debugf("uvm::allocateVPMEM %d %q", index, hostPath)
|
||||
return uint32(index), nil
|
||||
}
|
||||
}
|
||||
return 0, fmt.Errorf("no free VPMEM locations")
|
||||
}
|
||||
|
||||
func (uvm *UtilityVM) deallocateVPMEM(deviceNumber uint32) error {
|
||||
uvm.m.Lock()
|
||||
defer uvm.m.Unlock()
|
||||
uvm.vpmemDevices[deviceNumber] = vpmemInfo{}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Lock must be held when calling this function
|
||||
func (uvm *UtilityVM) findVPMEMDevice(findThisHostPath string) (uint32, string, error) {
|
||||
for deviceNumber, vi := range uvm.vpmemDevices {
|
||||
if vi.hostPath == findThisHostPath {
|
||||
logrus.Debugf("uvm::findVPMEMDeviceNumber %d %s", deviceNumber, findThisHostPath)
|
||||
return uint32(deviceNumber), vi.uvmPath, nil
|
||||
}
|
||||
}
|
||||
return 0, "", fmt.Errorf("%s is not attached to VPMEM", findThisHostPath)
|
||||
}
|
||||
|
||||
// AddVPMEM adds a VPMEM disk to a utility VM at the next available location.
|
||||
//
|
||||
// Returns the location(0..MaxVPMEM-1) where the device is attached, and if exposed,
|
||||
// the utility VM path which will be /tmp/p<location>//
|
||||
func (uvm *UtilityVM) AddVPMEM(hostPath string, expose bool) (uint32, string, error) {
|
||||
if uvm.operatingSystem != "linux" {
|
||||
return 0, "", errNotSupported
|
||||
}
|
||||
|
||||
logrus.Debugf("uvm::AddVPMEM id:%s hostPath:%s expose:%t", uvm.id, hostPath, expose)
|
||||
|
||||
uvm.m.Lock()
|
||||
defer uvm.m.Unlock()
|
||||
|
||||
var deviceNumber uint32
|
||||
var err error
|
||||
uvmPath := ""
|
||||
|
||||
deviceNumber, uvmPath, err = uvm.findVPMEMDevice(hostPath)
|
||||
if err != nil {
|
||||
// Ensure the utility VM has access
|
||||
if err := wclayer.GrantVmAccess(uvm.ID(), hostPath); err != nil {
|
||||
return 0, "", err
|
||||
}
|
||||
|
||||
// It doesn't exist, so we're going to allocate and hot-add it
|
||||
deviceNumber, err = uvm.allocateVPMEM(hostPath)
|
||||
if err != nil {
|
||||
return 0, "", err
|
||||
}
|
||||
|
||||
modification := &hcsschema.ModifySettingRequest{
|
||||
RequestType: requesttype.Add,
|
||||
Settings: hcsschema.VirtualPMemDevice{
|
||||
HostPath: hostPath,
|
||||
ReadOnly: true,
|
||||
ImageFormat: "Vhd1",
|
||||
},
|
||||
ResourcePath: fmt.Sprintf("VirtualMachine/Devices/VirtualPMem/Devices/%d", deviceNumber),
|
||||
}
|
||||
|
||||
if expose {
|
||||
uvmPath = fmt.Sprintf("/tmp/p%d", deviceNumber)
|
||||
modification.GuestRequest = guestrequest.GuestRequest{
|
||||
ResourceType: guestrequest.ResourceTypeVPMemDevice,
|
||||
RequestType: requesttype.Add,
|
||||
Settings: guestrequest.LCOWMappedVPMemDevice{
|
||||
DeviceNumber: deviceNumber,
|
||||
MountPath: uvmPath,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
if err := uvm.Modify(modification); err != nil {
|
||||
uvm.vpmemDevices[deviceNumber] = vpmemInfo{}
|
||||
return 0, "", fmt.Errorf("uvm::AddVPMEM: failed to modify utility VM configuration: %s", err)
|
||||
}
|
||||
|
||||
uvm.vpmemDevices[deviceNumber] = vpmemInfo{
|
||||
hostPath: hostPath,
|
||||
refCount: 1,
|
||||
uvmPath: uvmPath}
|
||||
} else {
|
||||
pmemi := vpmemInfo{
|
||||
hostPath: hostPath,
|
||||
refCount: uvm.vpmemDevices[deviceNumber].refCount + 1,
|
||||
uvmPath: uvmPath}
|
||||
uvm.vpmemDevices[deviceNumber] = pmemi
|
||||
}
|
||||
logrus.Debugf("hcsshim::AddVPMEM id:%s Success %+v", uvm.id, uvm.vpmemDevices[deviceNumber])
|
||||
return deviceNumber, uvmPath, nil
|
||||
|
||||
}
|
||||
|
||||
// RemoveVPMEM removes a VPMEM disk from a utility VM. As an external API, it
|
||||
// is "safe". Internal use can call removeVPMEM.
|
||||
func (uvm *UtilityVM) RemoveVPMEM(hostPath string) error {
|
||||
if uvm.operatingSystem != "linux" {
|
||||
return errNotSupported
|
||||
}
|
||||
|
||||
uvm.m.Lock()
|
||||
defer uvm.m.Unlock()
|
||||
|
||||
// Make sure is actually attached
|
||||
deviceNumber, uvmPath, err := uvm.findVPMEMDevice(hostPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot remove VPMEM %s as it is not attached to utility VM %s: %s", hostPath, uvm.id, err)
|
||||
}
|
||||
|
||||
if err := uvm.removeVPMEM(hostPath, uvmPath, deviceNumber); err != nil {
|
||||
return fmt.Errorf("failed to remove VPMEM %s from utility VM %s: %s", hostPath, uvm.id, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// removeVPMEM is the internally callable "unsafe" version of RemoveVPMEM. The mutex
// MUST be held when calling this function.
func (uvm *UtilityVM) removeVPMEM(hostPath string, uvmPath string, deviceNumber uint32) error {
	logrus.Debugf("uvm::RemoveVPMEM id:%s hostPath:%s device:%d", uvm.id, hostPath, deviceNumber)

	if uvm.vpmemDevices[deviceNumber].refCount == 1 {
		modification := &hcsschema.ModifySettingRequest{
			RequestType:  requesttype.Remove,
			ResourcePath: fmt.Sprintf("VirtualMachine/Devices/VirtualPMem/Devices/%d", deviceNumber),
			GuestRequest: guestrequest.GuestRequest{
				ResourceType: guestrequest.ResourceTypeVPMemDevice,
				RequestType:  requesttype.Remove,
				Settings: guestrequest.LCOWMappedVPMemDevice{
					DeviceNumber: deviceNumber,
					MountPath:    uvmPath,
				},
			},
		}

		if err := uvm.Modify(modification); err != nil {
			return err
		}
		uvm.vpmemDevices[deviceNumber] = vpmemInfo{}
		logrus.Debugf("uvm::RemoveVPMEM: Success id:%s hostPath:%s device:%d", uvm.id, hostPath, deviceNumber)
		return nil
	}
	uvm.vpmemDevices[deviceNumber].refCount--
	logrus.Debugf("uvm::RemoveVPMEM: Success id:%s hostPath:%s device:%d refCount:%d", uvm.id, hostPath, deviceNumber, uvm.vpmemDevices[deviceNumber].refCount)
	return nil

}
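The ref-counting between AddVPMEM and removeVPMEM is symmetric: each Add of the same hostPath bumps the count, and only the final Remove detaches the device. A minimal sketch, assuming a *UtilityVM (vm) and a host VHD path; the function and variable names are illustrative:

func vpmemRefCountExample(vm *UtilityVM, vhd string) error {
	if _, _, err := vm.AddVPMEM(vhd, true); err != nil { // refCount -> 1, device hot-added
		return err
	}
	if _, _, err := vm.AddVPMEM(vhd, true); err != nil { // refCount -> 2, no HCS call
		return err
	}
	if err := vm.RemoveVPMEM(vhd); err != nil { // refCount -> 1, still attached
		return err
	}
	return vm.RemoveVPMEM(vhd) // refCount -> 0, device detached from the UVM
}
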
112
vendor/github.com/Microsoft/hcsshim/internal/uvm/vsmb.go
generated
vendored
@ -1,112 +0,0 @@
package uvm

import (
	"fmt"
	"strconv"

	"github.com/Microsoft/hcsshim/internal/requesttype"
	"github.com/Microsoft/hcsshim/internal/schema2"
	"github.com/sirupsen/logrus"
)

// findVSMBShare finds a share by `hostPath`. If not found returns `ErrNotAttached`.
func (uvm *UtilityVM) findVSMBShare(hostPath string) (*vsmbShare, error) {
	share, ok := uvm.vsmbShares[hostPath]
	if !ok {
		return nil, ErrNotAttached
	}
	return share, nil
}

func (share *vsmbShare) GuestPath() string {
	return `\\?\VMSMB\VSMB-{dcc079ae-60ba-4d07-847c-3493609c0870}\` + share.name
}

// AddVSMB adds a VSMB share to a Windows utility VM. Each VSMB share is ref-counted and
// only added if it isn't already. This is used for read-only layers, mapped directories
// to a container, and for mapped pipes.
func (uvm *UtilityVM) AddVSMB(hostPath string, guestRequest interface{}, options *hcsschema.VirtualSmbShareOptions) error {
	if uvm.operatingSystem != "windows" {
		return errNotSupported
	}

	logrus.Debugf("uvm::AddVSMB %s %+v %+v id:%s", hostPath, guestRequest, options, uvm.id)
	uvm.m.Lock()
	defer uvm.m.Unlock()
	share, err := uvm.findVSMBShare(hostPath)
	if err == ErrNotAttached {
		uvm.vsmbCounter++
		shareName := "s" + strconv.FormatUint(uvm.vsmbCounter, 16)

		modification := &hcsschema.ModifySettingRequest{
			RequestType: requesttype.Add,
			Settings: hcsschema.VirtualSmbShare{
				Name:    shareName,
				Options: options,
				Path:    hostPath,
			},
			ResourcePath: "VirtualMachine/Devices/VirtualSmb/Shares",
		}

		if err := uvm.Modify(modification); err != nil {
			return err
		}
		share = &vsmbShare{
			name:         shareName,
			guestRequest: guestRequest,
		}
		uvm.vsmbShares[hostPath] = share
	}
	share.refCount++
	logrus.Debugf("hcsshim::AddVSMB Success %s: refcount=%d %+v", hostPath, share.refCount, share)
	return nil
}

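A minimal caller sketch, assuming a running WCOW *UtilityVM (wcowUVM) and a host directory to share read-only; the helper name is illustrative, and the options shown are one plausible read-only configuration rather than a required one:

func shareHostDirReadOnly(wcowUVM *UtilityVM, hostDir string) error {
	opts := &hcsschema.VirtualSmbShareOptions{
		ReadOnly:  true, // the guest sees the share read-only
		CacheIo:   true,
		ShareRead: true,
	}
	// guestRequest is nil here; callers that need a guest-side mapping pass one.
	return wcowUVM.AddVSMB(hostDir, nil, opts)
}
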
// RemoveVSMB removes a VSMB share from a utility VM. Each VSMB share is ref-counted
// and only actually removed when the ref-count drops to zero.
func (uvm *UtilityVM) RemoveVSMB(hostPath string) error {
	if uvm.operatingSystem != "windows" {
		return errNotSupported
	}
	logrus.Debugf("uvm::RemoveVSMB %s id:%s", hostPath, uvm.id)
	uvm.m.Lock()
	defer uvm.m.Unlock()
	share, err := uvm.findVSMBShare(hostPath)
	if err != nil {
		return fmt.Errorf("%s is not present as a VSMB share in %s, cannot remove", hostPath, uvm.id)
	}

	share.refCount--
	if share.refCount > 0 {
		logrus.Debugf("uvm::RemoveVSMB Success %s id:%s Ref-count now %d. It is still present in the utility VM", hostPath, uvm.id, share.refCount)
		return nil
	}
	logrus.Debugf("uvm::RemoveVSMB Zero ref-count, removing. %s id:%s", hostPath, uvm.id)
	modification := &hcsschema.ModifySettingRequest{
		RequestType:  requesttype.Remove,
		Settings:     hcsschema.VirtualSmbShare{Name: share.name},
		ResourcePath: "VirtualMachine/Devices/VirtualSmb/Shares",
	}
	if err := uvm.Modify(modification); err != nil {
		return fmt.Errorf("failed to remove vsmb share %s from %s: %+v: %s", hostPath, uvm.id, modification, err)
	}
	logrus.Debugf("uvm::RemoveVSMB Success %s id:%s successfully removed from utility VM", hostPath, uvm.id)
	delete(uvm.vsmbShares, hostPath)
	return nil
}

// GetVSMBUvmPath returns the guest path of a VSMB mount.
func (uvm *UtilityVM) GetVSMBUvmPath(hostPath string) (string, error) {
	if hostPath == "" {
		return "", fmt.Errorf("no hostPath passed to GetVSMBUvmPath")
	}
	uvm.m.Lock()
	defer uvm.m.Unlock()
	share, err := uvm.findVSMBShare(hostPath)
	if err != nil {
		return "", err
	}
	path := share.GuestPath()
	logrus.Debugf("uvm::GetVSMBUvmPath Success %s id:%s path:%s", hostPath, uvm.id, path)
	return path, nil
}
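Resolving the in-guest path and releasing the share pair up naturally with AddVSMB. A minimal sketch, again assuming a WCOW *UtilityVM (wcowUVM) that already has hostDir shared; names are illustrative:

func useAndReleaseShare(wcowUVM *UtilityVM, hostDir string) error {
	guestPath, err := wcowUVM.GetVSMBUvmPath(hostDir)
	if err != nil {
		return err
	}
	// guestPath has the form \\?\VMSMB\VSMB-{...}\s<counter> and can be handed
	// to the container spec. Drop the ref-count when done with it.
	logrus.Debugf("share visible in the guest at %s", guestPath)
	return wcowUVM.RemoveVSMB(hostDir)
}
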
46
vendor/github.com/Microsoft/hcsshim/internal/uvm/wait.go
generated
vendored
@ -1,46 +0,0 @@
package uvm

import (
	"github.com/Microsoft/hcsshim/internal/logfields"
	"github.com/sirupsen/logrus"
)

func (uvm *UtilityVM) waitForOutput() {
	logrus.WithField(logfields.UVMID, uvm.ID()).
		Debug("UVM exited, waiting for output processing to complete")
	if uvm.outputProcessingDone != nil {
		<-uvm.outputProcessingDone
	}
}

// Wait synchronously waits for a utility VM to terminate.
func (uvm *UtilityVM) Wait() error {
	err := uvm.hcsSystem.Wait()

	// outputProcessingCancel will only cancel waiting for the vsockexec
	// connection, it won't stop output processing once the connection is
	// established.
	if uvm.outputProcessingCancel != nil {
		uvm.outputProcessingCancel()
	}
	uvm.waitForOutput()

	return err
}

// WaitExpectedError synchronously waits for a utility VM to terminate. If the
// UVM terminates successfully, or if the given error is encountered internally
// during the wait, this function returns nil.
func (uvm *UtilityVM) WaitExpectedError(expected error) error {
	err := uvm.hcsSystem.WaitExpectedError(expected)

	// outputProcessingCancel will only cancel waiting for the vsockexec
	// connection, it won't stop output processing once the connection is
	// established.
	if uvm.outputProcessingCancel != nil {
		uvm.outputProcessingCancel()
	}
	uvm.waitForOutput()

	return err
}
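A minimal caller sketch, assuming a created and started *UtilityVM (vm) whose lifetime the caller owns; typically this would run in its own goroutine while the caller does other work:

func monitorUVM(vm *UtilityVM) {
	if err := vm.Wait(); err != nil {
		logrus.WithField(logfields.UVMID, vm.ID()).WithError(err).Error("utility VM terminated unexpectedly")
		return
	}
	logrus.WithField(logfields.UVMID, vm.ID()).Debug("utility VM terminated cleanly")
}
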
35
vendor/github.com/Microsoft/hcsshim/internal/uvmfolder/locate.go
generated
vendored
@ -1,35 +0,0 @@
package uvmfolder

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/sirupsen/logrus"
)

// LocateUVMFolder searches a set of layer folders to determine the "uppermost"
// layer which has a utility VM image. The order of the layers is, for historical
// reasons, read-only layers followed by an optional read-write layer. The RO layers
// are in reverse order so that the upper-most RO layer is at the start, and the
// base OS layer is at the end.
func LocateUVMFolder(layerFolders []string) (string, error) {
	var uvmFolder string
	index := 0
	for _, layerFolder := range layerFolders {
		_, err := os.Stat(filepath.Join(layerFolder, `UtilityVM`))
		if err == nil {
			uvmFolder = layerFolder
			break
		}
		if !os.IsNotExist(err) {
			return "", err
		}
		index++
	}
	if uvmFolder == "" {
		return "", fmt.Errorf("utility VM folder could not be found in layers")
	}
	logrus.Debugf("hcsshim::LocateUVMFolder At %d of %d: %s", index+1, len(layerFolders), uvmFolder)
	return uvmFolder, nil
}
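A minimal caller sketch of the layer ordering described above, assuming a Windows base image unpacked under C:\layers; the paths and helper name are illustrative:

func findUVMImage() (string, error) {
	// Upper-most read-only layer first, base OS layer last, per the contract above.
	layerFolders := []string{
		`C:\layers\app-layer`,
		`C:\layers\base-os-layer`,
	}
	uvmFolder, err := uvmfolder.LocateUVMFolder(layerFolders)
	if err != nil {
		return "", err
	}
	// The utility VM image itself lives under <uvmFolder>\UtilityVM.
	return filepath.Join(uvmFolder, `UtilityVM`), nil
}
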
26
vendor/github.com/Microsoft/hcsshim/internal/wcow/scratch.go
generated
vendored
@ -1,26 +0,0 @@
package wcow

import (
	"os"
	"path/filepath"

	"github.com/Microsoft/hcsshim/internal/copyfile"
	"github.com/Microsoft/hcsshim/internal/wclayer"
	"github.com/sirupsen/logrus"
)

// CreateUVMScratch is a helper to create a scratch for a Windows utility VM
// with permissions to the specified VM ID in a specified directory.
func CreateUVMScratch(imagePath, destDirectory, vmID string) error {
	sourceScratch := filepath.Join(imagePath, `UtilityVM\SystemTemplate.vhdx`)
	targetScratch := filepath.Join(destDirectory, "sandbox.vhdx")
	logrus.Debugf("uvm::CreateUVMScratch %s from %s", targetScratch, sourceScratch)
	if err := copyfile.CopyFile(sourceScratch, targetScratch, true); err != nil {
		return err
	}
	if err := wclayer.GrantVmAccess(vmID, targetScratch); err != nil {
		os.Remove(targetScratch)
		return err
	}
	return nil
}
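A minimal caller sketch, assuming a WCOW utility VM image folder and a per-VM scratch directory that already exists; the paths and vmID are placeholders:

func prepareScratchExample() error {
	uvmImagePath := `C:\layers\base-os-layer`         // folder containing UtilityVM\SystemTemplate.vhdx
	scratchDir := `C:\ProgramData\vmdata\example-uvm` // destination for sandbox.vhdx
	vmID := "example-uvm"                             // ID of the utility VM that needs access
	return wcow.CreateUVMScratch(uvmImagePath, scratchDir, vmID)
}
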