vendor update

This commit is contained in:
Nathan Gieseker 2019-06-17 21:33:55 -07:00
parent 13fbc4afdf
commit e8c953999e
900 changed files with 36135 additions and 265442 deletions

View File

@ -1,191 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2014 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,22 +0,0 @@
runhcs is a fork of runc.
The following is runc's legal notice.
---
runc
Copyright 2012-2015 Docker, Inc.
This product includes software developed at Docker, Inc. (http://www.docker.com).
The following is courtesy of our legal counsel:
Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.
For more information, please see http://www.bis.doc.gov
See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.

View File

@ -1,848 +0,0 @@
package main
import (
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"time"
winio "github.com/Microsoft/go-winio"
"github.com/Microsoft/hcsshim/internal/cni"
"github.com/Microsoft/hcsshim/internal/guid"
"github.com/Microsoft/hcsshim/internal/hcs"
"github.com/Microsoft/hcsshim/internal/hcsoci"
"github.com/Microsoft/hcsshim/internal/logfields"
"github.com/Microsoft/hcsshim/internal/regstate"
"github.com/Microsoft/hcsshim/internal/runhcs"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/Microsoft/hcsshim/osversion"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
"golang.org/x/sys/windows"
)
// errContainerStopped is returned when an operation requires a running
// container but the target has already stopped.
var errContainerStopped = errors.New("container is stopped")

// persistedState is the container record serialized to the registry under
// `keyState`. It is the durable source of truth used by later runhcs
// invocations (state/kill/delete) to rehydrate a `container`.
type persistedState struct {
// ID is the id of this container/UVM.
ID string `json:",omitempty"`
// Owner is the owner value passed into the runhcs command and may be `""`.
Owner string `json:",omitempty"`
// SandboxID is the sandbox identifier passed in via OCI specifications. This
// can either be the sandbox itself or the sandbox this container should run
// in. See `parseSandboxAnnotations`.
SandboxID string `json:",omitempty"`
// HostID will be VM ID hosting this container. If a sandbox is used it will
// match the `SandboxID`.
HostID string `json:",omitempty"`
// Bundle is the folder path on disk where the container state and spec files
// reside.
Bundle string `json:",omitempty"`
Created time.Time `json:",omitempty"`
Rootfs string `json:",omitempty"`
// Spec is the in memory deserialized values found on `Bundle\config.json`.
Spec *specs.Spec `json:",omitempty"`
RequestedNetNS string `json:",omitempty"`
// IsHost is `true` when this is a VM isolated config.
IsHost bool `json:",omitempty"`
// UniqueID is a unique ID generated per container config.
UniqueID guid.GUID `json:",omitempty"`
// HostUniqueID is the unique ID of the hosting VM if this container is
// hosted.
HostUniqueID guid.GUID `json:",omitempty"`
}

// containerStatus is the lifecycle state reported for a container (see
// `(*container).Status`).
type containerStatus string

const (
containerRunning containerStatus = "running"
containerStopped containerStatus = "stopped"
containerCreated containerStatus = "created"
containerPaused containerStatus = "paused"
containerUnknown containerStatus = "unknown"
// Registry value names under which per-container data is persisted.
keyState = "state"
keyResources = "resources"
keyShimPid = "shim"
keyInitPid = "pid"
keyNetNS = "netns"
// keyPidMapFmt is the format to use when mapping a host OS pid to a guest
// pid.
keyPidMapFmt = "pid-%d"
)

// container couples the persisted registry state with the live, in-process
// handles for a single container: the shim pid, the open compute system,
// and the resources allocated for it.
type container struct {
persistedState
ShimPid int
hc *hcs.System
resources *hcsoci.Resources
}
// startProcessShim launches a "shim" subcommand for container `id`, handing
// it the current process's stdio file descriptors. When `spec` is non-nil
// the shim is told to exec a process (`--exec`) and `spec` is sent to it as
// JSON via launchShim's data pipe.
func startProcessShim(id, pidFile, logFile string, spec *specs.Process) (_ *os.Process, err error) {
	// Ensure the stdio handles inherit to the child process. This isn't undone
	// after the StartProcess call because the caller never launches another
	// process before exiting.
	stdio := []*os.File{os.Stdin, os.Stdout, os.Stderr}
	for _, h := range stdio {
		if err = windows.SetHandleInformation(windows.Handle(h.Fd()), windows.HANDLE_FLAG_INHERIT, windows.HANDLE_FLAG_INHERIT); err != nil {
			return nil, err
		}
	}
	shimArgs := []string{
		"--stdin", strconv.Itoa(int(os.Stdin.Fd())),
		"--stdout", strconv.Itoa(int(os.Stdout.Fd())),
		"--stderr", strconv.Itoa(int(os.Stderr.Fd())),
	}
	if spec != nil {
		shimArgs = append(shimArgs, "--exec")
	}
	if strings.HasPrefix(logFile, runhcs.SafePipePrefix) {
		shimArgs = append(shimArgs, "--log-pipe", logFile)
	}
	shimArgs = append(shimArgs, id)
	return launchShim("shim", pidFile, logFile, shimArgs, spec)
}
// launchShim re-executes the current binary as subcommand `cmd` with `args`,
// wiring three file descriptors into the child:
//   fd 0 - read end of a data pipe carrying `data` as JSON (nil if no data),
//   fd 1 - write end of an error pipe used to report early startup status,
//   fd 2 - the log file (nil when logging goes to a pipe or is disabled).
// It blocks until the child reports readiness or failure over the error
// pipe, optionally writes the child's pid to `pidFile`, and returns the
// started process. On any error after the child starts, the child is killed.
func launchShim(cmd, pidFile, logFile string, args []string, data interface{}) (_ *os.Process, err error) {
executable, err := os.Executable()
if err != nil {
return nil, err
}
// Create a pipe to use as stderr for the shim process. This is used to
// retrieve early error information, up to the point that the shim is ready
// to launch a process in the container.
rp, wp, err := os.Pipe()
if err != nil {
return nil, err
}
defer rp.Close()
defer wp.Close()
// Create a pipe to send the data, if one is provided.
var rdatap, wdatap *os.File
if data != nil {
rdatap, wdatap, err = os.Pipe()
if err != nil {
return nil, err
}
defer rdatap.Close()
defer wdatap.Close()
}
var log *os.File
fullargs := []string{os.Args[0]}
if logFile != "" {
// Pipe-style log targets are passed through via the --log-pipe flag by
// the caller; only plain file paths are opened here.
if !strings.HasPrefix(logFile, runhcs.SafePipePrefix) {
log, err = os.OpenFile(logFile, os.O_CREATE|os.O_WRONLY|os.O_APPEND|os.O_SYNC, 0666)
if err != nil {
return nil, err
}
defer log.Close()
}
fullargs = append(fullargs, "--log-format", logFormat)
if logrus.GetLevel() == logrus.DebugLevel {
fullargs = append(fullargs, "--debug")
}
}
fullargs = append(fullargs, cmd)
fullargs = append(fullargs, args...)
// Child fd layout: 0 = data pipe read end, 1 = error pipe write end, 2 = log.
attr := &os.ProcAttr{
Files: []*os.File{rdatap, wp, log},
}
p, err := os.StartProcess(executable, fullargs, attr)
if err != nil {
return nil, err
}
// Kill the child if any of the remaining steps fail (err is the named
// return, so this sees failures from the code below).
defer func() {
if err != nil {
p.Kill()
}
}()
// Close the parent's copy of the error-pipe write end so the read below
// sees EOF once the child closes its end.
wp.Close()
// Write the data if provided.
if data != nil {
rdatap.Close()
dataj, err := json.Marshal(data)
if err != nil {
return nil, err
}
_, err = wdatap.Write(dataj)
if err != nil {
return nil, err
}
wdatap.Close()
}
err = runhcs.GetErrorFromPipe(rp, p)
if err != nil {
return nil, err
}
if pidFile != "" {
if err = createPidFile(pidFile, p.Pid); err != nil {
return nil, err
}
}
return p, nil
}
// parseSandboxAnnotations searches `a` for the sandbox annotations used by
// the containerd CRI plugin, CRI-O, and dockershim.
//
// If found it returns the tuple `(sandboxID, isSandbox)` where
// `isSandbox == true` indicates the identifier is the sandbox itself;
// `isSandbox == false` indicates the identifier is the sandbox in which to
// place this container. Otherwise returns `("", false)`.
func parseSandboxAnnotations(a map[string]string) (string, bool) {
	var kind, id string
	switch {
	case a["io.kubernetes.cri.container-type"] != "":
		kind = a["io.kubernetes.cri.container-type"]
		id = a["io.kubernetes.cri.sandbox-id"]
	case a["io.kubernetes.cri-o.ContainerType"] != "":
		kind = a["io.kubernetes.cri-o.ContainerType"]
		id = a["io.kubernetes.cri-o.SandboxID"]
	case a["io.kubernetes.docker.type"] != "":
		kind = a["io.kubernetes.docker.type"]
		id = a["io.kubernetes.sandbox.id"]
		// dockershim calls its sandbox type "podsandbox"; normalize it.
		if kind == "podsandbox" {
			kind = "sandbox"
		}
	}
	switch kind {
	case "container":
		return id, false
	case "sandbox":
		return id, true
	default:
		return "", false
	}
}
// parseAnnotationsBool searches `a` for `key` and if found verifies that the
// value is `true` or `false` in any case. If `key` is not found, or the
// value does not parse, returns `def` (logging a warning in the latter case).
func parseAnnotationsBool(a map[string]string, key string, def bool) bool {
	v, ok := a[key]
	if !ok {
		return def
	}
	lowered := strings.ToLower(v)
	if lowered == "true" {
		return true
	}
	if lowered == "false" {
		return false
	}
	logrus.WithFields(logrus.Fields{
		logfields.OCIAnnotation: key,
		logfields.Value:         v,
		logfields.ExpectedType:  logfields.Bool,
	}).Warning("annotation could not be parsed")
	return def
}
// parseAnnotationsCPU searches `s.Annotations` for the CPU annotation. If
// not found falls back to the Windows CPU count in the spec. If neither is
// present (or positive) returns `def`.
func parseAnnotationsCPU(s *specs.Spec, annotation string, def int32) int32 {
	if v := parseAnnotationsUint64(s.Annotations, annotation, 0); v != 0 {
		return int32(v)
	}
	win := s.Windows
	if win == nil || win.Resources == nil || win.Resources.CPU == nil {
		return def
	}
	if count := win.Resources.CPU.Count; count != nil && *count > 0 {
		return int32(*count)
	}
	return def
}
// parseAnnotationsMemory searches `s.Annotations` for the memory annotation.
// If not found falls back to the Windows memory limit in the spec. If
// neither is present (or positive) returns `def`.
func parseAnnotationsMemory(s *specs.Spec, annotation string, def int32) int32 {
	if v := parseAnnotationsUint64(s.Annotations, annotation, 0); v != 0 {
		return int32(v)
	}
	win := s.Windows
	if win == nil || win.Resources == nil || win.Resources.Memory == nil {
		return def
	}
	if limit := win.Resources.Memory.Limit; limit != nil && *limit > 0 {
		return int32(*limit)
	}
	return def
}
// parseAnnotationsPreferredRootFSType searches `a` for `key` and verifies
// that the value is either "initrd" or "vhd". If `key` is not found returns
// `def`; an unrecognized value logs a warning and also returns `def`.
func parseAnnotationsPreferredRootFSType(a map[string]string, key string, def uvm.PreferredRootFSType) uvm.PreferredRootFSType {
	v, ok := a[key]
	if !ok {
		return def
	}
	switch v {
	case "initrd":
		return uvm.PreferredRootFSTypeInitRd
	case "vhd":
		return uvm.PreferredRootFSTypeVHD
	}
	logrus.Warningf("annotation: '%s', with value: '%s' must be 'initrd' or 'vhd'", key, v)
	return def
}
// parseAnnotationsUint32 searches `a` for `key` and if found verifies that
// the value is a 32 bit unsigned integer. If `key` is not found, or the
// value fails to parse (logging a warning), returns `def`.
func parseAnnotationsUint32(a map[string]string, key string, def uint32) uint32 {
	v, ok := a[key]
	if !ok {
		return def
	}
	parsed, err := strconv.ParseUint(v, 10, 32)
	if err != nil {
		logrus.WithFields(logrus.Fields{
			logfields.OCIAnnotation: key,
			logfields.Value:         v,
			logfields.ExpectedType:  logfields.Uint32,
			logrus.ErrorKey:         err,
		}).Warning("annotation could not be parsed")
		return def
	}
	return uint32(parsed)
}
// parseAnnotationsUint64 searches `a` for `key` and if found verifies that
// the value is a 64 bit unsigned integer. If `key` is not found, or the
// value fails to parse (logging a warning), returns `def`.
func parseAnnotationsUint64(a map[string]string, key string, def uint64) uint64 {
	v, ok := a[key]
	if !ok {
		return def
	}
	parsed, err := strconv.ParseUint(v, 10, 64)
	if err != nil {
		logrus.WithFields(logrus.Fields{
			logfields.OCIAnnotation: key,
			logfields.Value:         v,
			logfields.ExpectedType:  logfields.Uint64,
			logrus.ErrorKey:         err,
		}).Warning("annotation could not be parsed")
		return def
	}
	return parsed
}
// startVMShim starts a vm-shim command with the specified `opts`. `opts`
// can be `*uvm.OptionsWCOW` or `*uvm.OptionsLCOW`; the guest OS flag is
// derived from which option type was supplied.
func (c *container) startVMShim(logFile string, opts interface{}) (*os.Process, error) {
	// FIX: the local was previously named `os`, shadowing the `os` package
	// within this function; renamed to `guestOS`.
	guestOS := "windows"
	if _, ok := opts.(*uvm.OptionsLCOW); ok {
		guestOS = "linux"
	}
	args := []string{"--os", guestOS}
	if strings.HasPrefix(logFile, runhcs.SafePipePrefix) {
		args = append(args, "--log-pipe", logFile)
	}
	// The vmshim listens on the VM's pipe; `opts` is sent to it as JSON by
	// launchShim.
	args = append(args, c.VMPipePath())
	return launchShim("vmshim", "", logFile, args, opts)
}
// containerConfig collects the inputs needed by createContainer, as parsed
// from the runhcs command line and the OCI bundle.
type containerConfig struct {
// ID is the caller-chosen container id; HostID optionally names the UVM
// to place the container in.
ID string
Owner string
HostID string
// PidFile, if non-empty, receives the shim's pid.
PidFile string
// ShimLogFile/VMLogFile are log destinations (file paths or safe pipes).
ShimLogFile, VMLogFile string
// Spec is the deserialized OCI runtime spec from the bundle.
Spec *specs.Spec
VMConsolePipe string
}
// createContainer creates, but does not start, a container (and, when
// required, its hosting utility VM) from `cfg`.
//
// The initial state is persisted to the registry before any expensive work
// so that the `delete` command can clean everything up if a later step
// fails. On success the container's shim process has been launched and the
// returned container holds an open handle to the compute system.
func createContainer(cfg *containerConfig) (_ *container, err error) {
	// Store the container information in a volatile registry key.
	cwd, err := os.Getwd()
	if err != nil {
		return nil, err
	}

	// VM isolated == LCOW, or WCOW with Hyper-V isolation requested.
	vmisolated := cfg.Spec.Linux != nil || (cfg.Spec.Windows != nil && cfg.Spec.Windows.HyperV != nil)

	sandboxID, isSandbox := parseSandboxAnnotations(cfg.Spec.Annotations)
	hostID := cfg.HostID
	if isSandbox {
		if sandboxID != cfg.ID {
			return nil, errors.New("sandbox ID must match ID")
		}
	} else if sandboxID != "" {
		// Validate that the sandbox container exists.
		sandbox, err := getContainer(sandboxID, false)
		if err != nil {
			return nil, err
		}
		defer sandbox.Close()
		if sandbox.SandboxID != sandboxID {
			return nil, fmt.Errorf("container %s is not a sandbox", sandboxID)
		}
		if hostID == "" {
			// Use the sandbox's host.
			hostID = sandbox.HostID
		} else if sandbox.HostID == "" {
			return nil, fmt.Errorf("sandbox container %s is not running in a VM host, but host %s was specified", sandboxID, hostID)
		} else if hostID != sandbox.HostID {
			return nil, fmt.Errorf("sandbox container %s has a different host %s from the requested host %s", sandboxID, sandbox.HostID, hostID)
		}
		if vmisolated && hostID == "" {
			return nil, fmt.Errorf("container %s is not a VM isolated sandbox", sandboxID)
		}
	}

	uniqueID := guid.New()

	newvm := false
	var hostUniqueID guid.GUID
	if hostID != "" {
		// Validate the requested host and inherit its unique ID.
		host, err := getContainer(hostID, false)
		if err != nil {
			return nil, err
		}
		defer host.Close()
		if !host.IsHost {
			return nil, fmt.Errorf("host container %s is not a VM host", hostID)
		}
		hostUniqueID = host.UniqueID
	} else if vmisolated && (isSandbox || cfg.Spec.Linux != nil || osversion.Get().Build >= osversion.RS5) {
		// This handles all LCOW, Pod Sandbox, and (Windows Xenon V2 for RS5+)
		hostID = cfg.ID
		newvm = true
		hostUniqueID = uniqueID
	}

	// Make absolute the paths in Root.Path and Windows.LayerFolders.
	rootfs := ""
	if cfg.Spec.Root != nil {
		rootfs = cfg.Spec.Root.Path
		if rootfs != "" && !filepath.IsAbs(rootfs) && !strings.HasPrefix(rootfs, `\\?\`) {
			rootfs = filepath.Join(cwd, rootfs)
			cfg.Spec.Root.Path = rootfs
		}
	}

	netNS := ""
	if cfg.Spec.Windows != nil {
		for i, f := range cfg.Spec.Windows.LayerFolders {
			// FIX: this previously tested `rootfs` for the `\\?\` prefix
			// instead of the layer folder `f` being normalized, so relative
			// layer folders were never absolutized when the rootfs used the
			// long-path prefix (and the check never inspected `f` at all).
			if !filepath.IsAbs(f) && !strings.HasPrefix(f, `\\?\`) {
				cfg.Spec.Windows.LayerFolders[i] = filepath.Join(cwd, f)
			}
		}

		// Determine the network namespace to use.
		if cfg.Spec.Windows.Network != nil {
			if cfg.Spec.Windows.Network.NetworkSharedContainerName != "" {
				// RS4 case
				err = stateKey.Get(cfg.Spec.Windows.Network.NetworkSharedContainerName, keyNetNS, &netNS)
				if err != nil {
					if _, ok := err.(*regstate.NoStateError); !ok {
						return nil, err
					}
				}
			} else if cfg.Spec.Windows.Network.NetworkNamespace != "" {
				// RS5 case
				netNS = cfg.Spec.Windows.Network.NetworkNamespace
			}
		}
	}

	// Store the initial container state in the registry so that the delete
	// command can clean everything up if something goes wrong.
	c := &container{
		persistedState: persistedState{
			ID:             cfg.ID,
			Owner:          cfg.Owner,
			Bundle:         cwd,
			Rootfs:         rootfs,
			Created:        time.Now(),
			Spec:           cfg.Spec,
			SandboxID:      sandboxID,
			HostID:         hostID,
			IsHost:         newvm,
			RequestedNetNS: netNS,
			UniqueID:       uniqueID,
			HostUniqueID:   hostUniqueID,
		},
	}
	err = stateKey.Create(cfg.ID, keyState, &c.persistedState)
	if err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			c.Remove()
		}
	}()
	if isSandbox && vmisolated {
		// Persist the CNI namespace config keyed by the hosting VM's ID.
		cnicfg := cni.NewPersistedNamespaceConfig(netNS, cfg.ID, hostUniqueID)
		err = cnicfg.Store()
		if err != nil {
			return nil, err
		}
		defer func() {
			if err != nil {
				cnicfg.Remove()
			}
		}()
	}

	// Start a VM if necessary.
	if newvm {
		var opts interface{}

		const (
			annotationAllowOvercommit      = "io.microsoft.virtualmachine.computetopology.memory.allowovercommit"
			annotationEnableDeferredCommit = "io.microsoft.virtualmachine.computetopology.memory.enabledeferredcommit"
			annotationMemorySizeInMB       = "io.microsoft.virtualmachine.computetopology.memory.sizeinmb"
			annotationProcessorCount       = "io.microsoft.virtualmachine.computetopology.processor.count"
			annotationVPMemCount           = "io.microsoft.virtualmachine.devices.virtualpmem.maximumcount"
			annotationVPMemSize            = "io.microsoft.virtualmachine.devices.virtualpmem.maximumsizebytes"
			annotationPreferredRootFSType  = "io.microsoft.virtualmachine.lcow.preferredrootfstype"
		)

		if cfg.Spec.Linux != nil {
			// LCOW: annotations override the defaults where present.
			lopts := uvm.NewDefaultOptionsLCOW(vmID(c.ID), cfg.Owner)
			lopts.MemorySizeInMB = parseAnnotationsMemory(cfg.Spec, annotationMemorySizeInMB, lopts.MemorySizeInMB)
			lopts.AllowOvercommit = parseAnnotationsBool(cfg.Spec.Annotations, annotationAllowOvercommit, lopts.AllowOvercommit)
			lopts.EnableDeferredCommit = parseAnnotationsBool(cfg.Spec.Annotations, annotationEnableDeferredCommit, lopts.EnableDeferredCommit)
			lopts.ProcessorCount = parseAnnotationsCPU(cfg.Spec, annotationProcessorCount, lopts.ProcessorCount)
			lopts.ConsolePipe = cfg.VMConsolePipe
			lopts.VPMemDeviceCount = parseAnnotationsUint32(cfg.Spec.Annotations, annotationVPMemCount, lopts.VPMemDeviceCount)
			lopts.VPMemSizeBytes = parseAnnotationsUint64(cfg.Spec.Annotations, annotationVPMemSize, lopts.VPMemSizeBytes)
			lopts.PreferredRootFSType = parseAnnotationsPreferredRootFSType(cfg.Spec.Annotations, annotationPreferredRootFSType, lopts.PreferredRootFSType)
			switch lopts.PreferredRootFSType {
			case uvm.PreferredRootFSTypeInitRd:
				lopts.RootFSFile = uvm.InitrdFile
			case uvm.PreferredRootFSTypeVHD:
				lopts.RootFSFile = uvm.VhdFile
			}
			opts = lopts
		} else {
			// WCOW.
			wopts := uvm.NewDefaultOptionsWCOW(vmID(c.ID), cfg.Owner)
			wopts.MemorySizeInMB = parseAnnotationsMemory(cfg.Spec, annotationMemorySizeInMB, wopts.MemorySizeInMB)
			wopts.AllowOvercommit = parseAnnotationsBool(cfg.Spec.Annotations, annotationAllowOvercommit, wopts.AllowOvercommit)
			wopts.EnableDeferredCommit = parseAnnotationsBool(cfg.Spec.Annotations, annotationEnableDeferredCommit, wopts.EnableDeferredCommit)
			wopts.ProcessorCount = parseAnnotationsCPU(cfg.Spec, annotationProcessorCount, wopts.ProcessorCount)

			// In order for the UVM sandbox.vhdx not to collide with the actual
			// nested Argon sandbox.vhdx we append the \vm folder to the last entry
			// in the list.
			layersLen := len(cfg.Spec.Windows.LayerFolders)
			layers := make([]string, layersLen)
			copy(layers, cfg.Spec.Windows.LayerFolders)
			vmPath := filepath.Join(layers[layersLen-1], "vm")
			err := os.MkdirAll(vmPath, 0)
			if err != nil {
				return nil, err
			}
			layers[layersLen-1] = vmPath
			wopts.LayerFolders = layers
			opts = wopts
		}

		shim, err := c.startVMShim(cfg.VMLogFile, opts)
		if err != nil {
			return nil, err
		}
		shim.Release()
	}

	if c.HostID != "" {
		// Call to the VM shim process to create the container. This is done so
		// that the VM process can keep track of the VM's virtual hardware
		// resource use.
		err = c.issueVMRequest(runhcs.OpCreateContainer)
		if err != nil {
			return nil, err
		}
		c.hc, err = hcs.OpenComputeSystem(cfg.ID)
		if err != nil {
			return nil, err
		}
	} else {
		// Create the container directly from this process.
		err = createContainerInHost(c, nil)
		if err != nil {
			return nil, err
		}
	}

	// Create the shim process for the container.
	err = startContainerShim(c, cfg.PidFile, cfg.ShimLogFile)
	if err != nil {
		if e := c.Kill(); e == nil {
			c.Remove()
		}
		return nil, err
	}
	return c, nil
}
// ShimPipePath returns the named-pipe path used to signal this container's
// shim process; it is unique per container config via UniqueID.
func (c *container) ShimPipePath() string {
return runhcs.SafePipePath("runhcs-shim-" + c.UniqueID.String())
}

// VMPipePath returns the named-pipe path of the hosting VM's shim, derived
// from the host's unique ID.
func (c *container) VMPipePath() string {
return runhcs.VMPipePath(c.HostUniqueID)
}

// VMIsolated reports whether this container runs inside a utility VM
// (i.e. it has a hosting VM ID recorded).
func (c *container) VMIsolated() bool {
return c.HostID != ""
}
// unmountInHost releases the container's recorded resources directly from
// this process. When no resources were persisted this is a no-op. If the
// release fails the resources are re-persisted so a later attempt can retry.
func (c *container) unmountInHost(vm *uvm.UtilityVM, all bool) error {
	resources := &hcsoci.Resources{}
	err := stateKey.Get(c.ID, keyResources, resources)
	if _, ok := err.(*regstate.NoStateError); ok {
		// Nothing was recorded for this container; nothing to release.
		return nil
	}
	if err != nil {
		return err
	}
	if err := hcsoci.ReleaseResources(resources, vm, all); err != nil {
		// Put the record back so the release can be retried later.
		stateKey.Set(c.ID, keyResources, resources)
		return err
	}
	return stateKey.Clear(c.ID, keyResources)
}
// Unmount releases the container's mounted resources. For a VM-isolated
// container the request is forwarded to the VM shim (`all` selects a full
// unmount vs. disk-only); otherwise the release is done in-process.
func (c *container) Unmount(all bool) error {
if c.VMIsolated() {
op := runhcs.OpUnmountContainerDiskOnly
if all {
op = runhcs.OpUnmountContainer
}
err := c.issueVMRequest(op)
if err != nil {
if _, ok := err.(*noVMError); ok {
// The VM shim is gone; warn and fall through so teardown can
// still make progress.
logrus.WithFields(logrus.Fields{
logfields.ContainerID: c.ID,
logfields.UVMID: c.HostID,
logrus.ErrorKey: errors.New("failed to unmount container resources"),
}).Warning("VM shim could not be contacted")
} else {
return err
}
}
} else {
// NOTE(review): the error from unmountInHost is dropped and `all` is
// not forwarded (always false here) — presumably deliberate
// best-effort behavior, but worth confirming.
c.unmountInHost(nil, false)
}
return nil
}
// createContainerInHost creates (but does not start) the compute system for
// `c` directly from this process, optionally placing it in the utility VM
// `vm`. On success c.hc holds the open compute system and the allocated
// resources are persisted under `keyResources`; on failure the partially
// created system is terminated and its resources released.
func createContainerInHost(c *container, vm *uvm.UtilityVM) (err error) {
if c.hc != nil {
return errors.New("container already created")
}
// Create the container without starting it.
opts := &hcsoci.CreateOptions{
ID: c.ID,
Owner: c.Owner,
Spec: c.Spec,
HostingSystem: vm,
NetworkNamespace: c.RequestedNetNS,
}
vmid := ""
if vm != nil {
vmid = vm.ID()
}
logrus.WithFields(logrus.Fields{
logfields.ContainerID: c.ID,
logfields.UVMID: vmid,
}).Info("creating container in UVM")
hc, resources, err := hcsoci.CreateContainer(opts)
if err != nil {
return err
}
// Tear the container down again if any of the persistence steps below
// fail (err is the named return).
defer func() {
if err != nil {
hc.Terminate()
hc.Wait()
hcsoci.ReleaseResources(resources, vm, true)
}
}()
// Record the network namespace to support namespace sharing by container ID.
if resources.NetNS() != "" {
err = stateKey.Set(c.ID, keyNetNS, resources.NetNS())
if err != nil {
return err
}
}
err = stateKey.Set(c.ID, keyResources, resources)
if err != nil {
return err
}
c.hc = hc
return nil
}
// startContainerShim launches the per-container shim process, records its
// pid in the registry (and in `pidFile` when requested), and stores it on
// `c`. The shim is killed again if any of the bookkeeping fails.
func startContainerShim(c *container, pidFile, logFile string) error {
	// Launch a shim process to later execute a process in the container.
	shim, err := startProcessShim(c.ID, pidFile, logFile, nil)
	if err != nil {
		return err
	}
	defer shim.Release()
	defer func() {
		if err != nil {
			shim.Kill()
		}
	}()

	c.ShimPid = shim.Pid
	if err = stateKey.Set(c.ID, keyShimPid, shim.Pid); err != nil {
		return err
	}
	if pidFile == "" {
		return nil
	}
	if err = createPidFile(pidFile, shim.Pid); err != nil {
		return err
	}
	return nil
}
// Close releases the handle to the container's compute system, if one is
// open. It is safe to call on a container that was never opened.
func (c *container) Close() error {
	if c.hc != nil {
		return c.hc.Close()
	}
	return nil
}
// Exec starts the compute system and, when the spec defines an init
// process, signals the waiting shim over the shim pipe and relays any
// startup error the shim reports.
func (c *container) Exec() error {
	if err := c.hc.Start(); err != nil {
		return err
	}

	if c.Spec.Process == nil {
		return nil
	}

	// Alert the shim that the container is ready.
	pipe, err := winio.DialPipe(c.ShimPipePath(), nil)
	if err != nil {
		return err
	}
	defer pipe.Close()

	shim, err := os.FindProcess(c.ShimPid)
	if err != nil {
		return err
	}
	defer shim.Release()

	return runhcs.GetErrorFromPipe(pipe, shim)
}
// getContainer rehydrates a container from its persisted registry state and
// reopens its compute system. With `notStopped` set, a container whose shim
// has exited (pid 0) or whose compute system no longer exists yields
// errContainerStopped. A missing shim-pid record maps to ShimPid == -1.
func getContainer(id string, notStopped bool) (*container, error) {
	c := &container{}
	if err := stateKey.Get(id, keyState, &c.persistedState); err != nil {
		return nil, err
	}
	if err := stateKey.Get(id, keyShimPid, &c.ShimPid); err != nil {
		if _, ok := err.(*regstate.NoStateError); !ok {
			return nil, err
		}
		c.ShimPid = -1
	}
	if notStopped && c.ShimPid == 0 {
		return nil, errContainerStopped
	}

	hc, err := hcs.OpenComputeSystem(c.ID)
	switch {
	case err == nil:
		c.hc = hc
	case !hcs.IsNotExist(err):
		return nil, err
	case notStopped:
		return nil, errContainerStopped
	}

	return c, nil
}
// Remove unmounts the container's resources, tears down the hosting VM when
// this container owns it, and deletes the persisted registry state.
func (c *container) Remove() error {
	// Unmount any layers or mapped volumes.
	if err := c.Unmount(!c.IsHost); err != nil {
		return err
	}

	// Follow kata's example and delay tearing down the VM until the owning
	// container is removed.
	if c.IsHost {
		if vm, err := hcs.OpenComputeSystem(vmID(c.ID)); err == nil {
			if terr := vm.Terminate(); hcs.IsPending(terr) {
				vm.Wait()
			}
		}
	}
	return stateKey.Remove(c.ID)
}
// Kill terminates the container's compute system, waiting for a pending
// termination to complete. A container with no open compute system, or one
// that is already stopped, is not an error.
func (c *container) Kill() error {
	if c.hc == nil {
		return nil
	}
	err := c.hc.Terminate()
	if hcs.IsPending(err) {
		err = c.hc.Wait()
	}
	if hcs.IsAlreadyStopped(err) {
		return nil
	}
	return err
}
// Status reports the container's lifecycle state, mapping the compute
// system's reported state onto runhcs's containerStatus values.
func (c *container) Status() (containerStatus, error) {
	if c.hc == nil || c.ShimPid == 0 {
		return containerStopped, nil
	}
	props, err := c.hc.Properties()
	if err != nil {
		// Property queries racing a state transition fail with this message;
		// report "unknown" instead of surfacing the error.
		if strings.Contains(err.Error(), "operation is not valid in the current state") {
			return containerUnknown, nil
		}
		return "", err
	}
	switch props.State {
	case "", "Created":
		return containerCreated, nil
	case "Running":
		return containerRunning, nil
	case "Paused":
		return containerPaused, nil
	case "Stopped":
		return containerStopped, nil
	}
	return containerUnknown, nil
}

View File

@ -1,71 +0,0 @@
package main
import (
"os"
"path/filepath"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/Microsoft/hcsshim/internal/lcow"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/Microsoft/hcsshim/osversion"
gcsclient "github.com/Microsoft/opengcs/client"
"github.com/pkg/errors"
"github.com/urfave/cli"
)
// createScratchCommand creates an ext4-formatted scratch vhdx for LCOW by
// booting a utility VM and formatting the destination disk from inside it.
var createScratchCommand = cli.Command{
	Name:        "create-scratch",
	Usage:       "creates a scratch vhdx at 'destpath' that is ext4 formatted",
	Description: "Creates a scratch vhdx at 'destpath' that is ext4 formatted",
	Flags: []cli.Flag{
		cli.StringFlag{
			Name:  "destpath",
			Usage: "Required: describes the destination vhd path",
		},
	},
	Before: appargs.Validate(),
	Action: func(context *cli.Context) error {
		dest := context.String("destpath")
		if dest == "" {
			return errors.New("'destpath' is required")
		}
		// If we only have v1 lcow support do it the old way.
		if osversion.Get().Build < osversion.RS5 {
			// Pre-RS5: boot a v1 GCS utility VM from the installed kernel and
			// initrd, then format the vhdx through the GCS client.
			cfg := gcsclient.Config{
				Options: gcsclient.Options{
					KirdPath:   filepath.Join(os.Getenv("ProgramFiles"), "Linux Containers"),
					KernelFile: "kernel",
					InitrdFile: uvm.InitrdFile,
				},
				Name:              "createscratch-uvm",
				UvmTimeoutSeconds: 5 * 60, // 5 Min
			}
			if err := cfg.StartUtilityVM(); err != nil {
				return errors.Wrapf(err, "failed to start '%s'", cfg.Name)
			}
			defer cfg.Uvm.Terminate()
			if err := cfg.CreateExt4Vhdx(dest, lcow.DefaultScratchSizeGB, ""); err != nil {
				return errors.Wrapf(err, "failed to create ext4vhdx for '%s'", cfg.Name)
			}
		} else {
			// RS5+: use the v2 LCOW utility VM and the lcow package directly.
			opts := uvm.NewDefaultOptionsLCOW("createscratch-uvm", context.GlobalString("owner"))
			convertUVM, err := uvm.CreateLCOW(opts)
			if err != nil {
				return errors.Wrapf(err, "failed to create '%s'", opts.ID)
			}
			defer convertUVM.Close()
			if err := convertUVM.Start(); err != nil {
				return errors.Wrapf(err, "failed to start '%s'", opts.ID)
			}
			if err := lcow.CreateScratch(convertUVM, dest, lcow.DefaultScratchSizeGB, "", ""); err != nil {
				return errors.Wrapf(err, "failed to create ext4vhdx for '%s'", opts.ID)
			}
		}
		return nil
	},
}

View File

@ -1,100 +0,0 @@
package main
import (
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/urfave/cli"
)
// createRunFlags are the command-line flags shared by the "create" and "run"
// commands.
var createRunFlags = []cli.Flag{
	cli.StringFlag{
		Name:  "bundle, b",
		Value: "",
		Usage: `path to the root of the bundle directory, defaults to the current directory`,
	},
	cli.StringFlag{
		Name:  "pid-file",
		Value: "",
		Usage: "specify the file to write the process id to",
	},
	cli.StringFlag{
		Name:  "shim-log",
		Value: "",
		Usage: `path to the log file or named pipe (e.g. \\.\pipe\ProtectedPrefix\Administrators\runhcs-<container-id>-shim-log) for the launched shim process`,
	},
	cli.StringFlag{
		Name:  "vm-log",
		Value: "",
		Usage: `path to the log file or named pipe (e.g. \\.\pipe\ProtectedPrefix\Administrators\runhcs-<container-id>-vm-log) for the launched VM shim process`,
	},
	cli.StringFlag{
		Name:  "vm-console",
		Value: "",
		Usage: `path to the pipe for the VM's console (e.g. \\.\pipe\debugpipe)`,
	},
	cli.StringFlag{
		Name:  "host",
		Value: "",
		Usage: "host container whose VM this container should run in",
	},
}
// createCommand creates (but does not start) a container instance from a
// bundle directory.
var createCommand = cli.Command{
	Name:  "create",
	Usage: "create a container",
	ArgsUsage: `<container-id>
Where "<container-id>" is your name for the instance of the container that you
are starting. The name you provide for the container instance must be unique on
your host.`,
	Description: `The create command creates an instance of a container for a bundle. The bundle
is a directory with a specification file named "` + specConfig + `" and a root
filesystem.
The specification file includes an args parameter. The args parameter is used
to specify command(s) that get run when the container is started. To change the
command(s) that get executed on start, edit the args parameter of the spec. See
"runc spec --help" for more explanation.`,
	// The original wrote append(createRunFlags), a single-argument append
	// that go vet flags as a no-op; the slice can be used directly.
	Flags:  createRunFlags,
	Before: appargs.Validate(argID),
	Action: func(context *cli.Context) error {
		cfg, err := containerConfigFromContext(context)
		if err != nil {
			return err
		}
		_, err = createContainer(cfg)
		if err != nil {
			return err
		}
		return nil
	},
}
// containerConfigFromContext assembles a containerConfig from the CLI
// arguments and flags of a create/run invocation, resolving the log and pid
// paths to absolute paths and loading the bundle's spec.
func containerConfigFromContext(context *cli.Context) (*containerConfig, error) {
	cfg := &containerConfig{
		ID:            context.Args().First(),
		Owner:         context.GlobalString("owner"),
		VMConsolePipe: context.String("vm-console"),
		HostID:        context.String("host"),
	}
	var err error
	if cfg.PidFile, err = absPathOrEmpty(context.String("pid-file")); err != nil {
		return nil, err
	}
	if cfg.ShimLogFile, err = absPathOrEmpty(context.String("shim-log")); err != nil {
		return nil, err
	}
	if cfg.VMLogFile, err = absPathOrEmpty(context.String("vm-log")); err != nil {
		return nil, err
	}
	// Load the spec last, preserving the original ordering relative to the
	// path resolution above.
	if cfg.Spec, err = setupSpec(context); err != nil {
		return nil, err
	}
	return cfg, nil
}

View File

@ -1,73 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/Microsoft/hcsshim/internal/regstate"
"github.com/urfave/cli"
)
// deleteCommand deletes any resources held by a container, optionally killing
// it first when --force is given.
var deleteCommand = cli.Command{
	Name:  "delete",
	Usage: "delete any resources held by the container often used with detached container",
	ArgsUsage: `<container-id>
Where "<container-id>" is the name for the instance of the container.
EXAMPLE:
For example, if the container id is "ubuntu01" and runhcs list currently shows the
status of "ubuntu01" as "stopped" the following will delete resources held for
"ubuntu01" removing "ubuntu01" from the runhcs list of containers:
# runhcs delete ubuntu01`,
	Flags: []cli.Flag{
		cli.BoolFlag{
			Name:  "force, f",
			Usage: "Forcibly deletes the container if it is still running (uses SIGKILL)",
		},
	},
	Before: appargs.Validate(argID),
	Action: func(context *cli.Context) error {
		id := context.Args().First()
		force := context.Bool("force")
		container, err := getContainer(id, false)
		if err != nil {
			// If there is no registered state, best-effort remove any stray
			// registry entry; under --force that is not an error.
			if _, ok := err.(*regstate.NoStateError); ok {
				if e := stateKey.Remove(id); e != nil {
					fmt.Fprintf(os.Stderr, "remove %s: %v\n", id, e)
				}
				if force {
					return nil
				}
			}
			return err
		}
		defer container.Close()
		s, err := container.Status()
		if err != nil {
			return err
		}
		kill := false
		switch s {
		case containerStopped:
			// Nothing to kill.
		case containerCreated:
			kill = true
		default:
			if !force {
				// The trailing "\n" was removed from this error: cli prints
				// the error with its own newline (staticcheck ST1005).
				return fmt.Errorf("cannot delete container %s that is not stopped: %s", id, s)
			}
			kill = true
		}
		if kill {
			err = container.Kill()
			if err != nil {
				return err
			}
		}
		return container.Remove()
	},
}

View File

@ -1,160 +0,0 @@
package main
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"syscall"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/urfave/cli"
)
// execCommand executes a new process inside an already-running container. The
// process spec comes either from a process.json file (-p) or from the
// container spec's init process patched with the CLI flags.
var execCommand = cli.Command{
	Name:  "exec",
	Usage: "execute new process inside the container",
	ArgsUsage: `<container-id> <command> [command options] || -p process.json <container-id>
Where "<container-id>" is the name for the instance of the container and
"<command>" is the command to be executed in the container.
"<command>" can't be empty unless a "-p" flag provided.
EXAMPLE:
For example, if the container is configured to run the linux ps command the
following will output a list of processes running in the container:
# runhcs exec <container-id> ps`,
	Flags: []cli.Flag{
		cli.StringFlag{
			Name:  "cwd",
			Usage: "current working directory in the container",
		},
		cli.StringSliceFlag{
			Name:  "env, e",
			Usage: "set environment variables",
		},
		cli.BoolFlag{
			Name:  "tty, t",
			Usage: "allocate a pseudo-TTY",
		},
		cli.StringFlag{
			Name: "user, u",
		},
		cli.StringFlag{
			Name:  "process, p",
			Usage: "path to the process.json",
		},
		cli.BoolFlag{
			Name:  "detach,d",
			Usage: "detach from the container's process",
		},
		cli.StringFlag{
			Name:  "pid-file",
			Value: "",
			Usage: "specify the file to write the process id to",
		},
		cli.StringFlag{
			Name:  "shim-log",
			Value: "",
			Usage: `path to the log file or named pipe (e.g. \\.\pipe\ProtectedPrefix\Administrators\runhcs-<container-id>-<exec-id>-log) for the launched shim process`,
		},
	},
	Before: appargs.Validate(argID, appargs.Rest(appargs.String)),
	Action: func(context *cli.Context) error {
		id := context.Args().First()
		pidFile, err := absPathOrEmpty(context.String("pid-file"))
		if err != nil {
			return err
		}
		shimLog, err := absPathOrEmpty(context.String("shim-log"))
		if err != nil {
			return err
		}
		c, err := getContainer(id, false)
		if err != nil {
			return err
		}
		defer c.Close()
		// Exec is only valid against a running container.
		status, err := c.Status()
		if err != nil {
			return err
		}
		if status != containerRunning {
			return errContainerStopped
		}
		spec, err := getProcessSpec(context, c)
		if err != nil {
			return err
		}
		// Launch a shim process that creates the new process and proxies its
		// stdio.
		p, err := startProcessShim(id, pidFile, shimLog, spec)
		if err != nil {
			return err
		}
		if !context.Bool("detach") {
			// Wait for the shim and exit with the process's exit code.
			state, err := p.Wait()
			if err != nil {
				return err
			}
			os.Exit(int(state.Sys().(syscall.WaitStatus).ExitCode))
		}
		return nil
	},
	SkipArgReorder: true,
}
// getProcessSpec builds the OCI process spec for an exec. With -p, the spec
// is decoded from a process.json file and validated; otherwise the
// container's own init process spec is patched with the command-line args and
// flags.
func getProcessSpec(context *cli.Context, c *container) (*specs.Process, error) {
	if path := context.String("process"); path != "" {
		f, err := os.Open(path)
		if err != nil {
			return nil, err
		}
		defer f.Close()
		var p specs.Process
		if err := json.NewDecoder(f).Decode(&p); err != nil {
			return nil, err
		}
		return &p, validateProcessSpec(&p)
	}
	// process via cli flags
	// NOTE(review): p aliases c.Spec.Process, so the mutations below also
	// modify the container's in-memory spec; unlike the -p path, this spec is
	// not run through validateProcessSpec.
	p := c.Spec.Process
	if len(context.Args()) == 1 {
		return nil, fmt.Errorf("process args cannot be empty")
	}
	p.Args = context.Args()[1:]
	// override the cwd, if passed
	if context.String("cwd") != "" {
		p.Cwd = context.String("cwd")
	}
	// append the passed env variables
	p.Env = append(p.Env, context.StringSlice("env")...)
	// set the tty
	if context.IsSet("tty") {
		p.Terminal = context.Bool("tty")
	}
	// override the user, if passed
	if context.String("user") != "" {
		p.User.Username = context.String("user")
	}
	return p, nil
}
// validateProcessSpec checks that a process spec carries the minimum needed
// to launch: a non-empty absolute working directory and at least one
// argument.
func validateProcessSpec(spec *specs.Process) error {
	switch {
	case spec.Cwd == "":
		return fmt.Errorf("Cwd property must not be empty")
	case !filepath.IsAbs(spec.Cwd) && !strings.HasPrefix(spec.Cwd, "/"):
		// IsAbs doesnt recognize Unix paths on Windows builds so handle that
		// case here.
		return fmt.Errorf("Cwd must be an absolute path")
	case len(spec.Args) == 0:
		return fmt.Errorf("args must not be empty")
	}
	return nil
}

View File

@ -1,178 +0,0 @@
package main
import (
"strconv"
"strings"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/Microsoft/hcsshim/internal/guestrequest"
"github.com/Microsoft/hcsshim/internal/hcs"
"github.com/Microsoft/hcsshim/internal/schema1"
"github.com/Microsoft/hcsshim/osversion"
"github.com/pkg/errors"
"github.com/urfave/cli"
)
// killCommand sends the specified signal (default: SIGTERM) to the
// container's init process, falling back to a platform Kill() when signal
// delivery is not supported.
var killCommand = cli.Command{
	Name:  "kill",
	Usage: "kill sends the specified signal (default: SIGTERM) to the container's init process",
	ArgsUsage: `<container-id> [signal]
Where "<container-id>" is the name for the instance of the container and
"[signal]" is the signal to be sent to the init process.
EXAMPLE:
For example, if the container id is "ubuntu01" the following will send a "KILL"
signal to the init process of the "ubuntu01" container:
# runhcs kill ubuntu01 KILL`,
	Flags:  []cli.Flag{},
	Before: appargs.Validate(argID, appargs.Optional(appargs.String)),
	Action: func(context *cli.Context) error {
		id := context.Args().First()
		c, err := getContainer(id, true)
		if err != nil {
			return err
		}
		defer c.Close()
		// Signals can only be sent to a running container.
		status, err := c.Status()
		if err != nil {
			return err
		}
		if status != containerRunning {
			return errContainerStopped
		}
		signalsSupported := false
		// The Signal feature was added in RS5
		if osversion.Get().Build >= osversion.RS5 {
			if c.IsHost || c.HostID != "" {
				var hostID string
				if c.IsHost {
					// This is the LCOW, Pod Sandbox, or Windows Xenon V2 for RS5+
					hostID = vmID(c.ID)
				} else {
					// This is the Nth container in a Pod
					hostID = c.HostID
				}
				// Probe the utility VM's guest connection for signal support.
				uvm, err := hcs.OpenComputeSystem(hostID)
				if err != nil {
					return err
				}
				defer uvm.Close()
				if props, err := uvm.Properties(schema1.PropertyTypeGuestConnection); err == nil &&
					props.GuestConnectionInfo.GuestDefinedCapabilities.SignalProcessSupported {
					signalsSupported = true
				}
			} else if c.Spec.Linux == nil && c.Spec.Windows.HyperV == nil {
				// RS5+ Windows Argon
				signalsSupported = true
			}
		}
		signal := 0
		if signalsSupported {
			signal, err = validateSigstr(context.Args().Get(1), signalsSupported, c.Spec.Linux != nil)
			if err != nil {
				return err
			}
		}
		// The shim recorded the init process pid in the registry at create
		// time.
		var pid int
		if err := stateKey.Get(id, keyInitPid, &pid); err != nil {
			return err
		}
		p, err := c.hc.OpenProcess(pid)
		if err != nil {
			return err
		}
		defer p.Close()
		// Deliver a real signal where supported; Windows processes attached
		// to a terminal still get the legacy Kill (NOTE(review): presumably
		// because console processes don't handle signals — confirm).
		if signalsSupported && (c.Spec.Linux != nil || !c.Spec.Process.Terminal) {
			opts := guestrequest.SignalProcessOptions{
				Signal: signal,
			}
			return p.Signal(opts)
		}
		// Legacy signal issue a kill
		return p.Kill()
	},
}
// validateSigstr maps a signal string (name or number) onto its numeric
// signal value.
//
// An empty string selects the platform default: SIGTERM (0xf) on LCOW, 0
// (CtrlC/kill) on Windows. When signals are not supported on the platform,
// only the legacy kill signals are accepted and 0 is returned, since only a
// platform Kill() was available at that time. Matching is case-insensitive
// (the string is uppercased first).
func validateSigstr(sigstr string, signalsSupported bool, isLcow bool) (int, error) {
	errInvalidSignal := errors.Errorf("invalid signal '%s'", sigstr)
	// All flavors including legacy default to SIGTERM on LCOW CtrlC on Windows
	if sigstr == "" {
		if isLcow {
			return 0xf, nil
		}
		return 0, nil
	}
	sigstr = strings.ToUpper(sigstr)
	if !signalsSupported {
		// If signals arent supported we just validate that its a known signal.
		// We already return 0 since we only supported a platform Kill() at that
		// time. (Fallthrough chains collapsed into multi-value cases.)
		if isLcow {
			switch sigstr {
			case "15", "TERM", "SIGTERM":
				return 0, nil
			}
			return 0, errInvalidSignal
		}
		switch sigstr {
		// Docker sends a UNIX term in the supported Windows Signal map.
		case "15", "TERM", "0", "CTRLC", "9", "KILL":
			return 0, nil
		}
		return 0, errInvalidSignal
	}
	sigmap := signalMapWindows
	if isLcow {
		sigmap = signalMapLcow
	}
	if signal, err := strconv.Atoi(sigstr); err == nil {
		// Numeric input: match the signal by value (reverse lookup, so a
		// scan is required here).
		for _, v := range sigmap {
			if signal == v {
				return signal, nil
			}
		}
		return 0, errInvalidSignal
	}
	// Named input: direct map lookup. The original scanned the whole map
	// comparing keys, which is just an O(n) spelling of an index expression.
	if v, ok := sigmap[sigstr]; ok {
		return v, nil
	}
	return 0, errInvalidSignal
}

View File

@ -1,95 +0,0 @@
package main
import (
"fmt"
"strconv"
"strings"
"testing"
)
// runValidateSigstrTest invokes validateSigstr with the given inputs and
// checks both the returned signal value and, when expectedError is set, the
// exact "invalid signal" error message.
func runValidateSigstrTest(sigstr string, signalsSupported, isLcow bool,
	expectedSignal int, expectedError bool, t *testing.T) {
	signal, err := validateSigstr(sigstr, signalsSupported, isLcow)
	if expectedError {
		if err == nil {
			t.Fatalf("Expected err: %v, got: nil", expectedError)
		} else if err.Error() != fmt.Sprintf("invalid signal '%s'", sigstr) {
			t.Fatalf("Expected err: %v, got: %v", expectedError, err)
		}
	}
	if signal != expectedSignal {
		t.Fatalf("Test - Signal: %s, Support: %v, LCOW: %v\nExpected signal: %v, got: %v",
			sigstr, signalsSupported, isLcow,
			expectedSignal, signal)
	}
}
// TestValidateSigstrEmpty verifies the default for an empty signal string:
// SIGTERM (0xf) on LCOW and 0 on Windows, with and without signal support.
func TestValidateSigstrEmpty(t *testing.T) {
	for _, tc := range []struct {
		supported, lcow bool
		want            int
	}{
		{false, false, 0},
		{false, true, 0xf},
		{true, false, 0},
		{true, true, 0xf},
	} {
		runValidateSigstrTest("", tc.supported, tc.lcow, tc.want, false, t)
	}
}
// TestValidateSigstrDefaultLCOW verifies the legacy (no signal support) LCOW
// TERM spellings all map to 0.
func TestValidateSigstrDefaultLCOW(t *testing.T) {
	for _, sig := range []string{"15", "TERM", "SIGTERM"} {
		runValidateSigstrTest(sig, false, true, 0, false, t)
	}
}
// TestValidateSigstrDefaultLCOWInvalid verifies unsupported legacy LCOW
// signal strings are rejected.
func TestValidateSigstrDefaultLCOWInvalid(t *testing.T) {
	for _, sig := range []string{"2", "test"} {
		runValidateSigstrTest(sig, false, true, 0, true, t)
	}
}
// TestValidateSigstrDefaultWCOW verifies the legacy (no signal support)
// Windows signal spellings all map to 0.
func TestValidateSigstrDefaultWCOW(t *testing.T) {
	for _, sig := range []string{"15", "TERM", "0", "CTRLC", "9", "KILL"} {
		runValidateSigstrTest(sig, false, false, 0, false, t)
	}
}
// TestValidateSigstrDefaultWCOWInvalid verifies unsupported legacy Windows
// signal strings are rejected.
func TestValidateSigstrDefaultWCOWInvalid(t *testing.T) {
	for _, sig := range []string{"2", "test"} {
		runValidateSigstrTest(sig, false, false, 0, true, t)
	}
}
// TestValidateSignalStringLCOW verifies every named LCOW signal resolves to
// its value, including with non-canonical (lowercase) casing.
func TestValidateSignalStringLCOW(t *testing.T) {
	for name, want := range signalMapLcow {
		runValidateSigstrTest(name, true, true, want, false, t)
		// run it again with a case not in the map
		lower := strings.ToLower(name)
		if name == lower {
			t.Fatalf("Expected lower casing - map: %v, got: %v", name, lower)
		}
		runValidateSigstrTest(lower, true, true, want, false, t)
	}
}
// TestValidateSignalStringWCOW verifies every named Windows signal resolves
// to its value, including with non-canonical (lowercase) casing.
func TestValidateSignalStringWCOW(t *testing.T) {
	for name, want := range signalMapWindows {
		runValidateSigstrTest(name, true, false, want, false, t)
		// run it again with a case not in the map
		lower := strings.ToLower(name)
		if name == lower {
			t.Fatalf("Expected lower casing - map: %v, got: %v", name, lower)
		}
		runValidateSigstrTest(lower, true, false, want, false, t)
	}
}
// TestValidateSignalValueLCOW verifies every numeric LCOW signal value
// round-trips through validateSigstr.
func TestValidateSignalValueLCOW(t *testing.T) {
	for _, want := range signalMapLcow {
		runValidateSigstrTest(strconv.Itoa(want), true, true, want, false, t)
	}
}
// TestValidateSignalValueWCOW verifies every numeric Windows signal value
// round-trips through validateSigstr.
func TestValidateSignalValueWCOW(t *testing.T) {
	for _, want := range signalMapWindows {
		runValidateSigstrTest(strconv.Itoa(want), true, false, want, false, t)
	}
}

View File

@ -1,116 +0,0 @@
package main
import (
"fmt"
"os"
"text/tabwriter"
"time"
"encoding/json"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/Microsoft/hcsshim/internal/runhcs"
"github.com/urfave/cli"
)
const formatOptions = `table or json`
// listCommand lists containers recorded under the configured registry root,
// either as a tab-aligned table or as JSON.
var listCommand = cli.Command{
	Name:  "list",
	Usage: "lists containers started by runhcs with the given root",
	ArgsUsage: `
Where the given root is specified via the global option "--root"
(default: "/run/runhcs").
EXAMPLE 1:
To list containers created via the default "--root":
# runhcs list
EXAMPLE 2:
To list containers created using a non-default value for "--root":
# runhcs --root value list`,
	Flags: []cli.Flag{
		cli.StringFlag{
			Name:  "format, f",
			Value: "table",
			Usage: `select one of: ` + formatOptions,
		},
		cli.BoolFlag{
			Name:  "quiet, q",
			Usage: "display only container IDs",
		},
	},
	Before: appargs.Validate(),
	Action: func(context *cli.Context) error {
		s, err := getContainers(context)
		if err != nil {
			return err
		}
		// -q prints only the IDs and ignores the format flag.
		if context.Bool("quiet") {
			for _, item := range s {
				fmt.Println(item.ID)
			}
			return nil
		}
		switch context.String("format") {
		case "table":
			w := tabwriter.NewWriter(os.Stdout, 12, 1, 3, ' ', 0)
			fmt.Fprint(w, "ID\tPID\tSTATUS\tBUNDLE\tCREATED\tOWNER\n")
			for _, item := range s {
				fmt.Fprintf(w, "%s\t%d\t%s\t%s\t%s\t%s\n",
					item.ID,
					item.InitProcessPid,
					item.Status,
					item.Bundle,
					item.Created.Format(time.RFC3339Nano),
					item.Owner)
			}
			if err := w.Flush(); err != nil {
				return err
			}
		case "json":
			if err := json.NewEncoder(os.Stdout).Encode(s); err != nil {
				return err
			}
		default:
			return fmt.Errorf("invalid format option")
		}
		return nil
	},
}
// getContainers enumerates all persisted container IDs under the configured
// root and returns their states. Entries whose state or status cannot be read
// are reported on stderr; unreadable state skips the entry entirely.
func getContainers(context *cli.Context) ([]runhcs.ContainerState, error) {
	ids, err := stateKey.Enumerate()
	if err != nil {
		return nil, err
	}
	// Deliberately left nil when empty so JSON output stays "null", matching
	// the original behavior.
	var states []runhcs.ContainerState
	for _, id := range ids {
		c, err := getContainer(id, false)
		if err != nil {
			fmt.Fprintf(os.Stderr, "reading state for %s: %v\n", id, err)
			continue
		}
		status, err := c.Status()
		if err != nil {
			fmt.Fprintf(os.Stderr, "reading status for %s: %v\n", id, err)
		}
		states = append(states, runhcs.ContainerState{
			ID:             id,
			Version:        c.Spec.Version,
			InitProcessPid: c.ShimPid,
			Status:         string(status),
			Bundle:         c.Bundle,
			Rootfs:         c.Rootfs,
			Created:        c.Created,
			Annotations:    c.Spec.Annotations,
		})
		c.Close()
	}
	return states, nil
}

View File

@ -1,174 +0,0 @@
package main
import (
"fmt"
"io"
"os"
"strings"
"github.com/Microsoft/go-winio"
"github.com/Microsoft/go-winio/pkg/etwlogrus"
"github.com/Microsoft/hcsshim/internal/regstate"
"github.com/Microsoft/hcsshim/internal/runhcs"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
// Add a manifest to get proper Windows version detection.
//
// goversioninfo can be installed with "go get github.com/josephspurrier/goversioninfo/cmd/goversioninfo"
//go:generate goversioninfo -platform-specific
// version will be populated by the Makefile, read from
// VERSION file of the source code.
var version = ""

// gitCommit will be the hash that the binary was built from
// and will be populated by the Makefile
var gitCommit = ""

// stateKey is the registry key under which container state is persisted;
// it is opened in app.Before from the global --root flag.
var stateKey *regstate.Key

// logFormat holds the value of the global --log-format flag ("text" or
// "json").
var logFormat string
const (
	// specConfig is the name of the OCI spec file expected in each bundle
	// directory.
	specConfig = "config.json"
	// usage is the top-level help text for the runhcs CLI.
	usage      = `Open Container Initiative runtime for Windows
runhcs is a fork of runc, modified to run containers on Windows with or without Hyper-V isolation. Like runc, it is a command line client for running applications packaged according to the Open Container Initiative (OCI) format.
runhcs integrates with existing process supervisors to provide a production container runtime environment for applications. It can be used with your existing process monitoring tools and the container will be spawned as a direct child of the process supervisor.
Containers are configured using bundles. A bundle for a container is a directory that includes a specification file named "` + specConfig + `". Bundle contents will depend on the container type.
To start a new instance of a container:
# runhcs run [ -b bundle ] <container-id>
Where "<container-id>" is your name for the instance of the container that you are starting. The name you provide for the container instance must be unique on your host. Providing the bundle directory using "-b" is optional. The default value for "bundle" is the current directory.`
)
// main wires up the runhcs CLI: the ETW logging hook, global flags, the
// command table, and per-run initialization of logging and the registry
// state key.
func main() {
	// Provider ID: 0b52781f-b24d-5685-ddf6-69830ed40ec3
	// Hook isn't closed explicitly, as it will exist until process exit.
	if hook, err := etwlogrus.NewHook("Microsoft.Virtualization.RunHCS"); err == nil {
		logrus.AddHook(hook)
	} else {
		logrus.Error(err)
	}
	app := cli.NewApp()
	app.Name = "runhcs"
	app.Usage = usage
	// Compose the version string from the build-injected version/commit and
	// the OCI spec version.
	var v []string
	if version != "" {
		v = append(v, version)
	}
	if gitCommit != "" {
		v = append(v, fmt.Sprintf("commit: %s", gitCommit))
	}
	v = append(v, fmt.Sprintf("spec: %s", specs.Version))
	app.Version = strings.Join(v, "\n")
	app.Flags = []cli.Flag{
		cli.BoolFlag{
			Name:  "debug",
			Usage: "enable debug output for logging",
		},
		cli.StringFlag{
			Name:  "log",
			Value: "nul",
			Usage: `set the log file path or named pipe (e.g. \\.\pipe\ProtectedPrefix\Administrators\runhcs-log) where internal debug information is written`,
		},
		cli.StringFlag{
			Name:  "log-format",
			Value: "text",
			Usage: "set the format used by logs ('text' (default), or 'json')",
		},
		cli.StringFlag{
			Name:  "owner",
			Value: "runhcs",
			Usage: "compute system owner",
		},
		cli.StringFlag{
			Name:  "root",
			Value: "default",
			Usage: "registry key for storage of container state",
		},
	}
	app.Commands = []cli.Command{
		createCommand,
		createScratchCommand,
		deleteCommand,
		// eventsCommand,
		execCommand,
		killCommand,
		listCommand,
		pauseCommand,
		psCommand,
		resizeTtyCommand,
		resumeCommand,
		runCommand,
		shimCommand,
		startCommand,
		stateCommand,
		// updateCommand,
		vmshimCommand,
	}
	app.Before = func(context *cli.Context) error {
		if context.GlobalBool("debug") {
			logrus.SetLevel(logrus.DebugLevel)
		}
		// Route log output to a named pipe (when the path uses the protected
		// prefix) or to an appended file.
		if path := context.GlobalString("log"); path != "" {
			var f io.Writer
			var err error
			if strings.HasPrefix(path, runhcs.SafePipePrefix) {
				f, err = winio.DialPipe(path, nil)
			} else {
				f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND|os.O_SYNC, 0666)
			}
			if err != nil {
				return err
			}
			logrus.SetOutput(f)
		}
		switch logFormat = context.GlobalString("log-format"); logFormat {
		case "text":
			// retain logrus's default.
		case "json":
			logrus.SetFormatter(new(logrus.JSONFormatter))
		default:
			return fmt.Errorf("unknown log-format %q", logFormat)
		}
		// Open the registry key that persists all container state.
		var err error
		stateKey, err = regstate.Open(context.GlobalString("root"), false)
		if err != nil {
			return err
		}
		return nil
	}
	// If the command returns an error, cli takes upon itself to print
	// the error on cli.ErrWriter and exit.
	// Use our own writer here to ensure the log gets sent to the right location.
	fatalWriter.Writer = cli.ErrWriter
	cli.ErrWriter = &fatalWriter
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(cli.ErrWriter, err)
		os.Exit(1)
	}
}
// logErrorWriter logs everything written to it at error level before
// forwarding the bytes to the wrapped Writer. It is installed over
// cli.ErrWriter in main so fatal CLI errors also reach the log output.
type logErrorWriter struct {
	Writer io.Writer
}

// fatalWriter is the process-wide logErrorWriter wired up in main.
var fatalWriter logErrorWriter

// Write logs p as an error and then passes it through to the underlying
// writer unchanged.
func (f *logErrorWriter) Write(p []byte) (n int, err error) {
	logrus.Error(string(p))
	return f.Writer.Write(p)
}

View File

@ -1,58 +0,0 @@
package main
import (
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/urfave/cli"
)
// pauseCommand suspends all processes inside a container.
var pauseCommand = cli.Command{
	Name:  "pause",
	Usage: "pause suspends all processes inside the container",
	ArgsUsage: `<container-id>
Where "<container-id>" is the name for the instance of the container to be
paused. `,
	Description: `The pause command suspends all processes in the instance of the container.
Use runhcs list to identify instances of containers and their current status.`,
	Before: appargs.Validate(argID),
	Action: func(context *cli.Context) error {
		// The container must be running (notStopped) to be paused.
		ctr, err := getContainer(context.Args().First(), true)
		if err != nil {
			return err
		}
		defer ctr.Close()
		return ctr.hc.Pause()
	},
}
// resumeCommand resumes all previously-paused processes in a container.
var resumeCommand = cli.Command{
	Name:  "resume",
	Usage: "resumes all processes that have been previously paused",
	ArgsUsage: `<container-id>
Where "<container-id>" is the name for the instance of the container to be
resumed.`,
	Description: `The resume command resumes all processes in the instance of the container.
Use runhcs list to identify instances of containers and their current status.`,
	Before: appargs.Validate(argID),
	Action: func(context *cli.Context) error {
		// The container must be running (notStopped) to be resumed.
		ctr, err := getContainer(context.Args().First(), true)
		if err != nil {
			return err
		}
		defer ctr.Close()
		return ctr.hc.Resume()
	},
}

View File

@ -1,51 +0,0 @@
package main
import (
"encoding/json"
"fmt"
"os"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/Microsoft/hcsshim/internal/schema1"
"github.com/urfave/cli"
)
// psCommand displays the pids of the processes running inside a container.
// Despite accepting a format flag, only "json" output is implemented; any
// other value is rejected.
var psCommand = cli.Command{
	Name:      "ps",
	Usage:     "ps displays the processes running inside a container",
	ArgsUsage: `<container-id> [ps options]`,
	Flags: []cli.Flag{
		cli.StringFlag{
			Name:  "format, f",
			Value: "json",
			Usage: `select one of: ` + formatOptions,
		},
	},
	Before: appargs.Validate(argID),
	Action: func(context *cli.Context) error {
		id := context.Args().First()
		container, err := getContainer(id, true)
		if err != nil {
			return err
		}
		defer container.Close()
		// Query the compute system for its current process list.
		props, err := container.hc.Properties(schema1.PropertyTypeProcessList)
		if err != nil {
			return err
		}
		var pids []int
		for _, p := range props.ProcessList {
			pids = append(pids, int(p.ProcessId))
		}
		switch context.String("format") {
		case "json":
			return json.NewEncoder(os.Stdout).Encode(pids)
		default:
			return fmt.Errorf("invalid format option")
		}
	},
	SkipArgReorder: true,
}

View File

@ -1,64 +0,0 @@
package main
import (
"os"
"syscall"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/urfave/cli"
)
// default action is to start a container
var runCommand = cli.Command{
Name: "run",
Usage: "create and run a container",
ArgsUsage: `<container-id>
Where "<container-id>" is your name for the instance of the container that you
are starting. The name you provide for the container instance must be unique on
your host.`,
Description: `The run command creates an instance of a container for a bundle. The bundle
is a directory with a specification file named "` + specConfig + `" and a root
filesystem.
The specification file includes an args parameter. The args parameter is used
to specify command(s) that get run when the container is started. To change the
command(s) that get executed on start, edit the args parameter of the spec.`,
Flags: append(createRunFlags,
cli.BoolFlag{
Name: "detach, d",
Usage: "detach from the container's process",
},
),
Before: appargs.Validate(argID),
Action: func(context *cli.Context) error {
cfg, err := containerConfigFromContext(context)
if err != nil {
return err
}
c, err := createContainer(cfg)
if err != nil {
return err
}
if err != nil {
return err
}
p, err := os.FindProcess(c.ShimPid)
if err != nil {
return err
}
err = c.Exec()
if err != nil {
return err
}
if !context.Bool("detach") {
state, err := p.Wait()
if err != nil {
return err
}
c.Remove()
os.Exit(int(state.Sys().(syscall.WaitStatus).ExitCode))
}
return nil
},
}

View File

@ -1,10 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<description>runhcs</description>
<compatibility xmlns="urn:schemas-microsoft-com:compatibility.v1">
<application>
<!-- Windows 10 -->
<supportedOS Id="{8e0f7a12-bfb3-4fe8-b9a5-48fd50a15a9a}"/>
</application>
</compatibility>
</assembly>

View File

@ -1,323 +0,0 @@
package main
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"strings"
"sync"
"time"
winio "github.com/Microsoft/go-winio"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/Microsoft/hcsshim/internal/hcs"
"github.com/Microsoft/hcsshim/internal/lcow"
"github.com/Microsoft/hcsshim/internal/runhcs"
"github.com/Microsoft/hcsshim/internal/schema2"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"golang.org/x/sys/windows"
)
// containerPipePath returns the name of the named pipe the shim for container
// id listens on, under the protected pipe prefix.
func containerPipePath(id string) string {
	return runhcs.SafePipePath("runhcs-shim-" + id)
}
// newFile wraps the file descriptor passed via the named integer flag in an
// *os.File. It returns nil when the flag is unset (zero).
func newFile(context *cli.Context, param string) *os.File {
	if fd := uintptr(context.Int(param)); fd != 0 {
		return os.NewFile(fd, "")
	}
	return nil
}
var shimCommand = cli.Command{
Name: "shim",
Usage: `launch the process and proxy stdio (do not call it outside of runhcs)`,
Hidden: true,
Flags: []cli.Flag{
&cli.IntFlag{Name: "stdin", Hidden: true},
&cli.IntFlag{Name: "stdout", Hidden: true},
&cli.IntFlag{Name: "stderr", Hidden: true},
&cli.BoolFlag{Name: "exec", Hidden: true},
cli.StringFlag{Name: "log-pipe", Hidden: true},
},
Before: appargs.Validate(argID),
Action: func(context *cli.Context) error {
logPipe := context.String("log-pipe")
if logPipe != "" {
lpc, err := winio.DialPipe(logPipe, nil)
if err != nil {
return err
}
defer lpc.Close()
logrus.SetOutput(lpc)
} else {
logrus.SetOutput(os.Stderr)
}
fatalWriter.Writer = os.Stdout
id := context.Args().First()
c, err := getContainer(id, true)
if err != nil {
return err
}
defer c.Close()
// Asynchronously wait for the container to exit.
containerExitCh := make(chan error)
go func() {
containerExitCh <- c.hc.Wait()
}()
// Get File objects for the open stdio files passed in as arguments.
stdin := newFile(context, "stdin")
stdout := newFile(context, "stdout")
stderr := newFile(context, "stderr")
exec := context.Bool("exec")
terminateOnFailure := false
errorOut := io.WriteCloser(os.Stdout)
var spec *specs.Process
if exec {
// Read the process spec from stdin.
specj, err := ioutil.ReadAll(os.Stdin)
if err != nil {
return err
}
os.Stdin.Close()
spec = new(specs.Process)
err = json.Unmarshal(specj, spec)
if err != nil {
return err
}
} else {
// Stdin is not used.
os.Stdin.Close()
// Listen on the named pipe associated with this container.
l, err := winio.ListenPipe(c.ShimPipePath(), nil)
if err != nil {
return err
}
// Alert the parent process that initialization has completed
// successfully.
errorOut.Write(runhcs.ShimSuccess)
errorOut.Close()
fatalWriter.Writer = ioutil.Discard
// When this process exits, clear this process's pid in the registry.
defer func() {
stateKey.Set(id, keyShimPid, 0)
}()
defer func() {
if terminateOnFailure {
if err = c.hc.Terminate(); hcs.IsPending(err) {
<-containerExitCh
}
}
}()
terminateOnFailure = true
// Wait for a connection to the named pipe, exiting if the container
// exits before this happens.
var pipe net.Conn
pipeCh := make(chan error)
go func() {
var err error
pipe, err = l.Accept()
pipeCh <- err
}()
select {
case err = <-pipeCh:
if err != nil {
return err
}
case err = <-containerExitCh:
if err != nil {
return err
}
return cli.NewExitError("", 1)
}
// The next set of errors goes to the open pipe connection.
errorOut = pipe
fatalWriter.Writer = pipe
// The process spec comes from the original container spec.
spec = c.Spec.Process
}
// Create the process in the container.
var wpp *hcsschema.ProcessParameters // Windows Process Parameters
var lpp *lcow.ProcessParameters // Linux Process Parameters
var p *hcs.Process
if c.Spec.Linux == nil {
environment := make(map[string]string)
for _, v := range spec.Env {
s := strings.SplitN(v, "=", 2)
if len(s) == 2 && len(s[1]) > 0 {
environment[s[0]] = s[1]
}
}
wpp = &hcsschema.ProcessParameters{
WorkingDirectory: spec.Cwd,
EmulateConsole: spec.Terminal,
Environment: environment,
User: spec.User.Username,
}
for i, arg := range spec.Args {
e := windows.EscapeArg(arg)
if i == 0 {
wpp.CommandLine = e
} else {
wpp.CommandLine += " " + e
}
}
if spec.ConsoleSize != nil {
wpp.ConsoleSize = []int32{
int32(spec.ConsoleSize.Height),
int32(spec.ConsoleSize.Width),
}
}
wpp.CreateStdInPipe = stdin != nil
wpp.CreateStdOutPipe = stdout != nil
wpp.CreateStdErrPipe = stderr != nil
p, err = c.hc.CreateProcess(wpp)
} else {
lpp = &lcow.ProcessParameters{}
if exec {
lpp.OCIProcess = spec
}
lpp.CreateStdInPipe = stdin != nil
lpp.CreateStdOutPipe = stdout != nil
lpp.CreateStdErrPipe = stderr != nil
p, err = c.hc.CreateProcess(lpp)
}
if err != nil {
return err
}
cstdin, cstdout, cstderr, err := p.Stdio()
if err != nil {
return err
}
if !exec {
err = stateKey.Set(c.ID, keyInitPid, p.Pid())
if err != nil {
return err
}
}
// Store the Guest pid map
err = stateKey.Set(c.ID, fmt.Sprintf(keyPidMapFmt, os.Getpid()), p.Pid())
if err != nil {
return err
}
defer func() {
// Remove the Guest pid map when this process is cleaned up
stateKey.Clear(c.ID, fmt.Sprintf(keyPidMapFmt, os.Getpid()))
}()
terminateOnFailure = false
// Alert the connected process that the process was launched
// successfully.
errorOut.Write(runhcs.ShimSuccess)
errorOut.Close()
fatalWriter.Writer = ioutil.Discard
// Relay stdio.
var wg sync.WaitGroup
if cstdin != nil {
go func() {
io.Copy(cstdin, stdin)
cstdin.Close()
p.CloseStdin()
}()
}
if cstdout != nil {
wg.Add(1)
go func() {
io.Copy(stdout, cstdout)
stdout.Close()
cstdout.Close()
wg.Done()
}()
}
if cstderr != nil {
wg.Add(1)
go func() {
io.Copy(stderr, cstderr)
stderr.Close()
cstderr.Close()
wg.Done()
}()
}
err = p.Wait()
wg.Wait()
// Attempt to get the exit code from the process.
code := 1
if err == nil {
code, err = p.ExitCode()
if err != nil {
code = 1
}
}
if !exec {
// Shutdown the container, waiting 5 minutes before terminating is
// forcefully.
const shutdownTimeout = time.Minute * 5
waited := false
err = c.hc.Shutdown()
if hcs.IsPending(err) {
select {
case err = <-containerExitCh:
waited = true
case <-time.After(shutdownTimeout):
err = hcs.ErrTimeout
}
}
if hcs.IsAlreadyStopped(err) {
err = nil
}
if err != nil {
err = c.hc.Terminate()
if waited {
err = c.hc.Wait()
} else {
err = <-containerExitCh
}
}
}
return cli.NewExitError("", code)
},
}

View File

@ -1,48 +0,0 @@
package main
// signalMapLcow maps POSIX signal names (without the "SIG" prefix) to
// their Linux signal numbers, used when signalling processes in Linux
// containers (LCOW).
var signalMapLcow = map[string]int{
	"ABRT":   0x6,
	"ALRM":   0xe,
	"BUS":    0x7,
	"CHLD":   0x11,
	"CLD":    0x11,
	"CONT":   0x12,
	"FPE":    0x8,
	"HUP":    0x1,
	"ILL":    0x4,
	"INT":    0x2,
	"IO":     0x1d,
	"IOT":    0x6,
	"KILL":   0x9,
	"PIPE":   0xd,
	"POLL":   0x1d,
	"PROF":   0x1b,
	"PWR":    0x1e,
	"QUIT":   0x3,
	"SEGV":   0xb,
	"STKFLT": 0x10,
	"STOP":   0x13,
	"SYS":    0x1f,
	"TERM":   0xf,
	"TRAP":   0x5,
	"TSTP":   0x14,
	"TTIN":   0x15,
	"TTOU":   0x16,
	"URG":    0x17,
	"USR1":   0xa,
	"USR2":   0xc,
	"VTALRM": 0x1a,
	"WINCH":  0x1c,
	"XCPU":   0x18,
	"XFSZ":   0x19,
}
// signalMapWindows maps supported signal names to Windows console
// control event codes. Only console control events exist on Windows, so
// the UNIX names Docker sends (TERM, KILL) are translated to the closest
// control events.
var signalMapWindows = map[string]int{
	"CTRLC":        0x0,
	"CTRLBREAK":    0x1,
	"CTRLCLOSE":    0x2,
	"CTRLLOGOFF":   0x5,
	"CTRLSHUTDOWN": 0x6,
	"TERM":         0x0, // Docker sends the UNIX signal. Convert to CTRLC
	"KILL":         0x6, // Docker sends the UNIX signal. Convert to CTRLSHUTDOWN
}

View File

@ -1,42 +0,0 @@
package main
import (
"encoding/json"
"fmt"
"os"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/urfave/cli"
)
// loadSpec loads the specification from the provided path.
func loadSpec(cPath string) (spec *specs.Spec, err error) {
cf, err := os.Open(cPath)
if err != nil {
if os.IsNotExist(err) {
return nil, fmt.Errorf("JSON specification file %s not found", cPath)
}
return nil, err
}
defer cf.Close()
if err = json.NewDecoder(cf).Decode(&spec); err != nil {
return nil, err
}
return spec, nil
}
// setupSpec performs initial setup based on the cli.Context for the container
func setupSpec(context *cli.Context) (*specs.Spec, error) {
bundle := context.String("bundle")
if bundle != "" {
if err := os.Chdir(bundle); err != nil {
return nil, err
}
}
spec, err := loadSpec(specConfig)
if err != nil {
return nil, err
}
return spec, nil
}

View File

@ -1,43 +0,0 @@
package main
import (
"errors"
"fmt"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/urfave/cli"
)
// startCommand implements "runhcs start": it runs the user-defined
// process of a container that is in the created state.
var startCommand = cli.Command{
	Name:  "start",
	Usage: "executes the user defined process in a created container",
	ArgsUsage: `<container-id>
Where "<container-id>" is your name for the instance of the container that you
are starting. The name you provide for the container instance must be unique on
your host.`,
	Description: `The start command executes the user defined process in a created container.`,
	Before:      appargs.Validate(argID),
	Action: func(context *cli.Context) error {
		id := context.Args().First()
		c, err := getContainer(id, false)
		if err != nil {
			return err
		}
		defer c.Close()

		status, err := c.Status()
		if err != nil {
			return err
		}
		// Only a freshly created container may be started; report a
		// state-specific error otherwise.
		if status == containerCreated {
			return c.Exec()
		}
		switch status {
		case containerStopped:
			return errors.New("cannot start a container that has stopped")
		case containerRunning:
			return errors.New("cannot start an already running container")
		}
		return fmt.Errorf("cannot start a container in the '%s' state", status)
	},
}

View File

@ -1,49 +0,0 @@
package main
import (
"encoding/json"
"os"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/Microsoft/hcsshim/internal/runhcs"
"github.com/urfave/cli"
)
// stateCommand implements "runhcs state": it prints the container's OCI
// runtime state document as indented JSON on stdout.
var stateCommand = cli.Command{
	Name:  "state",
	Usage: "output the state of a container",
	ArgsUsage: `<container-id>
Where "<container-id>" is your name for the instance of the container.`,
	Description: `The state command outputs current state information for the
instance of a container.`,
	Before: appargs.Validate(argID),
	Action: func(context *cli.Context) error {
		id := context.Args().First()
		c, err := getContainer(id, false)
		if err != nil {
			return err
		}
		defer c.Close()
		status, err := c.Status()
		if err != nil {
			return err
		}
		// Assemble the OCI state document from the persisted container
		// record and its live status.
		cs := runhcs.ContainerState{
			Version:        c.Spec.Version,
			ID:             c.ID,
			InitProcessPid: c.ShimPid,
			Status:         string(status),
			Bundle:         c.Bundle,
			Rootfs:         c.Rootfs,
			Created:        c.Created,
			Annotations:    c.Spec.Annotations,
		}
		data, err := json.MarshalIndent(cs, "", "  ")
		if err != nil {
			return err
		}
		os.Stdout.Write(data)
		return nil
	},
}

View File

@ -1,56 +0,0 @@
package main
import (
"fmt"
"strconv"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/urfave/cli"
)
// resizeTtyCommand implements "runhcs resize-tty": it resizes the
// console of a process (by default the init process) running inside a
// container.
var resizeTtyCommand = cli.Command{
	Name:      "resize-tty",
	Usage:     "resize-tty updates the terminal size for a container process",
	ArgsUsage: `<container-id> <width> <height>`,
	Flags: []cli.Flag{
		&cli.IntFlag{
			Name:  "pid, p",
			Usage: "the process pid (defaults to init pid)",
		},
	},
	Before: appargs.Validate(
		argID,
		appargs.Int(10, 1, 65535),
		appargs.Int(10, 1, 65535),
	),
	Action: func(context *cli.Context) error {
		id := context.Args()[0]
		// Parse errors are safe to ignore here: the Before validator has
		// already confirmed both arguments are integers in [1, 65535].
		width, _ := strconv.ParseUint(context.Args()[1], 10, 16)
		height, _ := strconv.ParseUint(context.Args()[2], 10, 16)
		c, err := getContainer(id, true)
		if err != nil {
			return err
		}
		defer c.Close()
		pid := context.Int("pid")
		if pid == 0 {
			// No pid given: target the container's init process.
			if err := stateKey.Get(id, keyInitPid, &pid); err != nil {
				return err
			}
		} else {
			// If a pid was provided map it to its hcs pid.
			if err := stateKey.Get(id, fmt.Sprintf(keyPidMapFmt, pid), &pid); err != nil {
				return err
			}
		}
		p, err := c.hc.OpenProcess(pid)
		if err != nil {
			return err
		}
		defer p.Close()
		return p.ResizeConsole(uint16(width), uint16(height))
	},
}

View File

@ -1,52 +0,0 @@
package main
import (
"fmt"
"net"
"os"
"path/filepath"
"strings"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/Microsoft/hcsshim/internal/runhcs"
)
// argID validates the <container-id> positional argument: it must be a
// non-empty string.
var argID = appargs.NonEmptyString
// absPathOrEmpty normalizes path for storage: empty stays empty,
// safe-pipe paths keep their prefix and get the remainder escaped via
// runhcs.SafePipePath, and anything else is made absolute.
func absPathOrEmpty(path string) (string, error) {
	switch {
	case path == "":
		return "", nil
	case strings.HasPrefix(path, runhcs.SafePipePrefix) &&
		len(path) > len(runhcs.SafePipePrefix):
		return runhcs.SafePipePath(path[len(runhcs.SafePipePrefix):]), nil
	default:
		return filepath.Abs(path)
	}
}
// createPidFile creates a file with the processes pid inside it atomically
// it creates a temp file with the paths filename + '.' infront of it
// then renames the file
func createPidFile(path string, pid int) error {
var (
tmpDir = filepath.Dir(path)
tmpName = filepath.Join(tmpDir, fmt.Sprintf(".%s", filepath.Base(path)))
)
f, err := os.OpenFile(tmpName, os.O_RDWR|os.O_CREATE|os.O_EXCL|os.O_SYNC, 0666)
if err != nil {
return err
}
_, err = fmt.Fprintf(f, "%d", pid)
f.Close()
if err != nil {
return err
}
return os.Rename(tmpName, path)
}
// closeWritePipe half-closes the write side of pipe so the peer observes
// EOF while this end can still read. It panics if pipe's concrete type
// does not implement CloseWrite.
func closeWritePipe(pipe net.Conn) error {
	return pipe.(interface {
		CloseWrite() error
	}).CloseWrite()
}

View File

@ -1,39 +0,0 @@
package main
import (
"os"
"testing"
"github.com/Microsoft/hcsshim/internal/runhcs"
)
// Test_AbsPathOrEmpty checks absPathOrEmpty's three behaviors: empty
// input stays empty, safe-pipe paths keep their prefix with the
// remainder escaped (spaces become %20) instead of being made absolute,
// and ordinary relative paths are resolved against the working
// directory.
func Test_AbsPathOrEmpty(t *testing.T) {
	wd, err := os.Getwd()
	if err != nil {
		t.Fatalf("failed to get test wd: %v", err)
	}
	// tests[i] corresponds to expected[i].
	tests := []string{
		"",
		runhcs.SafePipePrefix + "test",
		runhcs.SafePipePrefix + "test with spaces",
		"test",
		"C:\\test..\\test",
	}
	expected := []string{
		"",
		runhcs.SafePipePrefix + "test",
		runhcs.SafePipePrefix + "test%20with%20spaces",
		wd + "\\test",
		"C:\\test..\\test",
	}
	for i, test := range tests {
		actual, err := absPathOrEmpty(test)
		if err != nil {
			t.Fatalf("absPathOrEmpty: error '%v'", err)
		}
		if actual != expected[i] {
			t.Fatalf("absPathOrEmpty: actual '%s' != '%s'", actual, expected[i])
		}
	}
}

View File

@ -1,43 +0,0 @@
{
"FixedFileInfo": {
"FileVersion": {
"Major": 1,
"Minor": 0,
"Patch": 0,
"Build": 0
},
"ProductVersion": {
"Major": 1,
"Minor": 0,
"Patch": 0,
"Build": 0
},
"FileFlagsMask": "3f",
"FileFlags ": "00",
"FileOS": "040004",
"FileType": "01",
"FileSubType": "00"
},
"StringFileInfo": {
"Comments": "",
"CompanyName": "",
"FileDescription": "",
"FileVersion": "",
"InternalName": "",
"LegalCopyright": "",
"LegalTrademarks": "",
"OriginalFilename": "",
"PrivateBuild": "",
"ProductName": "",
"ProductVersion": "v1.0.0.0",
"SpecialBuild": ""
},
"VarFileInfo": {
"Translation": {
"LangID": "0409",
"CharsetID": "04B0"
}
},
"IconPath": "",
"ManifestPath": "runhcs.exe.manifest"
}

View File

@ -1,209 +0,0 @@
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"os"
"syscall"
winio "github.com/Microsoft/go-winio"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/Microsoft/hcsshim/internal/logfields"
"github.com/Microsoft/hcsshim/internal/runhcs"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
// vmID derives the identifier used for the utility VM that hosts the
// container with the given id.
func vmID(id string) string {
	const vmSuffix = "@vm"
	return id + vmSuffix
}
// vmshimCommand is the hidden helper process launched by runhcs itself.
// It creates and runs a utility VM (LCOW or WCOW), then services
// container requests for that VM over a named pipe until the VM exits.
var vmshimCommand = cli.Command{
	Name:   "vmshim",
	Usage:  `launch a VM and containers inside it (do not call it outside of runhcs)`,
	Hidden: true,
	Flags: []cli.Flag{
		cli.StringFlag{Name: "log-pipe", Hidden: true},
		cli.StringFlag{Name: "os", Hidden: true},
	},
	Before: appargs.Validate(argID),
	Action: func(context *cli.Context) error {
		// Route logging to the parent-supplied pipe when given,
		// otherwise to stderr.
		logPipe := context.String("log-pipe")
		if logPipe != "" {
			lpc, err := winio.DialPipe(logPipe, nil)
			if err != nil {
				return err
			}
			defer lpc.Close()
			logrus.SetOutput(lpc)
		} else {
			logrus.SetOutput(os.Stderr)
		}
		// Until initialization completes, fatal errors are reported to
		// the parent process over stdout.
		fatalWriter.Writer = os.Stdout
		pipePath := context.Args().First()
		// The VM options document is passed as JSON on stdin.
		optsj, err := ioutil.ReadAll(os.Stdin)
		if err != nil {
			return err
		}
		os.Stdin.Close()
		var opts interface{}
		isLCOW := context.String("os") == "linux"
		if isLCOW {
			opts = &uvm.OptionsLCOW{}
		} else {
			opts = &uvm.OptionsWCOW{}
		}
		err = json.Unmarshal(optsj, opts)
		if err != nil {
			return err
		}
		// Listen on the named pipe associated with this VM.
		l, err := winio.ListenPipe(pipePath, &winio.PipeConfig{MessageMode: true})
		if err != nil {
			return err
		}
		var vm *uvm.UtilityVM
		if isLCOW {
			vm, err = uvm.CreateLCOW(opts.(*uvm.OptionsLCOW))
		} else {
			vm, err = uvm.CreateWCOW(opts.(*uvm.OptionsWCOW))
		}
		if err != nil {
			return err
		}
		defer vm.Close()
		if err = vm.Start(); err != nil {
			return err
		}
		// Asynchronously wait for the VM to exit.
		exitCh := make(chan error)
		go func() {
			exitCh <- vm.Wait()
		}()
		defer vm.Terminate()
		// Alert the parent process that initialization has completed
		// successfully.
		os.Stdout.Write(runhcs.ShimSuccess)
		os.Stdout.Close()
		fatalWriter.Writer = ioutil.Discard
		// Accept pipe connections in the background and feed them to the
		// request loop below.
		pipeCh := make(chan net.Conn)
		go func() {
			for {
				conn, err := l.Accept()
				if err != nil {
					logrus.Error(err)
					continue
				}
				pipeCh <- conn
			}
		}()
		// Service one request per connection until the VM exits.
		for {
			select {
			case <-exitCh:
				return nil
			case pipe := <-pipeCh:
				err = processRequest(vm, pipe)
				if err == nil {
					_, err = pipe.Write(runhcs.ShimSuccess)
					// Wait until the pipe is closed before closing the
					// container so that it is properly handed off to the other
					// process.
					if err == nil {
						err = closeWritePipe(pipe)
					}
					if err == nil {
						ioutil.ReadAll(pipe)
					}
				} else {
					logrus.WithError(err).
						Error("failed creating container in VM")
					fmt.Fprintf(pipe, "%v", err)
				}
				pipe.Close()
			}
		}
	},
}
// processRequest decodes a single VMRequest from pipe and performs the
// requested operation against the named container within vm. The
// returned error is relayed to the peer by the caller.
func processRequest(vm *uvm.UtilityVM, pipe net.Conn) error {
	var req runhcs.VMRequest
	err := json.NewDecoder(pipe).Decode(&req)
	if err != nil {
		return err
	}
	logrus.WithFields(logrus.Fields{
		logfields.ContainerID:     req.ID,
		logfields.VMShimOperation: req.Op,
	}).Debug("process request")
	c, err := getContainer(req.ID, false)
	if err != nil {
		return err
	}
	defer func() {
		// c is set to nil below when ownership of the container is
		// transferred to a background goroutine.
		if c != nil {
			c.Close()
		}
	}()
	switch req.Op {
	case runhcs.OpCreateContainer:
		err = createContainerInHost(c, vm)
		if err != nil {
			return err
		}
		// Hand the container off to a goroutine that closes it once it
		// exits; the deferred Close above must not run for it.
		c2 := c
		c = nil
		go func() {
			c2.hc.Wait()
			c2.Close()
		}()
	case runhcs.OpUnmountContainer, runhcs.OpUnmountContainerDiskOnly:
		err = c.unmountInHost(vm, req.Op == runhcs.OpUnmountContainer)
		if err != nil {
			return err
		}
	case runhcs.OpSyncNamespace:
		return errors.New("Not implemented")
	default:
		// Unknown operations indicate a protocol bug, not a user error.
		panic("unknown operation")
	}
	return nil
}
// noVMError indicates that the process hosting the utility VM with the
// given ID could not be reached over its named pipe.
type noVMError struct {
	ID string
}

// Error implements the error interface.
func (err *noVMError) Error() string {
	return "VM " + err.ID + " cannot be contacted"
}
// issueVMRequest sends op for this container to its hosting VM's shim
// pipe. A missing pipe is translated into a *noVMError so callers can
// distinguish an unreachable VM from other failures.
func (c *container) issueVMRequest(op runhcs.VMRequestOp) error {
	req := runhcs.VMRequest{
		ID: c.ID,
		Op: op,
	}
	err := runhcs.IssueVMRequest(c.VMPipePath(), &req)
	if err == nil {
		return nil
	}
	if perr, ok := err.(*os.PathError); ok && perr.Err == syscall.ERROR_FILE_NOT_FOUND {
		return &noVMError{c.HostID}
	}
	return err
}

View File

@ -1,64 +0,0 @@
package main
import (
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"github.com/Microsoft/hcsshim/ext4/tar2ext4"
)
// Command-line flags controlling the tar-to-ext4 conversion.
var (
	input      = flag.String("i", "", "input file")
	output     = flag.String("o", "", "output file")
	overlay    = flag.Bool("overlay", false, "produce overlayfs-compatible layer image")
	vhd        = flag.Bool("vhd", false, "add a VHD footer to the end of the image")
	inlineData = flag.Bool("inline", false, "write small file data into the inode; not compatible with DAX")
)
// main converts a tar stream (stdin or -i) into an ext4 image (-o),
// applying the conversion options selected by the flags.
//
// Fixes over the previous version: the input and output files are now
// closed, and a Close error on the output is reported instead of being
// silently dropped.
func main() {
	flag.Parse()
	if flag.NArg() != 0 || len(*output) == 0 {
		flag.Usage()
		os.Exit(1)
	}

	err := func() (err error) {
		in := os.Stdin
		if *input != "" {
			in, err = os.Open(*input)
			if err != nil {
				return err
			}
			defer in.Close()
		}
		out, err := os.Create(*output)
		if err != nil {
			return err
		}
		defer func() {
			// Surface close failures (e.g. deferred write errors) unless
			// an earlier error is already being returned.
			if cerr := out.Close(); err == nil {
				err = cerr
			}
		}()
		var opts []tar2ext4.Option
		if *overlay {
			opts = append(opts, tar2ext4.ConvertWhiteout)
		}
		if *vhd {
			opts = append(opts, tar2ext4.AppendVhdFooter)
		}
		if *inlineData {
			opts = append(opts, tar2ext4.InlineData)
		}
		if err = tar2ext4.Convert(in, out, opts...); err != nil {
			return err
		}
		// Exhaust the tar stream so a process piping into us does not
		// receive a broken-pipe error; best effort only.
		io.Copy(ioutil.Discard, in)
		return nil
	}()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}

View File

@ -1,36 +0,0 @@
package main
import (
"path/filepath"
"github.com/Microsoft/hcsshim"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/urfave/cli"
)
// createCommand implements "wclayer create": it creates a new writable
// scratch layer on top of the given read-only parent layers.
var createCommand = cli.Command{
	Name:  "create",
	Usage: "creates a new writable container layer",
	Flags: []cli.Flag{
		cli.StringSliceFlag{
			Name:  "layer, l",
			Usage: "paths to the read-only parent layers",
		},
	},
	ArgsUsage: "<layer path>",
	Before:    appargs.Validate(appargs.NonEmptyString),
	Action: func(context *cli.Context) error {
		layerPath, err := filepath.Abs(context.Args().First())
		if err != nil {
			return err
		}
		parentLayers, err := normalizeLayers(context.StringSlice("layer"), true)
		if err != nil {
			return err
		}
		// The immediate parent is the last layer in the list.
		parent := parentLayers[len(parentLayers)-1]
		return hcsshim.CreateScratchLayer(driverInfo, layerPath, parent, parentLayers)
	},
}

View File

@ -1,66 +0,0 @@
package main
import (
"compress/gzip"
"io"
"os"
"path/filepath"
winio "github.com/Microsoft/go-winio"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/Microsoft/hcsshim/internal/ociwclayer"
"github.com/urfave/cli"
)
// exportCommand implements "wclayer export": it exports a layer to an
// OCI-format layer tar, optionally gzip-compressed, written to a file or
// stdout.
//
// Fix: the gzip writer was previously never closed, so compressed output
// was missing its buffered tail and the GZIP footer, producing a
// truncated/corrupt archive. It is now closed via a defer that also
// propagates the close error.
var exportCommand = cli.Command{
	Name:  "export",
	Usage: "exports a layer to a tar file",
	Flags: []cli.Flag{
		cli.StringSliceFlag{
			Name:  "layer, l",
			Usage: "paths to the read-only parent layers",
		},
		cli.StringFlag{
			Name:  "output, o",
			Usage: "output layer tar (defaults to stdout)",
		},
		cli.BoolFlag{
			Name:  "gzip, z",
			Usage: "compress output with gzip compression",
		},
	},
	ArgsUsage: "<layer path>",
	Before:    appargs.Validate(appargs.NonEmptyString),
	Action: func(context *cli.Context) (err error) {
		path, err := filepath.Abs(context.Args().First())
		if err != nil {
			return err
		}
		layers, err := normalizeLayers(context.StringSlice("layer"), true)
		if err != nil {
			return err
		}
		// Backup privilege is required to read all layer files.
		err = winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege})
		if err != nil {
			return err
		}
		fp := context.String("output")
		f := os.Stdout
		if fp != "" {
			f, err = os.Create(fp)
			if err != nil {
				return err
			}
			defer f.Close()
		}
		w := io.Writer(f)
		if context.Bool("gzip") {
			gz := gzip.NewWriter(w)
			// Close flushes buffered data and writes the GZIP footer;
			// this defer runs before f.Close above, as required.
			defer func() {
				if cerr := gz.Close(); err == nil {
					err = cerr
				}
			}()
			w = gz
		}
		return ociwclayer.ExportLayer(w, path, layers)
	},
}

View File

@ -1,74 +0,0 @@
package main
import (
"bufio"
"compress/gzip"
"io"
"os"
"path/filepath"
"github.com/Microsoft/go-winio"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/Microsoft/hcsshim/internal/ociwclayer"
"github.com/urfave/cli"
)
// importCommand implements "wclayer import": it reads an OCI layer tar
// (optionally gzip-compressed) from a file or stdin and imports it into
// a new layer directory on top of the given parent layers.
var importCommand = cli.Command{
	Name:  "import",
	Usage: "imports a layer from a tar file",
	Flags: []cli.Flag{
		cli.StringSliceFlag{
			Name:  "layer, l",
			Usage: "paths to the read-only parent layers",
		},
		cli.StringFlag{
			Name:  "input, i",
			Usage: "input layer tar (defaults to stdin)",
		},
	},
	ArgsUsage: "<layer path>",
	Before:    appargs.Validate(appargs.NonEmptyString),
	Action: func(context *cli.Context) (err error) {
		path, err := filepath.Abs(context.Args().First())
		if err != nil {
			return err
		}
		layers, err := normalizeLayers(context.StringSlice("layer"), false)
		if err != nil {
			return err
		}
		fp := context.String("input")
		f := os.Stdin
		if fp != "" {
			f, err = os.Open(fp)
			if err != nil {
				return err
			}
			defer f.Close()
		}
		// Transparently decompress gzip input.
		r, err := addDecompressor(f)
		if err != nil {
			return err
		}
		// Backup/restore privileges are required to write layer files.
		err = winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege})
		if err != nil {
			return err
		}
		_, err = ociwclayer.ImportLayer(r, path, layers)
		return err
	},
}
func addDecompressor(r io.Reader) (io.Reader, error) {
b := bufio.NewReader(r)
hdr, err := b.Peek(3)
if err != nil {
return nil, err
}
if hdr[0] == 0x1f && hdr[1] == 0x8b && hdr[2] == 8 {
return gzip.NewReader(b)
}
return b, nil
}

View File

@ -1,88 +0,0 @@
package main
import (
"errors"
"fmt"
"os"
"path/filepath"
"github.com/Microsoft/hcsshim"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/urfave/cli"
)
// mountCommand implements "wclayer mount": it activates and prepares a
// scratch layer over its parent layers and prints the resulting volume
// mount path. On failure the deferred handlers unwind whichever of the
// prepare/activate steps already succeeded.
var mountCommand = cli.Command{
	Name:      "mount",
	Usage:     "mounts a scratch",
	ArgsUsage: "<scratch path>",
	Flags: []cli.Flag{
		cli.StringSliceFlag{
			Name:  "layer, l",
			Usage: "paths to the parent layers for this layer",
		},
	},
	Action: func(context *cli.Context) (err error) {
		if context.NArg() != 1 {
			return errors.New("invalid usage")
		}
		path, err := filepath.Abs(context.Args().First())
		if err != nil {
			return err
		}
		layers, err := normalizeLayers(context.StringSlice("layer"), true)
		if err != nil {
			return err
		}
		err = hcsshim.ActivateLayer(driverInfo, path)
		if err != nil {
			return err
		}
		defer func() {
			// Undo the activation if a later step fails.
			if err != nil {
				hcsshim.DeactivateLayer(driverInfo, path)
			}
		}()
		err = hcsshim.PrepareLayer(driverInfo, path, layers)
		if err != nil {
			return err
		}
		defer func() {
			// Undo the prepare if retrieving the mount path fails.
			if err != nil {
				hcsshim.UnprepareLayer(driverInfo, path)
			}
		}()
		mountPath, err := hcsshim.GetLayerMountPath(driverInfo, path)
		if err != nil {
			return err
		}
		_, err = fmt.Println(mountPath)
		return err
	},
}
// unmountCommand implements "wclayer unmount": it unprepares (best
// effort) and deactivates a mounted scratch layer.
var unmountCommand = cli.Command{
	Name:      "unmount",
	Usage:     "unmounts a scratch",
	ArgsUsage: "<layer path>",
	Before:    appargs.Validate(appargs.NonEmptyString),
	Action: func(context *cli.Context) (err error) {
		layerPath, err := filepath.Abs(context.Args().First())
		if err != nil {
			return err
		}
		// Unprepare failures are reported but deliberately do not abort:
		// deactivation should still be attempted.
		if uerr := hcsshim.UnprepareLayer(driverInfo, layerPath); uerr != nil {
			fmt.Fprintln(os.Stderr, uerr)
		}
		return hcsshim.DeactivateLayer(driverInfo, layerPath)
	},
}

View File

@ -1,31 +0,0 @@
package main
import (
"path/filepath"
winio "github.com/Microsoft/go-winio"
"github.com/Microsoft/hcsshim"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/urfave/cli"
)
// removeCommand implements "wclayer remove": it permanently deletes an
// entire layer directory.
var removeCommand = cli.Command{
	Name:      "remove",
	Usage:     "permanently removes a layer directory in its entirety",
	ArgsUsage: "<layer path>",
	Before:    appargs.Validate(appargs.NonEmptyString),
	Action: func(context *cli.Context) (err error) {
		layerPath, err := filepath.Abs(context.Args().First())
		if err != nil {
			return err
		}
		// Backup/restore privileges are needed to delete protected layer files.
		privileges := []string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}
		if err = winio.EnableProcessPrivileges(privileges); err != nil {
			return err
		}
		return hcsshim.DestroyLayer(driverInfo, layerPath)
	},
}

View File

@ -1,43 +0,0 @@
{
"FixedFileInfo": {
"FileVersion": {
"Major": 1,
"Minor": 0,
"Patch": 0,
"Build": 0
},
"ProductVersion": {
"Major": 1,
"Minor": 0,
"Patch": 0,
"Build": 0
},
"FileFlagsMask": "3f",
"FileFlags ": "00",
"FileOS": "040004",
"FileType": "01",
"FileSubType": "00"
},
"StringFileInfo": {
"Comments": "",
"CompanyName": "",
"FileDescription": "",
"FileVersion": "",
"InternalName": "",
"LegalCopyright": "",
"LegalTrademarks": "",
"OriginalFilename": "",
"PrivateBuild": "",
"ProductName": "",
"ProductVersion": "v1.0.0.0",
"SpecialBuild": ""
},
"VarFileInfo": {
"Translation": {
"LangID": "0409",
"CharsetID": "04B0"
}
},
"IconPath": "",
"ManifestPath": "wclayer.exe.manifest"
}

View File

@ -1,10 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<description>wclayer</description>
<compatibility xmlns="urn:schemas-microsoft-com:compatibility.v1">
<application>
<!-- Windows 10 -->
<supportedOS Id="{8e0f7a12-bfb3-4fe8-b9a5-48fd50a15a9a}"/>
</application>
</compatibility>
</assembly>

View File

@ -1,60 +0,0 @@
package main
import (
"errors"
"fmt"
"os"
"path/filepath"
"github.com/Microsoft/hcsshim"
"github.com/urfave/cli"
)
// Add a manifest to get proper Windows version detection.
//
// goversioninfo can be installed with "go get github.com/josephspurrier/goversioninfo/cmd/goversioninfo"
//go:generate goversioninfo -platform-specific
// usage is the long help text shown by the CLI framework.
var usage = `Windows Container layer utility
wclayer is a command line tool for manipulating Windows Container
storage layers.  It can import and export layers from and to OCI format
layer tar files, create new writable layers, and mount and unmount
container images.`

// driverInfo selects the default (zero-valued) storage driver settings
// for all layer operations.
var driverInfo = hcsshim.DriverInfo{}
// main wires up the wclayer CLI and dispatches to its subcommands,
// exiting nonzero on error.
func main() {
	app := cli.NewApp()
	app.Name = "wclayer"
	app.Commands = []cli.Command{
		createCommand,
		exportCommand,
		importCommand,
		mountCommand,
		removeCommand,
		unmountCommand,
	}
	app.Usage = usage

	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
func normalizeLayers(il []string, needOne bool) ([]string, error) {
if needOne && len(il) == 0 {
return nil, errors.New("at least one read-only layer must be specified")
}
ol := make([]string, len(il))
for i := range il {
var err error
ol[i], err = filepath.Abs(il[i])
if err != nil {
return nil, err
}
}
return ol, nil
}

File diff suppressed because it is too large Load Diff

View File

@ -1,355 +0,0 @@
package compactext4
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"os"
"strings"
"testing"
"time"
"github.com/Microsoft/hcsshim/ext4/internal/format"
)
// testFile describes one filesystem entry to create in the image under
// test, together with the expectations used to validate it afterwards.
type testFile struct {
	Path        string // path of the entry within the image
	File        *File  // metadata to create; nil means create a hard link instead
	Data        []byte // literal file contents
	DataSize    int64  // if nonzero, stream this many generated bytes instead of Data
	Link        string // hard-link target path (used when File is nil)
	ExpectError bool   // true if creating this entry is expected to fail
}
var (
	data []byte // deterministic byte pattern used as file contents in most tests
	name string // 300-character digit string for long-name/symlink tests
)

// init builds the shared test fixtures: data is two blocks of a
// repeating byte pattern, and name is a long string of cycling digits.
func init() {
	data = make([]byte, blockSize*2)
	for i := range data {
		data[i] = uint8(i)
	}

	nameb := make([]byte, 300)
	for i := range nameb {
		nameb[i] = byte('0' + i%10)
	}
	name = string(nameb)
}
type largeData struct {
pos int64
}
func (d *largeData) Read(b []byte) (int, error) {
p := d.pos
var pb [8]byte
for i := range b {
binary.LittleEndian.PutUint64(pb[:], uint64(p+int64(i)))
b[i] = pb[i%8]
}
p += int64(len(b))
return len(b), nil
}
// Reader returns the content stream for the test file: a generated
// stream of DataSize bytes when DataSize is set, otherwise the literal
// Data bytes.
func (tf *testFile) Reader() io.Reader {
	if tf.DataSize != 0 {
		return io.LimitReader(&largeData{}, tf.DataSize)
	}
	return bytes.NewReader(tf.Data)
}
// createTestFile creates one testFile entry in w — either a new file
// (when tf.File is set, with Size derived from Data/DataSize) or a hard
// link — and checks the outcome against tf.ExpectError. On success it
// also writes the entry's content stream.
func createTestFile(t *testing.T, w *Writer, tf testFile) {
	var err error
	if tf.File != nil {
		tf.File.Size = int64(len(tf.Data))
		if tf.File.Size == 0 {
			tf.File.Size = tf.DataSize
		}
		err = w.Create(tf.Path, tf.File)
	} else {
		err = w.Link(tf.Link, tf.Path)
	}
	if tf.ExpectError && err == nil {
		t.Errorf("%s: expected error", tf.Path)
	} else if !tf.ExpectError && err != nil {
		t.Error(err)
	} else {
		// For link entries Data/DataSize are empty, so this copies nothing.
		_, err := io.Copy(w, tf.Reader())
		if err != nil {
			t.Error(err)
		}
	}
}
// expectedMode returns the mode bits the filesystem should report for f:
// entries created without a type default to regular files, and symlinks
// always carry full 0777 permissions.
func expectedMode(f *File) uint16 {
	typ := f.Mode & format.TypeMask
	if typ == 0 {
		return f.Mode | S_IFREG
	}
	if typ == S_IFLNK {
		return f.Mode | 0777
	}
	return f.Mode
}
// expectedSize returns the size the filesystem should report for f:
// regular files (including untyped entries) keep their Size, symlinks
// report the target-name length, and all other types report zero.
func expectedSize(f *File) int64 {
	switch f.Mode & format.TypeMask {
	case 0, S_IFREG:
		return f.Size
	case S_IFLNK:
		return int64(len(f.Linkname))
	default:
		return 0
	}
}
// xattrsEqual reports whether two xattr maps contain exactly the same
// attribute names with byte-identical values.
//
// Fix: the previous version compared x2[name] without checking presence,
// and bytes.Equal(nil, []byte{}) is true — so two equal-sized maps with
// different keys but empty values compared as equal.
func xattrsEqual(x1, x2 map[string][]byte) bool {
	if len(x1) != len(x2) {
		return false
	}
	for name, value := range x1 {
		v2, ok := x2[name]
		if !ok || !bytes.Equal(v2, value) {
			return false
		}
	}
	return true
}
// fileEqual reports whether two File descriptions represent the same
// entry, comparing normalized mode and size (via expectedMode and
// expectedSize), ownership, all four timestamps, device numbers, and
// extended attributes.
func fileEqual(f1, f2 *File) bool {
	if f1.Linkname != f2.Linkname ||
		expectedSize(f1) != expectedSize(f2) ||
		expectedMode(f1) != expectedMode(f2) ||
		f1.Uid != f2.Uid ||
		f1.Gid != f2.Gid {
		return false
	}
	if !f1.Atime.Equal(f2.Atime) ||
		!f1.Ctime.Equal(f2.Ctime) ||
		!f1.Mtime.Equal(f2.Mtime) ||
		!f1.Crtime.Equal(f2.Crtime) {
		return false
	}
	if f1.Devmajor != f2.Devmajor || f1.Devminor != f2.Devminor {
		return false
	}
	return xattrsEqual(f1.Xattrs, f2.Xattrs)
}
// runTestsOnFiles builds an ext4 image containing testFiles in order,
// checks each created entry's metadata via the writer's Stat, runs fsck
// on the finished image, and — when mounting is possible — mounts it and
// verifies the final version of each path against its expectations.
func runTestsOnFiles(t *testing.T, testFiles []testFile, opts ...Option) {
	image := "testfs.img"
	imagef, err := os.Create(image)
	if err != nil {
		t.Fatal(err)
	}
	defer os.Remove(image)
	defer imagef.Close()

	w := NewWriter(imagef, opts...)
	for _, tf := range testFiles {
		createTestFile(t, w, tf)
		if !tf.ExpectError && tf.File != nil {
			f, err := w.Stat(tf.Path)
			if err != nil {
				// Stat may legitimately report that an entry can no
				// longer be retrieved; anything else is a failure.
				if !strings.Contains(err.Error(), "cannot retrieve") {
					t.Error(err)
				}
			} else if !fileEqual(f, tf.File) {
				t.Errorf("%s: stat mismatch: %#v %#v", tf.Path, tf.File, f)
			}
		}
	}
	if t.Failed() {
		return
	}
	if err := w.Close(); err != nil {
		t.Fatal(err)
	}
	fsck(t, image)

	mountPath := "testmnt"
	if mountImage(t, image, mountPath) {
		defer unmountImage(t, mountPath)
		validated := make(map[string]*testFile)
		// Walk entries newest-first so only the final version of each
		// path (later entries may replace earlier ones) is validated.
		for i := range testFiles {
			tf := testFiles[len(testFiles)-i-1]
			if validated[tf.Link] != nil {
				// The link target was subsequently replaced. Find the
				// earlier instance.
				for j := range testFiles[:len(testFiles)-i-1] {
					otf := testFiles[j]
					if otf.Path == tf.Link && !otf.ExpectError {
						tf = otf
						break
					}
				}
			}
			if !tf.ExpectError && validated[tf.Path] == nil {
				verifyTestFile(t, mountPath, tf)
				validated[tf.Path] = &tf
			}
		}
	}
}
// TestBasic exercises one of each basic entry kind: empty and small
// regular files, timestamped files, block-aligned sizes, symlinks of
// several target lengths (around the inline-target boundaries),
// directories, FIFOs, sockets, device nodes, and a hard link.
func TestBasic(t *testing.T) {
	now := time.Now()
	testFiles := []testFile{
		{Path: "empty", File: &File{Mode: 0644}},
		{Path: "small", File: &File{Mode: 0644}, Data: data[:40]},
		{Path: "time", File: &File{Atime: now, Ctime: now.Add(time.Second), Mtime: now.Add(time.Hour)}},
		{Path: "block_1", File: &File{Mode: 0644}, Data: data[:blockSize]},
		{Path: "block_2", File: &File{Mode: 0644}, Data: data[:blockSize*2]},
		{Path: "symlink", File: &File{Linkname: "block_1", Mode: format.S_IFLNK}},
		{Path: "symlink_59", File: &File{Linkname: name[:59], Mode: format.S_IFLNK}},
		{Path: "symlink_60", File: &File{Linkname: name[:60], Mode: format.S_IFLNK}},
		{Path: "symlink_120", File: &File{Linkname: name[:120], Mode: format.S_IFLNK}},
		{Path: "symlink_300", File: &File{Linkname: name[:300], Mode: format.S_IFLNK}},
		{Path: "dir", File: &File{Mode: format.S_IFDIR | 0755}},
		{Path: "dir/fifo", File: &File{Mode: format.S_IFIFO}},
		{Path: "dir/sock", File: &File{Mode: format.S_IFSOCK}},
		{Path: "dir/blk", File: &File{Mode: format.S_IFBLK, Devmajor: 0x5678, Devminor: 0x1234}},
		{Path: "dir/chr", File: &File{Mode: format.S_IFCHR, Devmajor: 0x5678, Devminor: 0x1234}},
		{Path: "dir/hard_link", Link: "small"},
	}
	runTestsOnFiles(t, testFiles)
}
// TestLargeDirectory creates a single directory with 50,000 entries to
// exercise directories that span many blocks.
//
// Improvement: the slice is pre-sized, avoiding tens of thousands of
// incremental append growth copies.
func TestLargeDirectory(t *testing.T) {
	const count = 50000
	testFiles := make([]testFile, 0, count+1)
	testFiles = append(testFiles, testFile{
		Path: "bigdir", File: &File{Mode: format.S_IFDIR | 0755},
	})
	for i := 0; i < count; i++ {
		testFiles = append(testFiles, testFile{
			Path: fmt.Sprintf("bigdir/%d", i), File: &File{Mode: 0644},
		})
	}
	runTestsOnFiles(t, testFiles)
}
// TestInlineData checks file sizes around the inline-data capacity when
// the InlineData option is enabled: everything up to inlineDataSize fits
// in the inode, and one byte more forces a data block.
func TestInlineData(t *testing.T) {
	testFiles := []testFile{
		{Path: "inline_30", File: &File{Mode: 0644}, Data: data[:30]},
		{Path: "inline_60", File: &File{Mode: 0644}, Data: data[:60]},
		{Path: "inline_120", File: &File{Mode: 0644}, Data: data[:120]},
		{Path: "inline_full", File: &File{Mode: 0644}, Data: data[:inlineDataSize]},
		{Path: "block_min", File: &File{Mode: 0644}, Data: data[:inlineDataSize+1]},
	}
	runTestsOnFiles(t, testFiles, InlineData)
}
// TestXattrs creates files carrying small and larger extended
// attributes to cover both in-inode and out-of-inode xattr storage.
func TestXattrs(t *testing.T) {
	testFiles := []testFile{
		{Path: "withsmallxattrs",
			File: &File{
				Mode: format.S_IFREG | 0644,
				Xattrs: map[string][]byte{
					"user.foo": []byte("test"),
					"user.bar": []byte("test2"),
				},
			},
		},
		{Path: "withlargexattrs",
			File: &File{
				Mode: format.S_IFREG | 0644,
				Xattrs: map[string][]byte{
					"user.foo": data[:100],
					"user.bar": data[:50],
				},
			},
		},
	}
	runTestsOnFiles(t, testFiles)
}
// TestReplace re-creates existing paths to exercise the writer's replace
// semantics: changing permissions, replacing files and link targets,
// growing/shrinking/deleting xattrs, and the error cases (changing an
// entry's type, orphaning a file, or an empty path for a non-root
// entry).
func TestReplace(t *testing.T) {
	testFiles := []testFile{
		{Path: "lost+found", ExpectError: true, File: &File{}}, // can't change type
		{Path: "lost+found", File: &File{Mode: format.S_IFDIR | 0777}},

		{Path: "dir", File: &File{Mode: format.S_IFDIR | 0777}},
		{Path: "dir/file", File: &File{}},
		{Path: "dir", File: &File{Mode: format.S_IFDIR | 0700}},

		{Path: "file", File: &File{}},
		{Path: "file", File: &File{Mode: 0600}},
		{Path: "file2", File: &File{}},
		{Path: "link", Link: "file2"},
		{Path: "file2", File: &File{Mode: 0600}},

		{Path: "nolinks", File: &File{}},
		{Path: "nolinks", ExpectError: true, Link: "file"}, // would orphan nolinks

		{Path: "onelink", File: &File{}},
		{Path: "onelink2", Link: "onelink"},
		{Path: "onelink", Link: "file"},

		{Path: "", ExpectError: true, File: &File{}},
		{Path: "", ExpectError: true, Link: "file"},
		{Path: "", File: &File{Mode: format.S_IFDIR | 0777}},

		{Path: "smallxattr", File: &File{Xattrs: map[string][]byte{"user.foo": data[:4]}}},
		{Path: "smallxattr", File: &File{Xattrs: map[string][]byte{"user.foo": data[:8]}}},

		{Path: "smallxattr_delete", File: &File{Xattrs: map[string][]byte{"user.foo": data[:4]}}},
		{Path: "smallxattr_delete", File: &File{}},

		{Path: "largexattr", File: &File{Xattrs: map[string][]byte{"user.small": data[:8], "user.foo": data[:200]}}},
		{Path: "largexattr", File: &File{Xattrs: map[string][]byte{"user.small": data[:12], "user.foo": data[:400]}}},

		{Path: "largexattr", File: &File{Xattrs: map[string][]byte{"user.foo": data[:200]}}},
		{Path: "largexattr_delete", File: &File{}},
	}
	runTestsOnFiles(t, testFiles)
}
// TestTime verifies that converting a time to the on-disk representation
// and back (timeToFsTime / fsTimeToTime) loses no nanosecond precision.
func TestTime(t *testing.T) {
	now := time.Now()
	now2 := fsTimeToTime(timeToFsTime(now))
	if now.UnixNano() != now2.UnixNano() {
		t.Fatalf("%s != %s", now, now2)
	}
}
// TestLargeFile writes files of increasing size (1MB, 200MB, 600MB) with
// generated content to exercise large-file block mapping.
// (The previous "can't change type" comments here were copy-paste
// leftovers from TestReplace and did not apply.)
func TestLargeFile(t *testing.T) {
	testFiles := []testFile{
		{Path: "small", File: &File{}, DataSize: 1024 * 1024},        // 1MB
		{Path: "medium", File: &File{}, DataSize: 200 * 1024 * 1024}, // 200MB
		{Path: "large", File: &File{}, DataSize: 600 * 1024 * 1024},  // 600MB
	}
	runTestsOnFiles(t, testFiles)
}
// TestFileLinkLimit creates format.MaxLinks hard links to one file and
// expects the final link (exceeding the limit) to fail.
func TestFileLinkLimit(t *testing.T) {
	testFiles := []testFile{
		{Path: "file", File: &File{}},
	}
	for i := 0; i < format.MaxLinks; i++ {
		testFiles = append(testFiles, testFile{Path: fmt.Sprintf("link%d", i), Link: "file"})
	}
	testFiles[len(testFiles)-1].ExpectError = true
	runTestsOnFiles(t, testFiles)
}
// TestDirLinkLimit fills a directory with format.MaxLinks-1 subdirectories
// and expects the last one to fail: each child's ".." entry counts against
// the parent's link count.
func TestDirLinkLimit(t *testing.T) {
	files := make([]testFile, 0, format.MaxLinks)
	files = append(files, testFile{Path: "dir", File: &File{Mode: S_IFDIR}})
	for i := 0; i < format.MaxLinks-1; i++ {
		files = append(files, testFile{Path: fmt.Sprintf("dir/%d", i), File: &File{Mode: S_IFDIR}})
	}
	files[len(files)-1].ExpectError = true
	runTestsOnFiles(t, files)
}
// TestLargeDisk runs the standard file tests with the maximum supported
// disk size to exercise metadata reservation for very large disks.
func TestLargeDisk(t *testing.T) {
	testFiles := []testFile{
		{Path: "file", File: &File{}},
	}
	runTestsOnFiles(t, testFiles, MaximumDiskSize(maxMaxDiskSize))
}

View File

@ -1,248 +0,0 @@
package compactext4
import (
"bytes"
"fmt"
"io"
"os"
"os/exec"
"path"
"syscall"
"testing"
"time"
"unsafe"
"github.com/Microsoft/hcsshim/ext4/internal/format"
)
func timeEqual(ts syscall.Timespec, t time.Time) bool {
sec, nsec := t.Unix(), t.Nanosecond()
if t.IsZero() {
sec, nsec = 0, 0
}
return ts.Sec == sec && int(ts.Nsec) == nsec
}
// expectedDevice computes the Linux dev_t encoding for the file's device
// numbers: the low 8 bits of the minor occupy bits 0-7, the major starts
// at bit 8, and the remaining minor bits start at bit 20.
func expectedDevice(f *File) uint64 {
	minor, major := f.Devminor, f.Devmajor
	return uint64(minor&0xff | major<<8 | (minor&0xffffff00)<<12)
}
// llistxattr lists the extended-attribute names of the file or symlink at
// path (without following symlinks), filling b with NUL-separated names and
// returning the number of bytes written. It wraps the raw SYS_LLISTXATTR
// syscall.
func llistxattr(path string, b []byte) (int, error) {
	// BytePtrFromString returns an error instead of panicking when path
	// contains an embedded NUL byte (unlike the deprecated StringBytePtr).
	pathp, err := syscall.BytePtrFromString(path)
	if err != nil {
		return 0, &os.PathError{Path: path, Op: "llistxattr", Err: err}
	}
	var p unsafe.Pointer
	if len(b) > 0 {
		p = unsafe.Pointer(&b[0])
	}
	r, _, e := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(pathp)), uintptr(p), uintptr(len(b)))
	if e != 0 {
		return 0, &os.PathError{Path: path, Op: "llistxattr", Err: e}
	}
	return int(r), nil
}
// lgetxattr reads the value of extended attribute name on the file or
// symlink at path (without following symlinks) into b, returning the value
// length. It wraps the raw SYS_LGETXATTR syscall.
func lgetxattr(path string, name string, b []byte) (int, error) {
	// BytePtrFromString reports embedded NUL bytes as an error instead of
	// panicking (unlike the deprecated StringBytePtr).
	pathp, err := syscall.BytePtrFromString(path)
	if err != nil {
		return 0, &os.PathError{Path: path, Op: "lgetxattr", Err: err}
	}
	namep, err := syscall.BytePtrFromString(name)
	if err != nil {
		return 0, &os.PathError{Path: path, Op: "lgetxattr", Err: err}
	}
	var p unsafe.Pointer
	if len(b) > 0 {
		p = unsafe.Pointer(&b[0])
	}
	r, _, e := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathp)), uintptr(unsafe.Pointer(namep)), uintptr(p), uintptr(len(b)), 0, 0)
	if e != 0 {
		return 0, &os.PathError{Path: path, Op: "lgetxattr", Err: e}
	}
	return int(r), nil
}
// readXattrs returns all extended attributes of the file or symlink at path
// as a name -> value map.
func readXattrs(path string) (map[string][]byte, error) {
	xattrs := make(map[string][]byte)
	var buf [4096]byte
	var buf2 [4096]byte
	b := buf[:]
	n, err := llistxattr(path, b)
	if err != nil {
		return nil, err
	}
	b = b[:n]
	// The name list is a sequence of NUL-terminated strings.
	for len(b) != 0 {
		nn := bytes.IndexByte(b, 0)
		if nn < 0 {
			// Malformed list (no terminating NUL); stop rather than panic
			// on the out-of-range slice below.
			break
		}
		name := string(b[:nn])
		b = b[nn+1:]
		vn, err := lgetxattr(path, name, buf2[:])
		if err != nil {
			return nil, err
		}
		// Copy the value out of buf2: the buffer is reused on the next
		// iteration, so storing buf2[:vn] directly would alias every map
		// value to the same backing array and corrupt earlier entries.
		value := make([]byte, vn)
		copy(value, buf2[:vn])
		xattrs[name] = value
	}
	return xattrs, nil
}
func streamEqual(r1, r2 io.Reader) (bool, error) {
var b [4096]byte
var b2 [4096]byte
for {
n, err := r1.Read(b[:])
if n == 0 {
if err == io.EOF {
break
}
if err == nil {
continue
}
return false, err
}
_, err = io.ReadFull(r2, b2[:n])
if err == io.EOF || err == io.ErrUnexpectedEOF {
return false, nil
}
if err != nil {
return false, err
}
if !bytes.Equal(b[n:], b2[n:]) {
return false, nil
}
}
// Check the tail of r2
_, err := r2.Read(b[:1])
if err == nil {
return false, nil
}
if err != io.EOF {
return false, err
}
return true, nil
}
// verifyTestFile checks that the entry at tf.Path under the mounted image
// matches the expectations in tf: mode, ownership, size, device numbers,
// timestamps, xattrs and data for file entries, or shared inode identity
// for hard-link entries. Failures are reported with t.Error so verification
// continues for the remaining files.
func verifyTestFile(t *testing.T, mountPath string, tf testFile) {
	name := path.Join(mountPath, tf.Path)
	fi, err := os.Lstat(name)
	if err != nil {
		t.Error(err)
		return
	}
	st := fi.Sys().(*syscall.Stat_t)
	if tf.File != nil {
		// Size is only compared for non-directories.
		if st.Mode != uint32(expectedMode(tf.File)) ||
			st.Uid != tf.File.Uid ||
			st.Gid != tf.File.Gid ||
			(!fi.IsDir() && st.Size != expectedSize(tf.File)) ||
			st.Rdev != expectedDevice(tf.File) ||
			!timeEqual(st.Atim, tf.File.Atime) ||
			!timeEqual(st.Mtim, tf.File.Mtime) ||
			!timeEqual(st.Ctim, tf.File.Ctime) {
			t.Errorf("%s: stat mismatch, expected: %#v got: %#v", tf.Path, tf.File, st)
		}
		xattrs, err := readXattrs(name)
		if err != nil {
			t.Error(err)
		} else if !xattrsEqual(xattrs, tf.File.Xattrs) {
			t.Errorf("%s: xattr mismatch, expected: %#v got: %#v", tf.Path, tf.File.Xattrs, xattrs)
		}
		// Content verification depends on the entry's type bits.
		switch tf.File.Mode & format.TypeMask {
		case S_IFREG:
			// Regular file: byte-compare the on-disk data with the source.
			if f, err := os.Open(name); err != nil {
				t.Error(err)
			} else {
				same, err := streamEqual(f, tf.Reader())
				if err != nil {
					t.Error(err)
				} else if !same {
					t.Errorf("%s: data mismatch", tf.Path)
				}
				f.Close()
			}
		case S_IFLNK:
			// Symlink: the stored target must match.
			if link, err := os.Readlink(name); err != nil {
				t.Error(err)
			} else if link != tf.File.Linkname {
				t.Errorf("%s: link mismatch, expected: %s got: %s", tf.Path, tf.File.Linkname, link)
			}
		}
	} else {
		// Hard-link entry: it must share an inode with its link target.
		lfi, err := os.Lstat(path.Join(mountPath, tf.Link))
		if err != nil {
			t.Error(err)
			return
		}
		lst := lfi.Sys().(*syscall.Stat_t)
		if lst.Ino != st.Ino {
			t.Errorf("%s: hard link mismatch with %s, expected inode: %d got inode: %d", tf.Path, tf.Link, lst.Ino, st.Ino)
		}
	}
}
// capHeader mirrors the kernel's cap_user_header_t passed to SYS_CAPGET.
//
// The kernel's pid field is a 4-byte int at offset 4. Declaring it as Go's
// 8-byte int (on 64-bit platforms) inserts padding and shifts the field to
// offset 8, so the kernel would read the zeroed padding ("current process")
// rather than the field. int32 matches the ABI layout exactly.
type capHeader struct {
	version uint32
	pid     int32
}
// capData mirrors the kernel's cap_user_data_t: one 32-bit mask per
// capability set.
type capData struct {
	effective   uint32
	permitted   uint32
	inheritable uint32
}
// CAP_SYS_ADMIN is the Linux capability bit number checked before mounting.
const CAP_SYS_ADMIN = 21

// caps bundles the capget header with the two data words the kernel
// returns (covering 64 capability bits in total).
type caps struct {
	hdr  capHeader
	data [2]capData
}
// getCaps returns the current process's capability sets via the raw
// SYS_CAPGET syscall.
func getCaps() (caps, error) {
	var c caps
	// The first call (nil data pointer) has the kernel fill in its
	// preferred capability version; the second fetches the actual sets.
	for _, datap := range []unsafe.Pointer{nil, unsafe.Pointer(&c.data[0])} {
		if _, _, errno := syscall.Syscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(&c.hdr)), uintptr(datap), 0); errno != 0 {
			return c, fmt.Errorf("SYS_CAPGET: %v", errno)
		}
	}
	return c, nil
}
// mountImage loop-mounts the ext4 image read-only at mountPath. It returns
// false (after logging) when the process lacks CAP_SYS_ADMIN, since the
// mount cannot possibly succeed; any other failure is fatal to the test.
func mountImage(t *testing.T, image string, mountPath string) bool {
	c, err := getCaps()
	if err != nil || c.data[0].effective&(1<<uint(CAP_SYS_ADMIN)) == 0 {
		t.Log("cannot mount to run verification tests without CAP_SYS_ADMIN")
		return false
	}
	if err := os.MkdirAll(mountPath, 0777); err != nil {
		t.Fatal(err)
	}
	out, err := exec.Command("mount", "-o", "loop,ro", "-t", "ext4", image, mountPath).CombinedOutput()
	t.Logf("%s", out)
	if err != nil {
		t.Fatal(err)
	}
	return true
}
// unmountImage unmounts mountPath. Errors are only logged, not fatal, so a
// cleanup failure does not mask the real test result.
func unmountImage(t *testing.T, mountPath string) {
	out, err := exec.Command("umount", mountPath).CombinedOutput()
	t.Logf("%s", out)
	if err != nil {
		t.Log(err)
	}
}
// fsck runs e2fsck in forced, non-modifying mode (-f -n) against the image
// and fails the test if the filesystem is not clean.
func fsck(t *testing.T, image string) {
	cmd := exec.Command("e2fsck", "-v", "-f", "-n", image)
	out, err := cmd.CombinedOutput()
	t.Logf("%s", out)
	if err != nil {
		t.Fatal(err)
	}
}

View File

@ -1,18 +0,0 @@
// +build !linux
package compactext4
import "testing"
// verifyTestFile is a no-op on non-Linux platforms, where the generated
// image cannot be mounted for verification.
func verifyTestFile(t *testing.T, mountPath string, tf testFile) {
}

// mountImage reports that mounting is unsupported on this platform.
func mountImage(t *testing.T, image string, mountPath string) bool {
	return false
}

// unmountImage is a no-op on non-Linux platforms.
func unmountImage(t *testing.T, mountPath string) {
}

// fsck is a no-op on non-Linux platforms.
func fsck(t *testing.T, image string) {
}

View File

@ -1,411 +0,0 @@
package format
// SuperBlock is the on-disk layout of the ext4 superblock (struct
// ext4_super_block in the Linux kernel). Field order, widths, and the
// trailing padding/reserved fields mirror the disk format exactly, so this
// struct must not be reordered or resized.
type SuperBlock struct {
	InodesCount          uint32
	BlocksCountLow       uint32
	RootBlocksCountLow   uint32
	FreeBlocksCountLow   uint32
	FreeInodesCount      uint32
	FirstDataBlock       uint32
	LogBlockSize         uint32
	LogClusterSize       uint32
	BlocksPerGroup       uint32
	ClustersPerGroup     uint32
	InodesPerGroup       uint32
	Mtime                uint32
	Wtime                uint32
	MountCount           uint16
	MaxMountCount        uint16
	Magic                uint16 // must equal SuperBlockMagic
	State                uint16
	Errors               uint16
	MinorRevisionLevel   uint16
	LastCheck            uint32
	CheckInterval        uint32
	CreatorOS            uint32
	RevisionLevel        uint32
	DefaultReservedUid   uint16
	DefaultReservedGid   uint16
	FirstInode           uint32
	InodeSize            uint16
	BlockGroupNr         uint16
	FeatureCompat        CompatFeature
	FeatureIncompat      IncompatFeature
	FeatureRoCompat      RoCompatFeature
	UUID                 [16]uint8
	VolumeName           [16]byte
	LastMounted          [64]byte
	AlgorithmUsageBitmap uint32
	PreallocBlocks       uint8
	PreallocDirBlocks    uint8
	ReservedGdtBlocks    uint16
	JournalUUID          [16]uint8
	JournalInum          uint32
	JournalDev           uint32
	LastOrphan           uint32
	HashSeed             [4]uint32
	DefHashVersion       uint8
	JournalBackupType    uint8
	DescSize             uint16
	DefaultMountOpts     uint32
	FirstMetaBg          uint32
	MkfsTime             uint32
	JournalBlocks        [17]uint32
	// 64-bit and later ext4 extension fields follow.
	BlocksCountHigh      uint32
	RBlocksCountHigh     uint32
	FreeBlocksCountHigh  uint32
	MinExtraIsize        uint16
	WantExtraIsize       uint16
	Flags                uint32
	RaidStride           uint16
	MmpInterval          uint16
	MmpBlock             uint64
	RaidStripeWidth      uint32
	LogGroupsPerFlex     uint8
	ChecksumType         uint8
	ReservedPad          uint16
	KbytesWritten        uint64
	SnapshotInum         uint32
	SnapshotID           uint32
	SnapshotRBlocksCount uint64
	SnapshotList         uint32
	ErrorCount           uint32
	FirstErrorTime       uint32
	FirstErrorInode      uint32
	FirstErrorBlock      uint64
	FirstErrorFunc       [32]uint8
	FirstErrorLine       uint32
	LastErrorTime        uint32
	LastErrorInode       uint32
	LastErrorLine        uint32
	LastErrorBlock       uint64
	LastErrorFunc        [32]uint8
	MountOpts            [64]uint8
	UserQuotaInum        uint32
	GroupQuotaInum       uint32
	OverheadBlocks       uint32
	BackupBgs            [2]uint32
	EncryptAlgos         [4]uint8
	EncryptPwSalt        [16]uint8
	LpfInode             uint32
	ProjectQuotaInum     uint32
	ChecksumSeed         uint32
	WtimeHigh            uint8
	MtimeHigh            uint8
	MkfsTimeHigh         uint8
	LastcheckHigh        uint8
	FirstErrorTimeHigh   uint8
	LastErrorTimeHigh    uint8
	Pad                  [2]uint8
	Reserved             [96]uint32
	Checksum             uint32
}
// SuperBlockMagic identifies an ext2/3/4 superblock.
const SuperBlockMagic uint16 = 0xef53

// CompatFeature flags features that an implementation may safely ignore
// when reading or writing the filesystem.
type CompatFeature uint32

// IncompatFeature flags features that must be understood to use the
// filesystem at all.
type IncompatFeature uint32

// RoCompatFeature flags features that must be understood to write the
// filesystem, but not to read it.
type RoCompatFeature uint32

// Feature flag values, matching the kernel's EXT4_FEATURE_* constants.
const (
	CompatDirPrealloc   CompatFeature = 0x1
	CompatImagicInodes  CompatFeature = 0x2
	CompatHasJournal    CompatFeature = 0x4
	CompatExtAttr       CompatFeature = 0x8
	CompatResizeInode   CompatFeature = 0x10
	CompatDirIndex      CompatFeature = 0x20
	CompatLazyBg        CompatFeature = 0x40
	CompatExcludeInode  CompatFeature = 0x80
	CompatExcludeBitmap CompatFeature = 0x100
	CompatSparseSuper2  CompatFeature = 0x200

	IncompatCompression IncompatFeature = 0x1
	IncompatFiletype    IncompatFeature = 0x2
	IncompatRecover     IncompatFeature = 0x4
	IncompatJournalDev  IncompatFeature = 0x8
	IncompatMetaBg      IncompatFeature = 0x10
	IncompatExtents     IncompatFeature = 0x40
	Incompat_64Bit      IncompatFeature = 0x80
	IncompatMmp         IncompatFeature = 0x100
	IncompatFlexBg      IncompatFeature = 0x200
	IncompatEaInode     IncompatFeature = 0x400
	IncompatDirdata     IncompatFeature = 0x1000
	IncompatCsumSeed    IncompatFeature = 0x2000
	IncompatLargedir    IncompatFeature = 0x4000
	IncompatInlineData  IncompatFeature = 0x8000
	IncompatEncrypt     IncompatFeature = 0x10000

	RoCompatSparseSuper  RoCompatFeature = 0x1
	RoCompatLargeFile    RoCompatFeature = 0x2
	RoCompatBtreeDir     RoCompatFeature = 0x4
	RoCompatHugeFile     RoCompatFeature = 0x8
	RoCompatGdtCsum      RoCompatFeature = 0x10
	RoCompatDirNlink     RoCompatFeature = 0x20
	RoCompatExtraIsize   RoCompatFeature = 0x40
	RoCompatHasSnapshot  RoCompatFeature = 0x80
	RoCompatQuota        RoCompatFeature = 0x100
	RoCompatBigalloc     RoCompatFeature = 0x200
	RoCompatMetadataCsum RoCompatFeature = 0x400
	RoCompatReplica      RoCompatFeature = 0x800
	RoCompatReadonly     RoCompatFeature = 0x1000
	RoCompatProject      RoCompatFeature = 0x2000
)
// BlockGroupFlag describes the initialization state of a block group.
type BlockGroupFlag uint16

const (
	BlockGroupInodeUninit BlockGroupFlag = 0x1
	BlockGroupBlockUninit BlockGroupFlag = 0x2
	BlockGroupInodeZeroed BlockGroupFlag = 0x4
)

// GroupDescriptor is the 32-byte on-disk block group descriptor used when
// the 64-bit feature is not in effect.
type GroupDescriptor struct {
	BlockBitmapLow     uint32
	InodeBitmapLow     uint32
	InodeTableLow      uint32
	FreeBlocksCountLow uint16
	FreeInodesCountLow uint16
	UsedDirsCountLow   uint16
	Flags              BlockGroupFlag
	ExcludeBitmapLow   uint32
	BlockBitmapCsumLow uint16
	InodeBitmapCsumLow uint16
	ItableUnusedLow    uint16
	Checksum           uint16
}

// GroupDescriptor64 extends GroupDescriptor with the high halves of its
// fields for filesystems with the 64-bit feature enabled.
type GroupDescriptor64 struct {
	GroupDescriptor
	BlockBitmapHigh     uint32
	InodeBitmapHigh     uint32
	InodeTableHigh      uint32
	FreeBlocksCountHigh uint16
	FreeInodesCountHigh uint16
	UsedDirsCountHigh   uint16
	ItableUnusedHigh    uint16
	ExcludeBitmapHigh   uint32
	BlockBitmapCsumHigh uint16
	InodeBitmapCsumHigh uint16
	Reserved            uint32
}
// File permission and type bits as stored in Inode.Mode; these match the
// POSIX S_* values.
const (
	S_IXOTH  = 0x1
	S_IWOTH  = 0x2
	S_IROTH  = 0x4
	S_IXGRP  = 0x8
	S_IWGRP  = 0x10
	S_IRGRP  = 0x20
	S_IXUSR  = 0x40
	S_IWUSR  = 0x80
	S_IRUSR  = 0x100
	S_ISVTX  = 0x200
	S_ISGID  = 0x400
	S_ISUID  = 0x800
	S_IFIFO  = 0x1000
	S_IFCHR  = 0x2000
	S_IFDIR  = 0x4000
	S_IFBLK  = 0x6000
	S_IFREG  = 0x8000
	S_IFLNK  = 0xA000
	S_IFSOCK = 0xC000

	// TypeMask selects the file-type bits of a mode.
	TypeMask uint16 = 0xF000
)

// InodeNumber identifies an inode within the inode table.
type InodeNumber uint32

const (
	// InodeRoot is the fixed inode number of the root directory.
	InodeRoot = 2
)
// Inode is the on-disk ext4 inode layout (struct ext4_inode in the Linux
// kernel). Field order and widths mirror the disk format exactly.
type Inode struct {
	Mode                 uint16
	Uid                  uint16
	SizeLow              uint32
	Atime                uint32
	Ctime                uint32
	Mtime                uint32
	Dtime                uint32
	Gid                  uint16
	LinksCount           uint16
	BlocksLow            uint32
	Flags                InodeFlag
	Version              uint32
	Block                [60]byte // block map, extent tree root, or inline data
	Generation           uint32
	XattrBlockLow        uint32
	SizeHigh             uint32
	ObsoleteFragmentAddr uint32
	BlocksHigh           uint16
	XattrBlockHigh       uint16
	UidHigh              uint16
	GidHigh              uint16
	ChecksumLow          uint16
	Reserved             uint16
	ExtraIsize           uint16
	ChecksumHigh         uint16
	CtimeExtra           uint32
	MtimeExtra           uint32
	AtimeExtra           uint32
	Crtime               uint32
	CrtimeExtra          uint32
	VersionHigh          uint32
	Projid               uint32
}

// InodeFlag holds per-inode feature and state flags (Inode.Flags).
type InodeFlag uint32

const (
	InodeFlagSecRm              InodeFlag = 0x1
	InodeFlagUnRm               InodeFlag = 0x2
	InodeFlagCompressed         InodeFlag = 0x4
	InodeFlagSync               InodeFlag = 0x8
	InodeFlagImmutable          InodeFlag = 0x10
	InodeFlagAppend             InodeFlag = 0x20
	InodeFlagNoDump             InodeFlag = 0x40
	InodeFlagNoAtime            InodeFlag = 0x80
	InodeFlagDirtyCompressed    InodeFlag = 0x100
	InodeFlagCompressedClusters InodeFlag = 0x200
	InodeFlagNoCompress         InodeFlag = 0x400
	InodeFlagEncrypted          InodeFlag = 0x800
	InodeFlagHashedIndex        InodeFlag = 0x1000
	InodeFlagMagic              InodeFlag = 0x2000
	InodeFlagJournalData        InodeFlag = 0x4000
	InodeFlagNoTail             InodeFlag = 0x8000
	InodeFlagDirSync            InodeFlag = 0x10000
	InodeFlagTopDir             InodeFlag = 0x20000
	InodeFlagHugeFile           InodeFlag = 0x40000
	InodeFlagExtents            InodeFlag = 0x80000
	InodeFlagEaInode            InodeFlag = 0x200000
	InodeFlagEOFBlocks          InodeFlag = 0x400000
	InodeFlagSnapfile           InodeFlag = 0x01000000
	InodeFlagSnapfileDeleted    InodeFlag = 0x04000000
	InodeFlagSnapfileShrunk     InodeFlag = 0x08000000
	InodeFlagInlineData         InodeFlag = 0x10000000
	InodeFlagProjectIDInherit   InodeFlag = 0x20000000
	InodeFlagReserved           InodeFlag = 0x80000000
)

const (
	// MaxLinks is the maximum hard-link count of a single inode.
	MaxLinks = 65000
)
// ExtentHeader prefixes every node of an inode's extent tree.
type ExtentHeader struct {
	Magic      uint16 // must equal ExtentHeaderMagic
	Entries    uint16
	Max        uint16
	Depth      uint16 // 0 for leaf nodes
	Generation uint32
}

// ExtentHeaderMagic identifies an extent-tree node.
const ExtentHeaderMagic uint16 = 0xf30a

// ExtentIndexNode is an interior extent-tree entry that points at a
// lower-level tree block.
type ExtentIndexNode struct {
	Block    uint32
	LeafLow  uint32
	LeafHigh uint16
	Unused   uint16
}

// ExtentLeafNode maps a contiguous range of file blocks to disk blocks.
type ExtentLeafNode struct {
	Block     uint32
	Length    uint16
	StartHigh uint16
	StartLow  uint32
}

// ExtentTail holds the checksum terminating an extent-tree block.
type ExtentTail struct {
	Checksum uint32
}
// DirectoryEntry is the fixed-size prefix of an on-disk directory entry;
// the name bytes immediately follow it.
type DirectoryEntry struct {
	Inode        InodeNumber
	RecordLength uint16
	NameLength   uint8
	FileType     FileType
	//Name []byte
}

// FileType is the file-type code stored in a directory entry.
type FileType uint8

const (
	FileTypeUnknown      FileType = 0x0
	FileTypeRegular      FileType = 0x1
	FileTypeDirectory    FileType = 0x2
	FileTypeCharacter    FileType = 0x3
	FileTypeBlock        FileType = 0x4
	FileTypeFIFO         FileType = 0x5
	FileTypeSocket       FileType = 0x6
	FileTypeSymbolicLink FileType = 0x7
)

// DirectoryEntryTail is the fake directory entry that terminates a
// directory block and carries its checksum.
type DirectoryEntryTail struct {
	ReservedZero1 uint32
	RecordLength  uint16
	ReservedZero2 uint8
	FileType      uint8
	Checksum      uint32
}

// DirectoryTreeRoot is the root block layout of an htree-indexed
// directory, beginning with the "." and ".." entries.
type DirectoryTreeRoot struct {
	Dot            DirectoryEntry
	DotName        [4]byte
	DotDot         DirectoryEntry
	DotDotName     [4]byte
	ReservedZero   uint32
	HashVersion    uint8
	InfoLength     uint8
	IndirectLevels uint8
	UnusedFlags    uint8
	Limit          uint16
	Count          uint16
	Block          uint32
	//Entries []DirectoryTreeEntry
}

// DirectoryTreeNode is an interior htree block, disguised as an empty
// directory entry for compatibility.
type DirectoryTreeNode struct {
	FakeInode        uint32
	FakeRecordLength uint16
	NameLength       uint8
	FileType         uint8
	Limit            uint16
	Count            uint16
	Block            uint32
	//Entries []DirectoryTreeEntry
}

// DirectoryTreeEntry maps a name hash to the directory block holding it.
type DirectoryTreeEntry struct {
	Hash  uint32
	Block uint32
}

// DirectoryTreeTail terminates an htree block with its checksum.
type DirectoryTreeTail struct {
	Reserved uint32
	Checksum uint32
}
// XAttrInodeBodyHeader prefixes the extended-attribute area stored inside
// an inode body.
type XAttrInodeBodyHeader struct {
	Magic uint32
}

// XAttrHeader prefixes a dedicated extended-attribute block.
type XAttrHeader struct {
	Magic          uint32 // must equal XAttrHeaderMagic
	ReferenceCount uint32
	Blocks         uint32
	Hash           uint32
	Checksum       uint32
	Reserved       [3]uint32
}

// XAttrHeaderMagic identifies an extended-attribute block.
const XAttrHeaderMagic uint32 = 0xea020000

// XAttrEntry is the fixed-size prefix of an extended-attribute entry; the
// name bytes immediately follow it.
type XAttrEntry struct {
	NameLength  uint8
	NameIndex   uint8
	ValueOffset uint16
	ValueInum   uint32
	ValueSize   uint32
	Hash        uint32
	//Name []byte
}

View File

@ -1,174 +0,0 @@
package tar2ext4
import (
"archive/tar"
"bufio"
"encoding/binary"
"io"
"path"
"strings"
"github.com/Microsoft/hcsshim/ext4/internal/compactext4"
)
// params collects the optional settings applied by the Option values
// passed to Convert.
type params struct {
	convertWhiteout bool                 // translate OCI ".wh." entries into overlay-style whiteouts
	appendVhdFooter bool                 // append a fixed VHD footer after the ext4 image
	ext4opts        []compactext4.Option // options forwarded to the ext4 writer
}

// Option is the type for optional parameters to Convert.
type Option func(*params)
// ConvertWhiteout instructs the converter to convert OCI-style whiteouts
// (beginning with .wh.) to overlay-style whiteouts.
func ConvertWhiteout(p *params) {
	p.convertWhiteout = true
}
// AppendVhdFooter instructs the converter to add a fixed VHD footer to the
// file, making the output usable directly as a virtual disk.
func AppendVhdFooter(p *params) {
	p.appendVhdFooter = true
}
// InlineData instructs the converter to write small files into the inode
// structures directly. This creates smaller images but currently is not
// compatible with DAX.
func InlineData(p *params) {
	p.ext4opts = append(p.ext4opts, compactext4.InlineData)
}
// MaximumDiskSize instructs the writer to limit the disk size to the specified
// value. This also reserves enough metadata space for the specified disk size.
// If not provided, then 16GB is the default. The setting is forwarded to the
// underlying compactext4 writer.
func MaximumDiskSize(size int64) Option {
	return func(p *params) {
		p.ext4opts = append(p.ext4opts, compactext4.MaximumDiskSize(size))
	}
}
const (
	// whiteoutPrefix marks an OCI whiteout entry (".wh.<name>").
	whiteoutPrefix = ".wh."
	// opaqueWhiteout marks a directory whose lower-layer contents are hidden.
	opaqueWhiteout = ".wh..wh..opq"
)
// Convert writes a compact ext4 file system image that contains the files in the
// input tar stream.
//
// Entries are processed in tar order. Hard links (tar.TypeLink) are created
// against the already-written target; every other entry becomes an ext4 file
// whose type bits are derived from the tar type flag, with xattrs carried
// over from SCHILY.xattr PAX records. When ConvertWhiteout is enabled,
// OCI-style ".wh." entries are translated into overlay-style whiteouts.
// When AppendVhdFooter is enabled, a fixed VHD footer is appended after the
// filesystem is closed.
func Convert(r io.Reader, w io.ReadWriteSeeker, options ...Option) error {
	var p params
	for _, opt := range options {
		opt(&p)
	}
	t := tar.NewReader(bufio.NewReader(r))
	fs := compactext4.NewWriter(w, p.ext4opts...)
	for {
		hdr, err := t.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		if p.convertWhiteout {
			dir, name := path.Split(hdr.Name)
			if strings.HasPrefix(name, whiteoutPrefix) {
				if name == opaqueWhiteout {
					// Update the directory with the appropriate xattr.
					// NOTE(review): assumes fs.Stat returns a File with a
					// non-nil Xattrs map — confirm against compactext4.
					f, err := fs.Stat(dir)
					if err != nil {
						return err
					}
					f.Xattrs["trusted.overlay.opaque"] = []byte("y")
					err = fs.Create(dir, f)
					if err != nil {
						return err
					}
				} else {
					// Create an overlay-style whiteout: a 0/0 character
					// device with the whiteout prefix stripped.
					f := &compactext4.File{
						Mode:     compactext4.S_IFCHR,
						Devmajor: 0,
						Devminor: 0,
					}
					err = fs.Create(path.Join(dir, name[len(whiteoutPrefix):]), f)
					if err != nil {
						return err
					}
				}
				continue
			}
		}
		if hdr.Typeflag == tar.TypeLink {
			// Hard link to a file created by an earlier entry.
			err = fs.Link(hdr.Linkname, hdr.Name)
			if err != nil {
				return err
			}
		} else {
			f := &compactext4.File{
				Mode:     uint16(hdr.Mode),
				Atime:    hdr.AccessTime,
				Mtime:    hdr.ModTime,
				Ctime:    hdr.ChangeTime,
				Crtime:   hdr.ModTime,
				Size:     hdr.Size,
				Uid:      uint32(hdr.Uid),
				Gid:      uint32(hdr.Gid),
				Linkname: hdr.Linkname,
				Devmajor: uint32(hdr.Devmajor),
				Devminor: uint32(hdr.Devminor),
				Xattrs:   make(map[string][]byte),
			}
			// Carry over extended attributes recorded as PAX records.
			for key, value := range hdr.PAXRecords {
				const xattrPrefix = "SCHILY.xattr."
				if strings.HasPrefix(key, xattrPrefix) {
					f.Xattrs[key[len(xattrPrefix):]] = []byte(value)
				}
			}
			// Map the tar entry type onto the ext4 mode type bits.
			// Unknown type flags leave typ == 0.
			var typ uint16
			switch hdr.Typeflag {
			case tar.TypeReg, tar.TypeRegA:
				typ = compactext4.S_IFREG
			case tar.TypeSymlink:
				typ = compactext4.S_IFLNK
			case tar.TypeChar:
				typ = compactext4.S_IFCHR
			case tar.TypeBlock:
				typ = compactext4.S_IFBLK
			case tar.TypeDir:
				typ = compactext4.S_IFDIR
			case tar.TypeFifo:
				typ = compactext4.S_IFIFO
			}
			f.Mode &= ^compactext4.TypeMask
			f.Mode |= typ
			err = fs.Create(hdr.Name, f)
			if err != nil {
				return err
			}
			// Copy the entry's payload (a no-op for entries without data).
			_, err = io.Copy(fs, t)
			if err != nil {
				return err
			}
		}
	}
	err := fs.Close()
	if err != nil {
		return err
	}
	if p.appendVhdFooter {
		// The footer records the final image size, so it is computed only
		// after the filesystem has been fully written.
		size, err := w.Seek(0, io.SeekEnd)
		if err != nil {
			return err
		}
		err = binary.Write(w, binary.BigEndian, makeFixedVHDFooter(size))
		if err != nil {
			return err
		}
	}
	return nil
}

View File

@ -1,76 +0,0 @@
package tar2ext4
import (
"bytes"
"crypto/rand"
"encoding/binary"
)
// Constants for the VHD footer.
const (
	cookieMagic            = "conectix"   // footer signature
	featureMask            = 0x2          // reserved feature bit; the VHD spec requires it set
	fileFormatVersionMagic = 0x00010000   // VHD format version 1.0
	fixedDataOffset        = -1           // fixed disks carry no data offset
	creatorVersionMagic    = 0x000a0000
	diskTypeFixed          = 2            // disk type code for a fixed (non-dynamic) disk
)
// vhdFooter is the 512-byte footer appended to a fixed VHD. Multi-byte
// fields are serialized big-endian (see makeFixedVHDFooter and
// calculateCheckSum, which use binary.BigEndian).
type vhdFooter struct {
	Cookie             [8]byte
	Features           uint32
	FileFormatVersion  uint32
	DataOffset         int64
	TimeStamp          uint32
	CreatorApplication [4]byte
	CreatorVersion     uint32
	CreatorHostOS      [4]byte
	OriginalSize       int64
	CurrentSize        int64
	DiskGeometry       uint32
	DiskType           uint32
	Checksum           uint32
	UniqueID           [16]uint8
	SavedState         uint8
	Reserved           [427]uint8
}
// makeFixedVHDFooter builds the footer for a fixed-format VHD of the given
// size, with a freshly generated unique ID and a valid checksum.
func makeFixedVHDFooter(size int64) *vhdFooter {
	f := &vhdFooter{
		FileFormatVersion: fileFormatVersionMagic,
		Features:          featureMask,
		DataOffset:        fixedDataOffset,
		CreatorVersion:    creatorVersionMagic,
		DiskType:          diskTypeFixed,
		OriginalSize:      size,
		CurrentSize:       size,
		UniqueID:          generateUUID(),
	}
	copy(f.Cookie[:], cookieMagic)
	// The checksum must be computed last, over the fully populated footer.
	f.Checksum = calculateCheckSum(f)
	return f
}
// calculateCheckSum computes the VHD footer checksum: the one's complement
// of the byte-wise sum of the big-endian serialized footer, with the
// Checksum field treated as zero. The footer's stored checksum is restored
// before returning.
func calculateCheckSum(footer *vhdFooter) uint32 {
	saved := footer.Checksum
	footer.Checksum = 0
	defer func() { footer.Checksum = saved }()

	var buf bytes.Buffer
	binary.Write(&buf, binary.BigEndian, footer)

	var sum uint32
	for _, by := range buf.Bytes() {
		sum += uint32(by)
	}
	return ^sum
}
// generateUUID returns 16 cryptographically random bytes for use as the
// VHD's unique ID. It panics if the system's random source fails, which is
// unrecoverable for image generation anyway.
func generateUUID() [16]byte {
	var id [16]byte
	if _, err := rand.Read(id[:]); err != nil {
		panic(err)
	}
	return id
}

View File

@ -1,298 +0,0 @@
// +build integration
package hcn
import (
"encoding/json"
"fmt"
"testing"
)
// TestCreateDeleteEndpoint creates an endpoint on a fresh NAT network,
// logs its JSON form, and tears both objects down again.
func TestCreateDeleteEndpoint(t *testing.T) {
	must := func(err error) {
		if err != nil {
			t.Fatal(err)
		}
	}
	network, err := HcnCreateTestNATNetwork()
	must(err)
	endpoint, err := HcnCreateTestEndpoint(network)
	must(err)
	jsonString, err := json.Marshal(endpoint)
	must(err)
	fmt.Printf("Endpoint JSON:\n%s \n", jsonString)
	must(endpoint.Delete())
	must(network.Delete())
}
// TestGetEndpointById verifies that a freshly created endpoint can be
// looked up by its ID.
func TestGetEndpointById(t *testing.T) {
	must := func(err error) {
		if err != nil {
			t.Fatal(err)
		}
	}
	network, err := HcnCreateTestNATNetwork()
	must(err)
	endpoint, err := HcnCreateTestEndpoint(network)
	must(err)
	found, err := GetEndpointByID(endpoint.Id)
	must(err)
	if found == nil {
		t.Fatal("No Endpoint found")
	}
	// Delete through the looked-up handle, as the original test did.
	must(found.Delete())
	must(network.Delete())
}
// TestGetEndpointByName verifies that a freshly created endpoint can be
// looked up by its name.
func TestGetEndpointByName(t *testing.T) {
	must := func(err error) {
		if err != nil {
			t.Fatal(err)
		}
	}
	network, err := HcnCreateTestNATNetwork()
	must(err)
	endpoint, err := HcnCreateTestEndpoint(network)
	must(err)
	found, err := GetEndpointByName(endpoint.Name)
	must(err)
	if found == nil {
		t.Fatal("No Endpoint found")
	}
	// Delete through the looked-up handle, as the original test did.
	must(found.Delete())
	must(network.Delete())
}
// TestListEndpoints verifies that enumerating all endpoints returns at
// least the one just created.
func TestListEndpoints(t *testing.T) {
	must := func(err error) {
		if err != nil {
			t.Fatal(err)
		}
	}
	network, err := HcnCreateTestNATNetwork()
	must(err)
	endpoint, err := HcnCreateTestEndpoint(network)
	must(err)
	found, err := ListEndpoints()
	must(err)
	if len(found) == 0 {
		t.Fatal("No Endpoint found")
	}
	must(endpoint.Delete())
	must(network.Delete())
}
// TestListEndpointsOfNetwork verifies that enumerating a network's
// endpoints returns at least the one just created on it.
func TestListEndpointsOfNetwork(t *testing.T) {
	must := func(err error) {
		if err != nil {
			t.Fatal(err)
		}
	}
	network, err := HcnCreateTestNATNetwork()
	must(err)
	endpoint, err := HcnCreateTestEndpoint(network)
	must(err)
	found, err := ListEndpointsOfNetwork(network.Id)
	must(err)
	if len(found) == 0 {
		t.Fatal("No Endpoint found")
	}
	must(endpoint.Delete())
	must(network.Delete())
}
// TestEndpointNamespaceAttachDetach attaches an endpoint to a namespace,
// detaches it again, and cleans up all three objects.
func TestEndpointNamespaceAttachDetach(t *testing.T) {
	must := func(err error) {
		if err != nil {
			t.Fatal(err)
		}
	}
	network, err := HcnCreateTestNATNetwork()
	must(err)
	endpoint, err := HcnCreateTestEndpoint(network)
	must(err)
	namespace, err := HcnCreateTestNamespace()
	must(err)
	must(endpoint.NamespaceAttach(namespace.Id))
	must(endpoint.NamespaceDetach(namespace.Id))
	must(namespace.Delete())
	must(endpoint.Delete())
	must(network.Delete())
}
// TestCreateEndpointWithNamespace creates an endpoint bound to a namespace
// and verifies the namespace reference is recorded on the endpoint.
// (The namespace itself is intentionally not deleted, matching the
// original test's behavior.)
func TestCreateEndpointWithNamespace(t *testing.T) {
	must := func(err error) {
		if err != nil {
			t.Fatal(err)
		}
	}
	network, err := HcnCreateTestNATNetwork()
	must(err)
	namespace, err := HcnCreateTestNamespace()
	must(err)
	endpoint, err := HcnCreateTestEndpointWithNamespace(network, namespace)
	must(err)
	if endpoint.HostComputeNamespace == "" {
		t.Fatal("No Namespace detected.")
	}
	must(endpoint.Delete())
	must(network.Delete())
}
// TestApplyPolicyOnEndpoint applies an ACL policy list to an endpoint and
// verifies the policies show up when the endpoint is re-read by name.
func TestApplyPolicyOnEndpoint(t *testing.T) {
	must := func(err error) {
		if err != nil {
			t.Fatal(err)
		}
	}
	network, err := HcnCreateTestNATNetwork()
	must(err)
	endpoint, err := HcnCreateTestEndpoint(network)
	must(err)
	policyList, err := HcnCreateAcls()
	must(err)
	jsonString, err := json.Marshal(*policyList)
	must(err)
	fmt.Printf("ACLS JSON:\n%s \n", jsonString)
	must(endpoint.ApplyPolicy(*policyList))
	found, err := GetEndpointByName(endpoint.Name)
	must(err)
	if len(found.Policies) == 0 {
		t.Fatal("No Endpoint Policies found")
	}
	must(endpoint.Delete())
	must(network.Delete())
}
// TestModifyEndpointSettings updates an endpoint's policy settings through
// ModifyEndpointSettings and verifies the change is visible on re-read.
func TestModifyEndpointSettings(t *testing.T) {
	must := func(err error) {
		if err != nil {
			t.Fatal(err)
		}
	}
	network, err := HcnCreateTestNATNetwork()
	must(err)
	endpoint, err := HcnCreateTestEndpoint(network)
	must(err)
	policy, err := HcnCreateAcls()
	must(err)
	settingsJson, err := json.Marshal(policy)
	must(err)
	request := &ModifyEndpointSettingRequest{
		ResourceType: EndpointResourceTypePolicy,
		RequestType:  RequestTypeUpdate,
		Settings:     settingsJson,
	}
	must(ModifyEndpointSettings(endpoint.Id, request))
	found, err := GetEndpointByName(endpoint.Name)
	must(err)
	if len(found.Policies) == 0 {
		t.Fatal("No Endpoint Policies found")
	}
	must(endpoint.Delete())
	must(network.Delete())
}

View File

@ -1,34 +0,0 @@
// +build integration
package hcn
import (
"testing"
)
// TestMissingNetworkByName verifies that looking up a nonexistent network
// name yields a NetworkNotFoundError.
func TestMissingNetworkByName(t *testing.T) {
	_, err := GetNetworkByName("Not found name")
	switch {
	case err == nil:
		t.Fatal("Error was not thrown.")
	case !IsNotFoundError(err):
		t.Fatal("Unrelated error was thrown.")
	}
	if _, ok := err.(NetworkNotFoundError); !ok {
		t.Fatal("Wrong error type was thrown.")
	}
}
// TestMissingNetworkById verifies that looking up a random, nonexistent
// network GUID yields a NetworkNotFoundError.
func TestMissingNetworkById(t *testing.T) {
	// Random guid
	_, err := GetNetworkByID("5f0b1190-63be-4e0c-b974-bd0f55675a42")
	if err == nil {
		// Fixed failure message: this branch means no error was returned
		// at all. The previous text ("Unrelated error was thrown.") was
		// copied from the wrong branch and disagreed with the sibling
		// TestMissingNetworkByName test.
		t.Fatal("Error was not thrown.")
	}
	if !IsNotFoundError(err) {
		t.Fatal("Unrelated error was thrown.")
	}
	if _, ok := err.(NetworkNotFoundError); !ok {
		t.Fatal("Wrong error type was thrown.")
	}
}

View File

@ -10,10 +10,10 @@ import (
// LoadBalancerPortMapping is associated with HostComputeLoadBalancer
type LoadBalancerPortMapping struct {
Protocol uint32 `json:",omitempty"` // EX: TCP = 6, UDP = 17
InternalPort uint16 `json:",omitempty"`
ExternalPort uint16 `json:",omitempty"`
Flags uint32 `json:",omitempty"` // 0: None, 1: EnableILB, 2: LocalRoutedVip
Protocol uint32 `json:",omitempty"` // EX: TCP = 6, UDP = 17
InternalPort uint16 `json:",omitempty"`
ExternalPort uint16 `json:",omitempty"`
Flags LoadBalancerPortMappingFlags `json:",omitempty"`
}
// HostComputeLoadBalancer represents software load balancer.
@ -24,9 +24,35 @@ type HostComputeLoadBalancer struct {
FrontendVIPs []string `json:",omitempty"`
PortMappings []LoadBalancerPortMapping `json:",omitempty"`
SchemaVersion SchemaVersion `json:",omitempty"`
Flags uint32 `json:",omitempty"` // 0: None, 1: EnableDirectServerReturn
Flags LoadBalancerFlags `json:",omitempty"` // 0: None, 1: EnableDirectServerReturn
}
// LoadBalancerFlags modify settings for a loadbalancer.
type LoadBalancerFlags uint32

var (
	// LoadBalancerFlagsNone is the default.
	LoadBalancerFlagsNone LoadBalancerFlags = 0
	// LoadBalancerFlagsDSR enables Direct Server Return (DSR).
	LoadBalancerFlagsDSR LoadBalancerFlags = 1
)
// LoadBalancerPortMappingFlags are special settings on a loadbalancer.
// Values are bit flags and may be combined.
type LoadBalancerPortMappingFlags uint32

var (
	// LoadBalancerPortMappingFlagsNone is the default.
	LoadBalancerPortMappingFlagsNone LoadBalancerPortMappingFlags
	// LoadBalancerPortMappingFlagsILB enables internal loadbalancing.
	LoadBalancerPortMappingFlagsILB LoadBalancerPortMappingFlags = 1
	// LoadBalancerPortMappingFlagsLocalRoutedVIP enables VIP access from the host.
	LoadBalancerPortMappingFlagsLocalRoutedVIP LoadBalancerPortMappingFlags = 2
	// LoadBalancerPortMappingFlagsUseMux enables DSR for NodePort access of VIP.
	LoadBalancerPortMappingFlagsUseMux LoadBalancerPortMappingFlags = 4
	// LoadBalancerPortMappingFlagsPreserveDIP delivers packets with destination IP as the VIP.
	LoadBalancerPortMappingFlagsPreserveDIP LoadBalancerPortMappingFlags = 8
)
func getLoadBalancer(loadBalancerGuid guid.GUID, query string) (*HostComputeLoadBalancer, error) {
// Open loadBalancer.
var (
@ -280,20 +306,8 @@ func (loadBalancer *HostComputeLoadBalancer) RemoveEndpoint(endpoint *HostComput
}
// AddLoadBalancer for the specified endpoints
func AddLoadBalancer(endpoints []HostComputeEndpoint, isILB bool, isDSR bool, sourceVIP string, frontendVIPs []string, protocol uint16, internalPort uint16, externalPort uint16) (*HostComputeLoadBalancer, error) {
logrus.Debugf("hcn::HostComputeLoadBalancer::AddLoadBalancer endpointId=%v, isILB=%v, sourceVIP=%s, frontendVIPs=%v, protocol=%v, internalPort=%v, externalPort=%v", endpoints, isILB, sourceVIP, frontendVIPs, protocol, internalPort, externalPort)
var portMappingFlags uint32
portMappingFlags = 0
if isILB {
portMappingFlags = 1
}
var lbFlags uint32
lbFlags = 0
if isDSR {
lbFlags = 1 // EnableDirectServerReturn
}
func AddLoadBalancer(endpoints []HostComputeEndpoint, flags LoadBalancerFlags, portMappingFlags LoadBalancerPortMappingFlags, sourceVIP string, frontendVIPs []string, protocol uint16, internalPort uint16, externalPort uint16) (*HostComputeLoadBalancer, error) {
logrus.Debugf("hcn::HostComputeLoadBalancer::AddLoadBalancer endpointId=%v, LoadBalancerFlags=%v, LoadBalancerPortMappingFlags=%v, sourceVIP=%s, frontendVIPs=%v, protocol=%v, internalPort=%v, externalPort=%v", endpoints, flags, portMappingFlags, sourceVIP, frontendVIPs, protocol, internalPort, externalPort)
loadBalancer := &HostComputeLoadBalancer{
SourceVIP: sourceVIP,
@ -310,7 +324,7 @@ func AddLoadBalancer(endpoints []HostComputeEndpoint, isILB bool, isDSR bool, so
Major: 2,
Minor: 0,
},
Flags: lbFlags,
Flags: flags,
}
for _, endpoint := range endpoints {

View File

@ -1,243 +0,0 @@
// +build integration
package hcn
import (
"encoding/json"
"fmt"
"testing"
)
// TestCreateDeleteLoadBalancer creates a load balancer over an endpoint on
// a fresh NAT network, logs its JSON form, and tears everything down.
func TestCreateDeleteLoadBalancer(t *testing.T) {
	must := func(err error) {
		if err != nil {
			t.Fatal(err)
		}
	}
	network, err := HcnCreateTestNATNetwork()
	must(err)
	endpoint, err := HcnCreateTestEndpoint(network)
	must(err)
	lb, err := HcnCreateTestLoadBalancer(endpoint)
	must(err)
	jsonString, err := json.Marshal(lb)
	must(err)
	fmt.Printf("LoadBalancer JSON:\n%s \n", jsonString)
	must(lb.Delete())
	must(endpoint.Delete())
	must(network.Delete())
}
// TestGetLoadBalancerById verifies that a freshly created load balancer
// can be looked up by its ID.
func TestGetLoadBalancerById(t *testing.T) {
	must := func(err error) {
		if err != nil {
			t.Fatal(err)
		}
	}
	network, err := HcnCreateTestNATNetwork()
	must(err)
	endpoint, err := HcnCreateTestEndpoint(network)
	must(err)
	lb, err := HcnCreateTestLoadBalancer(endpoint)
	must(err)
	found, err := GetLoadBalancerByID(lb.Id)
	must(err)
	if found == nil {
		t.Fatalf("No loadBalancer found")
	}
	must(lb.Delete())
	must(endpoint.Delete())
	must(network.Delete())
}
// TestListLoadBalancer verifies that enumerating load balancers succeeds;
// it makes no assumption about how many exist on the host.
func TestListLoadBalancer(t *testing.T) {
	_, err := ListLoadBalancers()
	if err != nil {
		t.Fatal(err)
	}
}
// TestLoadBalancerAddRemoveEndpoint adds a second endpoint to an existing
// load balancer, removes it again, and checks the endpoint count after
// each step.
func TestLoadBalancerAddRemoveEndpoint(t *testing.T) {
	must := func(err error) {
		if err != nil {
			t.Fatal(err)
		}
	}
	network, err := HcnCreateTestNATNetwork()
	must(err)
	endpoint, err := HcnCreateTestEndpoint(network)
	must(err)
	lb, err := HcnCreateTestLoadBalancer(endpoint)
	must(err)
	second, err := HcnCreateTestEndpoint(network)
	must(err)
	updated, err := lb.AddEndpoint(second)
	must(err)
	if len(updated.HostComputeEndpoints) != 2 {
		t.Fatalf("Endpoint not added to loadBalancer")
	}
	updated, err = lb.RemoveEndpoint(second)
	must(err)
	if len(updated.HostComputeEndpoints) != 1 {
		t.Fatalf("Endpoint not removed from loadBalancer")
	}
	must(lb.Delete())
	must(second.Delete())
	must(endpoint.Delete())
	must(network.Delete())
}
// TestAddLoadBalancer exercises the AddLoadBalancer convenience wrapper
// (no ILB, no DSR) and verifies the result is retrievable by ID.
func TestAddLoadBalancer(t *testing.T) {
	network, err := HcnCreateTestNATNetwork()
	if err != nil {
		t.Fatal(err)
	}
	endpoint, err := HcnCreateTestEndpoint(network)
	if err != nil {
		t.Fatal(err)
	}
	loadBalancer, err := AddLoadBalancer([]HostComputeEndpoint{*endpoint}, false, false, "10.0.0.1", []string{"1.1.1.2", "1.1.1.3"}, 6, 8080, 80)
	if err != nil {
		t.Fatal(err)
	}
	foundLB, err := GetLoadBalancerByID(loadBalancer.Id)
	if err != nil {
		t.Fatal(err)
	}
	if foundLB == nil {
		t.Fatal(fmt.Errorf("No loadBalancer found"))
	}
	for _, teardown := range []func() error{loadBalancer.Delete, endpoint.Delete, network.Delete} {
		if err := teardown(); err != nil {
			t.Fatal(err)
		}
	}
}
// TestAddDSRLoadBalancer exercises AddLoadBalancer with the DSR flag set on
// an overlay network and verifies the result is retrievable by ID.
func TestAddDSRLoadBalancer(t *testing.T) {
	network, err := CreateTestOverlayNetwork()
	if err != nil {
		t.Fatal(err)
	}
	endpoint, err := HcnCreateTestEndpoint(network)
	if err != nil {
		t.Fatal(err)
	}
	loadBalancer, err := AddLoadBalancer([]HostComputeEndpoint{*endpoint}, false, true, "10.0.0.1", []string{"1.1.1.2", "1.1.1.3"}, 6, 8080, 80)
	if err != nil {
		t.Fatal(err)
	}
	foundLB, err := GetLoadBalancerByID(loadBalancer.Id)
	if err != nil {
		t.Fatal(err)
	}
	if foundLB == nil {
		t.Fatal(fmt.Errorf("No loadBalancer found"))
	}
	for _, teardown := range []func() error{loadBalancer.Delete, endpoint.Delete, network.Delete} {
		if err := teardown(); err != nil {
			t.Fatal(err)
		}
	}
}
// TestAddILBLoadBalancer exercises AddLoadBalancer with the ILB flag set on
// an overlay network and verifies the result is retrievable by ID.
func TestAddILBLoadBalancer(t *testing.T) {
	network, err := CreateTestOverlayNetwork()
	if err != nil {
		t.Fatal(err)
	}
	endpoint, err := HcnCreateTestEndpoint(network)
	if err != nil {
		t.Fatal(err)
	}
	loadBalancer, err := AddLoadBalancer([]HostComputeEndpoint{*endpoint}, true, false, "10.0.0.1", []string{"1.1.1.2", "1.1.1.3"}, 6, 8080, 80)
	if err != nil {
		t.Fatal(err)
	}
	foundLB, err := GetLoadBalancerByID(loadBalancer.Id)
	if err != nil {
		t.Fatal(err)
	}
	if foundLB == nil {
		t.Fatal(fmt.Errorf("No loadBalancer found"))
	}
	for _, teardown := range []func() error{loadBalancer.Delete, endpoint.Delete, network.Delete} {
		if err := teardown(); err != nil {
			t.Fatal(err)
		}
	}
}

View File

@ -1,451 +0,0 @@
// +build integration
package hcn
import (
"encoding/json"
"fmt"
"testing"
"github.com/Microsoft/hcsshim/internal/cni"
"github.com/Microsoft/hcsshim/internal/guid"
)
// TestNewNamespace checks that NewNamespace can construct a namespace object
// for each supported namespace type without panicking. The results are
// deliberately discarded; nothing is created in HNS here.
func TestNewNamespace(t *testing.T) {
_ = NewNamespace(NamespaceTypeHost)
_ = NewNamespace(NamespaceTypeHostDefault)
_ = NewNamespace(NamespaceTypeGuest)
_ = NewNamespace(NamespaceTypeGuestDefault)
}
// TestCreateDeleteNamespace round-trips a test namespace: create, dump its
// JSON for debugging, delete.
func TestCreateDeleteNamespace(t *testing.T) {
	namespace, err := HcnCreateTestNamespace()
	if err != nil {
		t.Fatal(err)
	}
	jsonString, err := json.Marshal(namespace)
	if err != nil {
		t.Fatal(err)
	}
	fmt.Printf("Namespace JSON:\n%s \n", jsonString)
	if err := namespace.Delete(); err != nil {
		t.Fatal(err)
	}
}
// TestCreateDeleteNamespaceGuest creates and deletes a guest-default
// namespace built directly from a literal (rather than the test helper).
func TestCreateDeleteNamespaceGuest(t *testing.T) {
	namespace := &HostComputeNamespace{
		Type: NamespaceTypeGuestDefault,
		SchemaVersion: SchemaVersion{
			Major: 2,
			Minor: 0,
		},
	}
	hnsNamespace, err := namespace.Create()
	if err != nil {
		t.Fatal(err)
	}
	if err := hnsNamespace.Delete(); err != nil {
		t.Fatal(err)
	}
}
// TestGetNamespaceById verifies a freshly created namespace can be looked up
// again by its ID.
func TestGetNamespaceById(t *testing.T) {
	namespace, err := HcnCreateTestNamespace()
	if err != nil {
		t.Fatal(err)
	}
	foundNamespace, err := GetNamespaceByID(namespace.Id)
	if err != nil {
		t.Fatal(err)
	}
	if foundNamespace == nil {
		t.Fatal("No namespace found")
	}
	if err := namespace.Delete(); err != nil {
		t.Fatal(err)
	}
}
// TestListNamespaces creates one namespace so the list is guaranteed to be
// non-empty, then enumerates all namespaces.
func TestListNamespaces(t *testing.T) {
	namespace, err := HcnCreateTestNamespace()
	if err != nil {
		t.Fatal(err)
	}
	foundNamespaces, err := ListNamespaces()
	if err != nil {
		t.Fatal(err)
	}
	if len(foundNamespaces) == 0 {
		t.Fatal("No Namespaces found")
	}
	if err := namespace.Delete(); err != nil {
		t.Fatal(err)
	}
}
// TestGetNamespaceEndpointIds attaches an endpoint to a namespace and checks
// that the endpoint shows up in the namespace's endpoint-ID listing.
func TestGetNamespaceEndpointIds(t *testing.T) {
	network, err := HcnCreateTestNATNetwork()
	if err != nil {
		t.Fatal(err)
	}
	endpoint, err := HcnCreateTestEndpoint(network)
	if err != nil {
		t.Fatal(err)
	}
	namespace, err := HcnCreateTestNamespace()
	if err != nil {
		t.Fatal(err)
	}
	if err := endpoint.NamespaceAttach(namespace.Id); err != nil {
		t.Fatal(err)
	}
	foundEndpoints, err := GetNamespaceEndpointIds(namespace.Id)
	if err != nil {
		t.Fatal(err)
	}
	if len(foundEndpoints) == 0 {
		t.Fatal("No Endpoint found")
	}
	if err := endpoint.NamespaceDetach(namespace.Id); err != nil {
		t.Fatal(err)
	}
	// Tear down in reverse order of creation.
	for _, teardown := range []func() error{namespace.Delete, endpoint.Delete, network.Delete} {
		if err := teardown(); err != nil {
			t.Fatal(err)
		}
	}
}
// TestGetNamespaceContainers checks that a brand-new namespace reports no
// attached containers.
func TestGetNamespaceContainers(t *testing.T) {
	namespace, err := HcnCreateTestNamespace()
	if err != nil {
		t.Fatal(err)
	}
	foundEndpoints, err := GetNamespaceContainerIds(namespace.Id)
	if err != nil {
		t.Fatal(err)
	}
	if len(foundEndpoints) != 0 {
		t.Fatal("Found containers when none should exist")
	}
	if err := namespace.Delete(); err != nil {
		t.Fatal(err)
	}
}
// TestAddRemoveNamespaceEndpoint adds an endpoint to a namespace via the
// package-level helpers and removes it again.
func TestAddRemoveNamespaceEndpoint(t *testing.T) {
	network, err := HcnCreateTestNATNetwork()
	if err != nil {
		t.Fatal(err)
	}
	endpoint, err := HcnCreateTestEndpoint(network)
	if err != nil {
		t.Fatal(err)
	}
	namespace, err := HcnCreateTestNamespace()
	if err != nil {
		t.Fatal(err)
	}
	if err := AddNamespaceEndpoint(namespace.Id, endpoint.Id); err != nil {
		t.Fatal(err)
	}
	foundEndpoints, err := GetNamespaceEndpointIds(namespace.Id)
	if err != nil {
		t.Fatal(err)
	}
	if len(foundEndpoints) == 0 {
		t.Fatal("No Endpoint found")
	}
	if err := RemoveNamespaceEndpoint(namespace.Id, endpoint.Id); err != nil {
		t.Fatal(err)
	}
	// Tear down in reverse order of creation.
	for _, teardown := range []func() error{namespace.Delete, endpoint.Delete, network.Delete} {
		if err := teardown(); err != nil {
			t.Fatal(err)
		}
	}
}
// TestModifyNamespaceSettings adds an endpoint to a namespace through the
// generic ModifyNamespaceSettings request path and verifies it landed.
func TestModifyNamespaceSettings(t *testing.T) {
	network, err := HcnCreateTestNATNetwork()
	if err != nil {
		t.Fatal(err)
	}
	endpoint, err := HcnCreateTestEndpoint(network)
	if err != nil {
		t.Fatal(err)
	}
	namespace, err := HcnCreateTestNamespace()
	if err != nil {
		t.Fatal(err)
	}
	settingsJson, err := json.Marshal(map[string]string{"EndpointId": endpoint.Id})
	if err != nil {
		t.Fatal(err)
	}
	requestMessage := &ModifyNamespaceSettingRequest{
		ResourceType: NamespaceResourceTypeEndpoint,
		RequestType:  RequestTypeAdd,
		Settings:     settingsJson,
	}
	if err := ModifyNamespaceSettings(namespace.Id, requestMessage); err != nil {
		t.Fatal(err)
	}
	foundEndpoints, err := GetNamespaceEndpointIds(namespace.Id)
	if err != nil {
		t.Fatal(err)
	}
	if len(foundEndpoints) == 0 {
		t.Fatal("No Endpoint found")
	}
	if err := RemoveNamespaceEndpoint(namespace.Id, endpoint.Id); err != nil {
		t.Fatal(err)
	}
	// Tear down in reverse order of creation.
	for _, teardown := range []func() error{namespace.Delete, endpoint.Delete, network.Delete} {
		if err := teardown(); err != nil {
			t.Fatal(err)
		}
	}
}
// Sync Tests
// TestSyncNamespaceHostDefault verifies Sync is a no-op success for a
// host-default namespace.
func TestSyncNamespaceHostDefault(t *testing.T) {
	namespace := &HostComputeNamespace{
		Type:        NamespaceTypeHostDefault,
		NamespaceId: 5,
		SchemaVersion: SchemaVersion{
			Major: 2,
			Minor: 0,
		},
	}
	hnsNamespace, err := namespace.Create()
	if err != nil {
		t.Fatal(err)
	}
	// Host namespace types should be a no-op success.
	if err := hnsNamespace.Sync(); err != nil {
		t.Fatal(err)
	}
	if err := hnsNamespace.Delete(); err != nil {
		t.Fatal(err)
	}
}
// TestSyncNamespaceHost verifies Sync is a no-op success for a host
// namespace.
func TestSyncNamespaceHost(t *testing.T) {
	namespace := &HostComputeNamespace{
		Type:        NamespaceTypeHost,
		NamespaceId: 5,
		SchemaVersion: SchemaVersion{
			Major: 2,
			Minor: 0,
		},
	}
	hnsNamespace, err := namespace.Create()
	if err != nil {
		t.Fatal(err)
	}
	// Host namespace types should be a no-op success.
	if err := hnsNamespace.Sync(); err != nil {
		t.Fatal(err)
	}
	if err := hnsNamespace.Delete(); err != nil {
		t.Fatal(err)
	}
}
// TestSyncNamespaceGuestNoReg verifies Sync is a no-op success for a guest
// namespace when no registry state exists.
func TestSyncNamespaceGuestNoReg(t *testing.T) {
	namespace := &HostComputeNamespace{
		Type:        NamespaceTypeGuest,
		NamespaceId: 5,
		SchemaVersion: SchemaVersion{
			Major: 2,
			Minor: 0,
		},
	}
	hnsNamespace, err := namespace.Create()
	if err != nil {
		t.Fatal(err)
	}
	// Guest namespace type without registry state should be a no-op success.
	if err := hnsNamespace.Sync(); err != nil {
		t.Fatal(err)
	}
	if err := hnsNamespace.Delete(); err != nil {
		t.Fatal(err)
	}
}
// TestSyncNamespaceGuestDefaultNoReg verifies Sync is a no-op success for a
// guest-default namespace when no registry state exists.
func TestSyncNamespaceGuestDefaultNoReg(t *testing.T) {
	namespace := &HostComputeNamespace{
		Type:        NamespaceTypeGuestDefault,
		NamespaceId: 5,
		SchemaVersion: SchemaVersion{
			Major: 2,
			Minor: 0,
		},
	}
	hnsNamespace, err := namespace.Create()
	if err != nil {
		t.Fatal(err)
	}
	// Guest namespace type without registry state should be a no-op success.
	if err := hnsNamespace.Sync(); err != nil {
		t.Fatal(err)
	}
	if err := hnsNamespace.Delete(); err != nil {
		t.Fatal(err)
	}
}
// TestSyncNamespaceGuest verifies Sync succeeds for a guest namespace that
// has persisted registry state but no running VM shim.
func TestSyncNamespaceGuest(t *testing.T) {
	namespace := &HostComputeNamespace{
		Type:        NamespaceTypeGuest,
		NamespaceId: 5,
		SchemaVersion: SchemaVersion{
			Major: 2,
			Minor: 0,
		},
	}
	hnsNamespace, err := namespace.Create()
	if err != nil {
		t.Fatal(err)
	}
	// Create registry state.
	pnc := cni.NewPersistedNamespaceConfig(t.Name(), "test-container", guid.New())
	if err := pnc.Store(); err != nil {
		pnc.Remove()
		t.Fatal(err)
	}
	// With registry state but no VM shim, Sync should still pass: it fails to
	// reach the shim and removes the key so it does not look again.
	if err := hnsNamespace.Sync(); err != nil {
		t.Fatal(err)
	}
	if err := pnc.Remove(); err != nil {
		t.Fatal(err)
	}
	if err := hnsNamespace.Delete(); err != nil {
		t.Fatal(err)
	}
}
// TestSyncNamespaceGuestDefault verifies Sync succeeds for a guest-default
// namespace that has persisted registry state but no running VM shim.
func TestSyncNamespaceGuestDefault(t *testing.T) {
	namespace := &HostComputeNamespace{
		Type:        NamespaceTypeGuestDefault,
		NamespaceId: 5,
		SchemaVersion: SchemaVersion{
			Major: 2,
			Minor: 0,
		},
	}
	hnsNamespace, err := namespace.Create()
	if err != nil {
		t.Fatal(err)
	}
	// Create registry state.
	pnc := cni.NewPersistedNamespaceConfig(t.Name(), "test-container", guid.New())
	if err := pnc.Store(); err != nil {
		pnc.Remove()
		t.Fatal(err)
	}
	// With registry state but no VM shim, Sync should still pass: it fails to
	// reach the shim and removes the key so it does not look again.
	if err := hnsNamespace.Sync(); err != nil {
		t.Fatal(err)
	}
	if err := pnc.Remove(); err != nil {
		t.Fatal(err)
	}
	if err := hnsNamespace.Delete(); err != nil {
		t.Fatal(err)
	}
}

View File

@ -1,165 +0,0 @@
// +build integration
package hcn
import (
"encoding/json"
"fmt"
"testing"
)
// TestCreateDeleteNetwork round-trips a NAT test network: create, dump its
// JSON for debugging, delete.
func TestCreateDeleteNetwork(t *testing.T) {
	network, err := HcnCreateTestNATNetwork()
	if err != nil {
		t.Fatal(err)
	}
	jsonString, err := json.Marshal(network)
	if err != nil {
		t.Fatal(err)
	}
	fmt.Printf("Network JSON:\n%s \n", jsonString)
	if err := network.Delete(); err != nil {
		t.Fatal(err)
	}
}
// TestGetNetworkByName verifies a created network can be looked up by name.
func TestGetNetworkByName(t *testing.T) {
	network, err := HcnCreateTestNATNetwork()
	if err != nil {
		t.Fatal(err)
	}
	network, err = GetNetworkByName(network.Name)
	if err != nil {
		t.Fatal(err)
	}
	if network == nil {
		t.Fatal("No Network found")
	}
	if err := network.Delete(); err != nil {
		t.Fatal(err)
	}
}
// TestGetNetworkById verifies a created network can be looked up by ID.
func TestGetNetworkById(t *testing.T) {
	network, err := HcnCreateTestNATNetwork()
	if err != nil {
		t.Fatal(err)
	}
	network, err = GetNetworkByID(network.Id)
	if err != nil {
		t.Fatal(err)
	}
	if network == nil {
		t.Fatal("No Network found")
	}
	if err := network.Delete(); err != nil {
		t.Fatal(err)
	}
}
// TestListNetwork checks that enumerating networks succeeds.
func TestListNetwork(t *testing.T) {
	if _, err := ListNetworks(); err != nil {
		t.Fatal(err)
	}
}
// testNetworkPolicy is a shared helper: it applies the given policies to a
// fresh overlay network, confirms each policy type appears on the reloaded
// network, removes the policies, and confirms they are gone.
// Matching is by policy Type only, not by settings content.
func testNetworkPolicy(t *testing.T, policiesToTest *PolicyNetworkRequest) {
network, err := CreateTestOverlayNetwork()
if err != nil {
t.Fatal(err)
}
// NOTE(review): the return value(s) of AddPolicy are discarded here; if it
// reports an error the checks below would fail indirectly — confirm intent.
network.AddPolicy(*policiesToTest)
//Reload the network object from HNS.
network, err = GetNetworkByID(network.Id)
if err != nil {
t.Fatal(err)
}
// Every requested policy type must now be present on the network.
for _, policyToTest := range policiesToTest.Policies {
foundPolicy := false
for _, policy := range network.Policies {
if policy.Type == policyToTest.Type {
foundPolicy = true
break
}
}
if !foundPolicy {
t.Fatalf("Could not find %s policy on network.", policyToTest.Type)
}
}
// NOTE(review): RemovePolicy's return value(s) are also discarded.
network.RemovePolicy(*policiesToTest)
//Reload the network object from HNS.
network, err = GetNetworkByID(network.Id)
if err != nil {
t.Fatal(err)
}
// After removal, none of the requested policy types may remain.
for _, policyToTest := range policiesToTest.Policies {
foundPolicy := false
for _, policy := range network.Policies {
if policy.Type == policyToTest.Type {
foundPolicy = true
break
}
}
if foundPolicy {
t.Fatalf("Found %s policy on network when it should have been deleted.", policyToTest.Type)
}
}
err = network.Delete()
if err != nil {
t.Fatal(err)
}
}
// TestAddRemoveRemoteSubnetRoutePolicy runs the shared policy round-trip for
// a remote-subnet-route policy.
func TestAddRemoveRemoteSubnetRoutePolicy(t *testing.T) {
	remoteSubnetRoutePolicy, err := HcnCreateTestRemoteSubnetRoute()
	if err != nil {
		t.Fatal(err)
	}
	testNetworkPolicy(t, remoteSubnetRoutePolicy)
}
// TestAddRemoveHostRoutePolicy runs the shared policy round-trip for a
// host-route policy.
func TestAddRemoveHostRoutePolicy(t *testing.T) {
	hostRoutePolicy, err := HcnCreateTestHostRoute()
	if err != nil {
		t.Fatal(err)
	}
	testNetworkPolicy(t, hostRoutePolicy)
}
// TestNetworkFlags verifies the EnableNonPersistent flag set at creation time
// is still present after reloading the network from HNS.
func TestNetworkFlags(t *testing.T) {
	network, err := CreateTestOverlayNetwork()
	if err != nil {
		t.Fatal(err)
	}
	// Reload the network object from HNS.
	network, err = GetNetworkByID(network.Id)
	if err != nil {
		t.Fatal(err)
	}
	if network.Flags != EnableNonPersistent {
		t.Errorf("EnableNonPersistent flag (%d) is not set on network", EnableNonPersistent)
	}
	if err := network.Delete(); err != nil {
		t.Fatal(err)
	}
}

View File

@ -1,62 +0,0 @@
// +build integration
package hcn
import (
"encoding/json"
"fmt"
"testing"
)
// TestSupportedFeatures dumps the feature map as JSON for debugging and fails
// only if it cannot be marshalled.
func TestSupportedFeatures(t *testing.T) {
	jsonString, err := json.Marshal(GetSupportedFeatures())
	if err != nil {
		t.Fatal(err)
	}
	fmt.Printf("Supported Features:\n%s \n", jsonString)
}
// TestV2ApiSupport checks that V2ApiSupported agrees with the feature map:
// it must succeed when the V2 API is supported and fail when it is not.
func TestV2ApiSupport(t *testing.T) {
	supportedFeatures := GetSupportedFeatures()
	err := V2ApiSupported()
	if supportedFeatures.Api.V2 && err != nil {
		t.Fatal(err)
	}
	if !supportedFeatures.Api.V2 && err == nil {
		// Fix: previously this branch called t.Fatal(err) with a nil error,
		// which only prints "<nil>"; report the actual problem instead.
		t.Fatal("V2ApiSupported should have returned an error")
	}
}
// TestRemoteSubnetSupport checks that RemoteSubnetSupported agrees with the
// feature map.
func TestRemoteSubnetSupport(t *testing.T) {
	supportedFeatures := GetSupportedFeatures()
	err := RemoteSubnetSupported()
	if supportedFeatures.RemoteSubnet && err != nil {
		t.Fatal(err)
	}
	if !supportedFeatures.RemoteSubnet && err == nil {
		// Fix: previously t.Fatal(err) logged a useless "<nil>" here.
		t.Fatal("RemoteSubnetSupported should have returned an error")
	}
}
// TestHostRouteSupport checks that HostRouteSupported agrees with the feature
// map.
func TestHostRouteSupport(t *testing.T) {
	supportedFeatures := GetSupportedFeatures()
	err := HostRouteSupported()
	if supportedFeatures.HostRoute && err != nil {
		t.Fatal(err)
	}
	if !supportedFeatures.HostRoute && err == nil {
		// Fix: previously t.Fatal(err) logged a useless "<nil>" here.
		t.Fatal("HostRouteSupported should have returned an error")
	}
}
// TestDSRSupport checks that DSRSupported agrees with the feature map.
func TestDSRSupport(t *testing.T) {
	supportedFeatures := GetSupportedFeatures()
	err := DSRSupported()
	if supportedFeatures.DSR && err != nil {
		t.Fatal(err)
	}
	if !supportedFeatures.DSR && err == nil {
		// Fix: previously t.Fatal(err) logged a useless "<nil>" here.
		t.Fatal("DSRSupported should have returned an error")
	}
}

View File

@ -1,267 +0,0 @@
// +build integration
package hcn
import (
	"encoding/json"
	"errors"
)
// cleanup best-effort deletes a leftover test network from a previous run.
// All errors are deliberately ignored.
func cleanup(networkName string) {
	testNetwork, err := GetNetworkByName(networkName)
	if err != nil || testNetwork == nil {
		return
	}
	_ = testNetwork.Delete()
}
// HcnCreateTestNATNetwork deletes any leftover network with the test NAT
// name, then creates a fresh schema-v2 NAT network with a fixed MAC pool and
// a single static 192.168.100.0/24 subnet routed via 192.168.100.1.
func HcnCreateTestNATNetwork() (*HostComputeNetwork, error) {
cleanup(NatTestNetworkName)
network := &HostComputeNetwork{
Type: "NAT",
Name: NatTestNetworkName,
MacPool: MacPool{
Ranges: []MacRange{
{
StartMacAddress: "00-15-5D-52-C0-00",
EndMacAddress: "00-15-5D-52-CF-FF",
},
},
},
Ipams: []Ipam{
{
Type: "Static",
Subnets: []Subnet{
{
IpAddressPrefix: "192.168.100.0/24",
Routes: []Route{
{
NextHop: "192.168.100.1",
DestinationPrefix: "0.0.0.0",
},
},
},
},
},
},
SchemaVersion: SchemaVersion{
Major: 2,
Minor: 0,
},
}
return network.Create()
}
// CreateTestOverlayNetwork deletes any leftover overlay test network, then
// creates a fresh schema-v2 overlay network (non-persistent) whose single
// subnet carries a VSID policy with isolation ID 5000.
func CreateTestOverlayNetwork() (*HostComputeNetwork, error) {
cleanup(OverlayTestNetworkName)
network := &HostComputeNetwork{
Type: "Overlay",
Name: OverlayTestNetworkName,
MacPool: MacPool{
Ranges: []MacRange{
{
StartMacAddress: "00-15-5D-52-C0-00",
EndMacAddress: "00-15-5D-52-CF-FF",
},
},
},
Ipams: []Ipam{
{
Type: "Static",
Subnets: []Subnet{
{
IpAddressPrefix: "192.168.100.0/24",
Routes: []Route{
{
NextHop: "192.168.100.1",
DestinationPrefix: "0.0.0.0/0",
},
},
},
},
},
},
Flags: EnableNonPersistent,
SchemaVersion: SchemaVersion{
Major: 2,
Minor: 0,
},
}
// Build the VSID subnet policy (JSON-encoded settings nested inside a
// JSON-encoded SubnetPolicy) and attach it to the only subnet.
vsid := &VsidPolicySetting{
IsolationId: 5000,
}
vsidJson, err := json.Marshal(vsid)
if err != nil {
return nil, err
}
sp := &SubnetPolicy{
Type: VSID,
}
sp.Settings = vsidJson
spJson, err := json.Marshal(sp)
if err != nil {
return nil, err
}
network.Ipams[0].Subnets[0].Policies = append(network.Ipams[0].Subnets[0].Policies, spJson)
return network.Create()
}
// HcnCreateTestEndpoint creates a schema-v2 endpoint named
// NatTestEndpointName on the supplied network.
//
// Fix: the original contained an empty `if network == nil {}` body, so a nil
// network fell through to a nil-pointer dereference at CreateEndpoint; it
// now returns an explicit error instead.
func HcnCreateTestEndpoint(network *HostComputeNetwork) (*HostComputeEndpoint, error) {
	if network == nil {
		return nil, errors.New("HcnCreateTestEndpoint requires a non-nil network")
	}
	Endpoint := &HostComputeEndpoint{
		Name: NatTestEndpointName,
		SchemaVersion: SchemaVersion{
			Major: 2,
			Minor: 0,
		},
	}
	return network.CreateEndpoint(Endpoint)
}
// HcnCreateTestEndpointWithNamespace creates a schema-v2 endpoint named
// NatTestEndpointName on the supplied network, bound to the given namespace.
// NOTE(review): unlike HcnCreateTestEndpoint, network/namespace are not
// nil-checked; callers must pass valid objects.
func HcnCreateTestEndpointWithNamespace(network *HostComputeNetwork, namespace *HostComputeNamespace) (*HostComputeEndpoint, error) {
Endpoint := &HostComputeEndpoint{
Name: NatTestEndpointName,
HostComputeNamespace: namespace.Id,
SchemaVersion: SchemaVersion{
Major: 2,
Minor: 0,
},
}
return network.CreateEndpoint(Endpoint)
}
// HcnCreateTestNamespace creates a schema-v2 host-default namespace with a
// fixed NamespaceId of 5 and returns the created object.
func HcnCreateTestNamespace() (*HostComputeNamespace, error) {
namespace := &HostComputeNamespace{
Type: NamespaceTypeHostDefault,
NamespaceId: 5,
SchemaVersion: SchemaVersion{
Major: 2,
Minor: 0,
},
}
return namespace.Create()
}
// HcnCreateAcls builds a PolicyEndpointRequest carrying a matched pair of
// allow ACLs (one inbound, one outbound) for TCP (protocol 6) on ports
// 80/8080 between 192.168.100.0/24 and 10.0.0.21, priority 200.
func HcnCreateAcls() (*PolicyEndpointRequest, error) {
// Inbound allow rule.
in := AclPolicySetting{
Protocols: "6",
Action: ActionTypeAllow,
Direction: DirectionTypeIn,
LocalAddresses: "192.168.100.0/24,10.0.0.21",
RemoteAddresses: "192.168.100.0/24,10.0.0.21",
LocalPorts: "80,8080",
RemotePorts: "80,8080",
RuleType: RuleTypeSwitch,
Priority: 200,
}
rawJSON, err := json.Marshal(in)
if err != nil {
return nil, err
}
inPolicy := EndpointPolicy{
Type: ACL,
Settings: rawJSON,
}
// Outbound allow rule, otherwise identical to the inbound one.
out := AclPolicySetting{
Protocols: "6",
Action: ActionTypeAllow,
Direction: DirectionTypeOut,
LocalAddresses: "192.168.100.0/24,10.0.0.21",
RemoteAddresses: "192.168.100.0/24,10.0.0.21",
LocalPorts: "80,8080",
RemotePorts: "80,8080",
RuleType: RuleTypeSwitch,
Priority: 200,
}
rawJSON, err = json.Marshal(out)
if err != nil {
return nil, err
}
outPolicy := EndpointPolicy{
Type: ACL,
Settings: rawJSON,
}
endpointRequest := PolicyEndpointRequest{
Policies: []EndpointPolicy{inPolicy, outPolicy},
}
return &endpointRequest, nil
}
// HcnCreateTestLoadBalancer creates a schema-v2 load balancer over the single
// given endpoint: source VIP 10.0.0.1, frontend VIPs 1.1.1.2/1.1.1.3, and a
// TCP 8080 -> 8090 port mapping.
func HcnCreateTestLoadBalancer(endpoint *HostComputeEndpoint) (*HostComputeLoadBalancer, error) {
loadBalancer := &HostComputeLoadBalancer{
HostComputeEndpoints: []string{endpoint.Id},
SourceVIP: "10.0.0.1",
PortMappings: []LoadBalancerPortMapping{
{
Protocol: 6, // TCP
InternalPort: 8080,
ExternalPort: 8090,
},
},
FrontendVIPs: []string{"1.1.1.2", "1.1.1.3"},
SchemaVersion: SchemaVersion{
Major: 2,
Minor: 0,
},
}
return loadBalancer.Create()
}
// HcnCreateTestRemoteSubnetRoute builds a PolicyNetworkRequest containing one
// remote-subnet-route policy (192.168.2.0/24 via provider 1.1.1.1, isolation
// ID 5000) with JSON-encoded settings.
func HcnCreateTestRemoteSubnetRoute() (*PolicyNetworkRequest, error) {
rsr := RemoteSubnetRoutePolicySetting{
DestinationPrefix: "192.168.2.0/24",
IsolationId: 5000,
ProviderAddress: "1.1.1.1",
DistributedRouterMacAddress: "00-12-34-56-78-9a",
}
rawJSON, err := json.Marshal(rsr)
if err != nil {
return nil, err
}
rsrPolicy := NetworkPolicy{
Type: RemoteSubnetRoute,
Settings: rawJSON,
}
networkRequest := PolicyNetworkRequest{
Policies: []NetworkPolicy{rsrPolicy},
}
return &networkRequest, nil
}
// HcnCreateTestHostRoute builds a PolicyNetworkRequest containing a single
// host-route policy with empty ("{}") settings.
func HcnCreateTestHostRoute() (*PolicyNetworkRequest, error) {
	return &PolicyNetworkRequest{
		Policies: []NetworkPolicy{
			{
				Type:     HostRoute,
				Settings: []byte("{}"),
			},
		},
	}, nil
}

View File

@ -1,111 +0,0 @@
// +build integration
package hcn
import (
"encoding/json"
"testing"
"github.com/Microsoft/hcsshim"
)
// TestV1Network creates and deletes a V1 (HNS schema) NAT network through the
// createNetwork path.
//
// Fix: every t.Fatal(err) was followed by t.Fail(); t.Fatal stops the test
// goroutine, so those t.Fail() calls were unreachable dead code — removed.
func TestV1Network(t *testing.T) {
	cleanup(NatTestNetworkName)
	v1network := hcsshim.HNSNetwork{
		Type: "NAT",
		Name: NatTestNetworkName,
		MacPools: []hcsshim.MacPool{
			{
				StartMacAddress: "00-15-5D-52-C0-00",
				EndMacAddress:   "00-15-5D-52-CF-FF",
			},
		},
		Subnets: []hcsshim.Subnet{
			{
				AddressPrefix:  "192.168.100.0/24",
				GatewayAddress: "192.168.100.1",
			},
		},
	}
	jsonString, err := json.Marshal(v1network)
	if err != nil {
		t.Fatal(err)
	}
	network, err := createNetwork(string(jsonString))
	if err != nil {
		t.Fatal(err)
	}
	if err := network.Delete(); err != nil {
		t.Fatal(err)
	}
}
// TestV1Endpoint creates a V1 NAT network, attaches a V1 endpoint to it, and
// tears both down.
//
// Fix: every t.Fatal(err) was followed by t.Fail(); t.Fatal stops the test
// goroutine, so those t.Fail() calls were unreachable dead code — removed.
func TestV1Endpoint(t *testing.T) {
	cleanup(NatTestNetworkName)
	v1network := hcsshim.HNSNetwork{
		Type: "NAT",
		Name: NatTestNetworkName,
		MacPools: []hcsshim.MacPool{
			{
				StartMacAddress: "00-15-5D-52-C0-00",
				EndMacAddress:   "00-15-5D-52-CF-FF",
			},
		},
		Subnets: []hcsshim.Subnet{
			{
				AddressPrefix:  "192.168.100.0/24",
				GatewayAddress: "192.168.100.1",
			},
		},
	}
	jsonString, err := json.Marshal(v1network)
	if err != nil {
		t.Fatal(err)
	}
	network, err := createNetwork(string(jsonString))
	if err != nil {
		t.Fatal(err)
	}
	v1endpoint := hcsshim.HNSEndpoint{
		Name:           NatTestEndpointName,
		VirtualNetwork: network.Id,
	}
	jsonString, err = json.Marshal(v1endpoint)
	if err != nil {
		t.Fatal(err)
	}
	endpoint, err := createEndpoint(network.Id, string(jsonString))
	if err != nil {
		t.Fatal(err)
	}
	if err := endpoint.Delete(); err != nil {
		t.Fatal(err)
	}
	if err := network.Delete(); err != nil {
		t.Fatal(err)
	}
}

View File

@ -1,97 +0,0 @@
// +build integration
package hcn
import (
"os"
"testing"
"github.com/Microsoft/hcsshim"
)
const (
// NatTestNetworkName is the name used for NAT networks created by tests.
NatTestNetworkName string = "GoTestNat"
// NatTestEndpointName is the name used for endpoints created by tests.
NatTestEndpointName string = "GoTestNatEndpoint"
// OverlayTestNetworkName is the name used for overlay networks created by tests.
OverlayTestNetworkName string = "GoTestOverlay"
)
// TestMain is the test entry point; no package-level setup or teardown is
// performed, it simply runs the tests and exits with their status.
func TestMain(m *testing.M) {
os.Exit(m.Run())
}
// CreateTestNetwork creates a V1 (hcsshim schema) NAT network with a single
// 192.168.100.0/24 subnet gatewayed at 192.168.100.1.
func CreateTestNetwork() (*hcsshim.HNSNetwork, error) {
network := &hcsshim.HNSNetwork{
Type: "NAT",
Name: NatTestNetworkName,
Subnets: []hcsshim.Subnet{
{
AddressPrefix: "192.168.100.0/24",
GatewayAddress: "192.168.100.1",
},
},
}
return network.Create()
}
// TestEndpoint creates a V1 endpoint, attaches and detaches it from host
// compartment 1, then deletes endpoint and network.
func TestEndpoint(t *testing.T) {
	network, err := CreateTestNetwork()
	if err != nil {
		t.Fatal(err)
	}
	Endpoint := &hcsshim.HNSEndpoint{
		Name: NatTestEndpointName,
	}
	Endpoint, err = network.CreateEndpoint(Endpoint)
	if err != nil {
		t.Fatal(err)
	}
	if err := Endpoint.HostAttach(1); err != nil {
		t.Fatal(err)
	}
	if err := Endpoint.HostDetach(); err != nil {
		t.Fatal(err)
	}
	if _, err := Endpoint.Delete(); err != nil {
		t.Fatal(err)
	}
	if _, err := network.Delete(); err != nil {
		t.Fatal(err)
	}
}
// TestEndpointGetAll checks that listing all V1 endpoints succeeds.
func TestEndpointGetAll(t *testing.T) {
	if _, err := hcsshim.HNSListEndpointRequest(); err != nil {
		t.Fatal(err)
	}
}
// TestNetworkGetAll checks that listing all V1 networks succeeds.
func TestNetworkGetAll(t *testing.T) {
	if _, err := hcsshim.HNSListNetworkRequest("GET", "", ""); err != nil {
		t.Fatal(err)
	}
}
// TestNetwork creates and deletes a V1 test network.
func TestNetwork(t *testing.T) {
	network, err := CreateTestNetwork()
	if err != nil {
		t.Fatal(err)
	}
	if _, err := network.Delete(); err != nil {
		t.Fatal(err)
	}
}

View File

@ -1,93 +0,0 @@
// Package appargs provides argument validation routines for use with
// github.com/urfave/cli.
package appargs
import (
"errors"
"strconv"
"github.com/urfave/cli"
)
// Validator is an argument validator function. It returns the number of
// arguments consumed or -1 on error. A return of 0 means the validator
// matched nothing and consumed no arguments.
type Validator = func([]string) int
// String is a validator for strings. It consumes one argument if any remain
// (the empty string is accepted) and fails otherwise.
func String(args []string) int {
	if len(args) > 0 {
		return 1
	}
	return -1
}
// NonEmptyString is a validator for non-empty strings: it consumes one
// argument if one remains and it is not "".
func NonEmptyString(args []string) int {
	if len(args) > 0 && args[0] != "" {
		return 1
	}
	return -1
}
// Int returns a validator that consumes one argument parseable as an integer
// in the given base and within [min, max].
func Int(base int, min int, max int) Validator {
	return func(args []string) int {
		if len(args) == 0 {
			return -1
		}
		v, err := strconv.ParseInt(args[0], base, 0)
		if err != nil {
			return -1
		}
		if n := int(v); n < min || n > max {
			return -1
		}
		return 1
	}
}
// Optional returns a validator that treats an argument as optional: with no
// arguments left it consumes zero and succeeds, otherwise it defers to v.
func Optional(v Validator) Validator {
	return func(args []string) int {
		if len(args) > 0 {
			return v(args)
		}
		return 0
	}
}
// Rest returns a validator that applies v repeatedly until all remaining
// arguments are consumed, propagating the first failure.
func Rest(v Validator) Validator {
	return func(args []string) int {
		total := len(args)
		for remaining := args; len(remaining) != 0; {
			n := v(remaining)
			if n < 0 {
				return n
			}
			remaining = remaining[n:]
		}
		return total
	}
}
// ErrInvalidUsage is returned when there is a validation error.
var ErrInvalidUsage = errors.New("invalid command usage")
// Validate can be used as a command's Before function to validate the arguments
// to the command.
//
// Each validator consumes zero or more leading arguments in order; if any
// validator reports an error (negative count), or any arguments remain after
// all validators have run, ErrInvalidUsage is returned.
func Validate(vs ...Validator) cli.BeforeFunc {
return func(context *cli.Context) error {
remaining := context.Args()
for _, v := range vs {
consumed := v(remaining)
if consumed < 0 {
return ErrInvalidUsage
}
remaining = remaining[consumed:]
}
// Trailing unconsumed arguments are also a usage error.
if len(remaining) > 0 {
return ErrInvalidUsage
}
return nil
}
}

View File

@ -1,137 +0,0 @@
package cni
import (
"testing"
"github.com/Microsoft/hcsshim/internal/guid"
"github.com/Microsoft/hcsshim/internal/regstate"
)
// Test_LoadPersistedNamespaceConfig_NoConfig expects a NotFoundError when no
// config was ever stored under this test's name.
func Test_LoadPersistedNamespaceConfig_NoConfig(t *testing.T) {
	pnc, err := LoadPersistedNamespaceConfig(t.Name())
	if pnc != nil {
		t.Fatal("config should be nil")
	}
	if err == nil {
		t.Fatal("err should be set")
	}
	if !regstate.IsNotFoundError(err) {
		t.Fatal("err should be NotFoundError")
	}
}
// Test_LoadPersistedNamespaceConfig_WithConfig stores a config, loads it back
// and verifies every persisted field round-trips.
func Test_LoadPersistedNamespaceConfig_WithConfig(t *testing.T) {
	pnc := NewPersistedNamespaceConfig(t.Name(), "test-container", guid.New())
	if err := pnc.Store(); err != nil {
		pnc.Remove()
		t.Fatalf("store failed with: %v", err)
	}
	defer pnc.Remove()
	pnc2, err := LoadPersistedNamespaceConfig(t.Name())
	if err != nil {
		t.Fatal("should have no error on stored config")
	}
	if pnc2 == nil {
		t.Fatal("stored config should have been returned")
	}
	if pnc.namespaceID != pnc2.namespaceID {
		t.Fatal("actual/stored namespaceID not equal")
	}
	if pnc.ContainerID != pnc2.ContainerID {
		t.Fatal("actual/stored ContainerID not equal")
	}
	if pnc.HostUniqueID != pnc2.HostUniqueID {
		t.Fatal("actual/stored HostUniqueID not equal")
	}
	if !pnc2.stored {
		t.Fatal("stored should be true for registry load")
	}
}
// Test_PersistedNamespaceConfig_StoreNew checks a fresh config can be stored.
func Test_PersistedNamespaceConfig_StoreNew(t *testing.T) {
	pnc := NewPersistedNamespaceConfig(t.Name(), "test-container", guid.New())
	if err := pnc.Store(); err != nil {
		pnc.Remove()
		t.Fatalf("store failed with: %v", err)
	}
	defer pnc.Remove()
}
// Test_PersistedNamespaceConfig_StoreUpdate stores a config, mutates it,
// stores again, and verifies the update is what loads back.
func Test_PersistedNamespaceConfig_StoreUpdate(t *testing.T) {
	pnc := NewPersistedNamespaceConfig(t.Name(), "test-container", guid.New())
	if err := pnc.Store(); err != nil {
		pnc.Remove()
		t.Fatalf("store failed with: %v", err)
	}
	defer pnc.Remove()
	pnc.ContainerID = "test-container2"
	pnc.HostUniqueID = guid.New()
	if err := pnc.Store(); err != nil {
		pnc.Remove()
		t.Fatalf("store update failed with: %v", err)
	}
	// Verify the update
	pnc2, err := LoadPersistedNamespaceConfig(t.Name())
	if err != nil {
		t.Fatal("stored config should have been returned")
	}
	if pnc.ContainerID != pnc2.ContainerID {
		t.Fatal("actual/stored ContainerID not equal")
	}
	if pnc.HostUniqueID != pnc2.HostUniqueID {
		t.Fatal("actual/stored HostUniqueID not equal")
	}
}
// Test_PersistedNamespaceConfig_RemoveNotStored checks Remove is a no-op
// success on a config that was never stored.
func Test_PersistedNamespaceConfig_RemoveNotStored(t *testing.T) {
	pnc := NewPersistedNamespaceConfig(t.Name(), "test-container", guid.New())
	if err := pnc.Remove(); err != nil {
		t.Fatalf("remove on not stored should not fail: %v", err)
	}
}
// Test_PersistedNamespaceConfig_RemoveStoredKey checks Remove succeeds on a
// stored config.
func Test_PersistedNamespaceConfig_RemoveStoredKey(t *testing.T) {
	pnc := NewPersistedNamespaceConfig(t.Name(), "test-container", guid.New())
	if err := pnc.Store(); err != nil {
		t.Fatalf("store failed with: %v", err)
	}
	if err := pnc.Remove(); err != nil {
		t.Fatalf("remove on stored key should not fail: %v", err)
	}
}
// Test_PersistedNamespaceConfig_RemovedOtherKey verifies that Remove on a
// second in-memory copy of an already-removed config does not fail.
//
// Fix: corrected the failure message "should of found" -> "should have found".
func Test_PersistedNamespaceConfig_RemovedOtherKey(t *testing.T) {
	pnc := NewPersistedNamespaceConfig(t.Name(), "test-container", guid.New())
	err := pnc.Store()
	if err != nil {
		t.Fatalf("store failed with: %v", err)
	}
	pnc2, err := LoadPersistedNamespaceConfig(t.Name())
	if err != nil {
		t.Fatal("should have found stored config")
	}
	err = pnc.Remove()
	if err != nil {
		t.Fatalf("remove on stored key should not fail: %v", err)
	}
	// Now remove the other key that has the invalid memory state
	err = pnc2.Remove()
	if err != nil {
		t.Fatalf("remove on in-memory already removed should not fail: %v", err)
	}
}

View File

@ -1,40 +0,0 @@
package copyfile
import (
"fmt"
"syscall"
"unsafe"
)
var (
modkernel32 = syscall.NewLazyDLL("kernel32.dll")
procCopyFileW = modkernel32.NewProc("CopyFileW")
)
// CopyFile is a utility for copying a file - used for the LCOW scratch cache.
// Uses CopyFileW win32 API for performance.
//
// overwrite maps to CopyFileW's bFailIfExists flag (inverted): when overwrite
// is false the flag is 1 and, per the Win32 API, the copy fails if destFile
// already exists.
func CopyFile(srcFile, destFile string, overwrite bool) error {
var bFailIfExists uint32 = 1
if overwrite {
bFailIfExists = 0
}
// Both paths must be UTF-16 for the W-suffixed API.
lpExistingFileName, err := syscall.UTF16PtrFromString(srcFile)
if err != nil {
return err
}
lpNewFileName, err := syscall.UTF16PtrFromString(destFile)
if err != nil {
return err
}
r1, _, err := syscall.Syscall(
procCopyFileW.Addr(),
3,
uintptr(unsafe.Pointer(lpExistingFileName)),
uintptr(unsafe.Pointer(lpNewFileName)),
uintptr(bFailIfExists))
// CopyFileW returns nonzero on success; err carries the Win32 error code.
if r1 == 0 {
return fmt.Errorf("failed CopyFileW Win32 call from '%s' to '%s': %s", srcFile, destFile, err)
}
return nil
}

View File

@ -1,103 +0,0 @@
package copywithtimeout
import (
"bytes"
"encoding/hex"
"fmt"
"io"
"os"
"strconv"
"syscall"
"time"
"github.com/sirupsen/logrus"
)
// logDataByteCount is for an advanced debugging technique to allow
// data read/written to a processes stdio channels hex-dumped to the
// log when running at debug level or higher. It is controlled through
// the environment variable HCSSHIM_LOG_DATA_BYTE_COUNT
var logDataByteCount int64
// init reads HCSSHIM_LOG_DATA_BYTE_COUNT and, if it parses as a uint32,
// enables hex-dump debug logging for up to that many copied bytes.
// Parse failures are deliberately ignored (the feature stays disabled).
//
// Fix: the local variable was named `bytes`, shadowing the imported
// "bytes" package; renamed for clarity.
func init() {
	countStr := os.Getenv("HCSSHIM_LOG_DATA_BYTE_COUNT")
	if len(countStr) > 0 {
		if u, err := strconv.ParseUint(countStr, 10, 32); err == nil {
			logDataByteCount = int64(u)
		}
	}
}
// Copy is a wrapper for io.Copy using a timeout duration.
//
// The copy runs in a background goroutine; Copy waits for either completion
// or the timeout. Windows errNoData (232) and errBrokenPipe (109) from the
// copy are treated as a clean end-of-stream rather than failures.
//
// NOTE(review): on timeout the copying goroutine is not cancelled — nothing
// closes src/dst here, so it blocks until the underlying reader/writer
// unblocks; confirm callers close the streams on timeout.
func Copy(dst io.Writer, src io.Reader, size int64, context string, timeout time.Duration) (int64, error) {
logrus.WithFields(logrus.Fields{
"stdval": context,
"size": size,
"timeout": timeout,
}).Debug("hcsshim::copywithtimeout - Begin")
type resultType struct {
err error
bytes int64
}
// Buffered so the goroutine can always deliver its result and exit, even
// if this function has already returned on timeout.
done := make(chan resultType, 1)
go func() {
result := resultType{}
if logrus.GetLevel() < logrus.DebugLevel || logDataByteCount == 0 {
result.bytes, result.err = io.Copy(dst, src)
} else {
// In advanced debug mode where we log (hexdump format) what is copied
// up to the number of bytes defined by environment variable
// HCSSHIM_LOG_DATA_BYTE_COUNT
var buf bytes.Buffer
tee := io.TeeReader(src, &buf)
result.bytes, result.err = io.Copy(dst, tee)
if result.err == nil {
size := result.bytes
if size > logDataByteCount {
size = logDataByteCount
}
if size > 0 {
bytes := make([]byte, size)
if _, err := buf.Read(bytes); err == nil {
logrus.Debugf("hcsshim::copyWithTimeout - Read bytes\n%s", hex.Dump(bytes))
}
}
}
}
done <- result
}()
var result resultType
timedout := time.After(timeout)
select {
case <-timedout:
return 0, fmt.Errorf("hcsshim::copyWithTimeout: timed out (%s)", context)
case result = <-done:
if result.err != nil && result.err != io.EOF {
// See https://github.com/golang/go/blob/f3f29d1dea525f48995c1693c609f5e67c046893/src/os/exec/exec_windows.go for a clue as to why we are doing this :)
if se, ok := result.err.(syscall.Errno); ok {
const (
errNoData = syscall.Errno(232)
errBrokenPipe = syscall.Errno(109)
)
// Treat "no data"/"broken pipe" as a normal end of stream.
if se == errNoData || se == errBrokenPipe {
logrus.WithFields(logrus.Fields{
"stdval": context,
logrus.ErrorKey: se,
}).Debug("hcsshim::copywithtimeout - End")
return result.bytes, nil
}
}
return 0, fmt.Errorf("hcsshim::copyWithTimeout: error reading: '%s' after %d bytes (%s)", result.err, result.bytes, context)
}
}
logrus.WithFields(logrus.Fields{
"stdval": context,
"copied-bytes": result.bytes,
}).Debug("hcsshim::copywithtimeout - Completed Successfully")
return result.bytes, nil
}

View File

@ -1,136 +0,0 @@
package guid
import (
"encoding/json"
"fmt"
"testing"
)
// Test_New checks that two freshly generated GUIDs are distinct.
func Test_New(t *testing.T) {
	if New() == New() {
		t.Fatal("GUID's should not be equal when generated")
	}
}
// Test_FromString checks that a GUID survives a String/FromString round-trip.
func Test_FromString(t *testing.T) {
	g := New()
	if g2 := FromString(g.String()); g != g2 {
		t.Fatalf("GUID's not equal %v, %v", g, g2)
	}
}
// Test_MarshalJSON checks a GUID marshals to its quoted string form.
func Test_MarshalJSON(t *testing.T) {
	g := New()
	gs := g.String()
	js, err := json.Marshal(g)
	if err != nil {
		t.Fatalf("failed to marshal with %v", err)
	}
	want := fmt.Sprintf("\"%s\"", gs)
	if want != string(js) {
		t.Fatalf("failed to marshal %s != %s", want, string(js))
	}
}
// Test_MarshalJSON_Ptr checks a *GUID marshals to its quoted string form.
func Test_MarshalJSON_Ptr(t *testing.T) {
	g := New()
	gs := g.String()
	js, err := json.Marshal(&g)
	if err != nil {
		t.Fatalf("failed to marshal with %v", err)
	}
	want := fmt.Sprintf("\"%s\"", gs)
	if want != string(js) {
		t.Fatalf("failed to marshal %s != %s", want, string(js))
	}
}
// Test_MarshalJSON_Nested checks a GUID struct field marshals as a quoted
// string inside the enclosing object.
func Test_MarshalJSON_Nested(t *testing.T) {
	type test struct {
		G GUID
	}
	in := test{G: New()}
	js, err := json.Marshal(in)
	if err != nil {
		t.Fatalf("failed to marshal with %v", err)
	}
	want := fmt.Sprintf("{\"G\":\"%s\"}", in.G.String())
	if want != string(js) {
		t.Fatalf("failed to marshal %s != %s", want, string(js))
	}
}
// Test_MarshalJSON_Nested_Ptr verifies that a *GUID field inside a struct
// marshals as a quoted string member of the enclosing JSON object.
func Test_MarshalJSON_Nested_Ptr(t *testing.T) {
	type test struct {
		G *GUID
	}
	g := New()
	wrapper := test{G: &g}
	encoded, err := json.Marshal(wrapper)
	if err != nil {
		t.Fatalf("failed to marshal with %v", err)
	}
	want := fmt.Sprintf("{\"G\":\"%s\"}", wrapper.G.String())
	if want != string(encoded) {
		t.Fatalf("failed to marshal %s != %s", want, string(encoded))
	}
}
// Test_UnmarshalJSON verifies that a marshaled GUID unmarshals back to an
// equal value.
func Test_UnmarshalJSON(t *testing.T) {
	g := New()
	encoded, _ := json.Marshal(g)
	var decoded GUID
	if err := json.Unmarshal(encoded, &decoded); err != nil {
		t.Fatalf("failed to unmarshal with: %v", err)
	}
	if g != decoded {
		t.Fatalf("failed to unmarshal %s != %s", g, decoded)
	}
}
// Test_UnmarshalJSON_Nested verifies that a GUID field survives a JSON
// round trip when nested inside a struct.
func Test_UnmarshalJSON_Nested(t *testing.T) {
	type test struct {
		G GUID
	}
	in := test{G: New()}
	encoded, _ := json.Marshal(in)
	var out test
	if err := json.Unmarshal(encoded, &out); err != nil {
		t.Fatalf("failed to unmarshal with: %v", err)
	}
	if in.G != out.G {
		t.Fatalf("failed to unmarshal %v != %v", in.G, out.G)
	}
}
// Test_UnmarshalJSON_Nested_Ptr verifies that a *GUID field survives a JSON
// round trip when nested inside a struct.
func Test_UnmarshalJSON_Nested_Ptr(t *testing.T) {
	type test struct {
		G *GUID
	}
	g := New()
	in := test{G: &g}
	encoded, _ := json.Marshal(in)
	var out test
	if err := json.Unmarshal(encoded, &out); err != nil {
		t.Fatalf("failed to unmarshal with: %v", err)
	}
	if *in.G != *out.G {
		t.Fatalf("failed to unmarshal %v != %v", in.G, out.G)
	}
}

View File

@ -7,9 +7,14 @@ func logOperationBegin(ctx logrus.Fields, msg string) {
}
func logOperationEnd(ctx logrus.Fields, msg string, err error) {
// Copy the log and fields first.
log := logrus.WithFields(ctx)
if err == nil {
logrus.WithFields(ctx).Debug(msg)
log.Debug(msg)
} else {
logrus.WithFields(ctx).WithError(err).Error(msg)
// Edit only the copied field data to avoid race conditions on the
// write.
log.Data[logrus.ErrorKey] = err
log.Error(msg)
}
}

View File

@ -31,9 +31,8 @@ func newProcess(process hcsProcess, processID int, computeSystem *System) *Proce
processID: processID,
system: computeSystem,
logctx: logrus.Fields{
logfields.HCSOperation: "",
logfields.ContainerID: computeSystem.ID(),
logfields.ProcessID: processID,
logfields.ContainerID: computeSystem.ID(),
logfields.ProcessID: processID,
},
}
}
@ -88,13 +87,12 @@ func (process *Process) SystemID() string {
}
func (process *Process) logOperationBegin(operation string) {
process.logctx[logfields.HCSOperation] = operation
logOperationBegin(
process.logctx,
"hcsshim::Process - Begin Operation")
operation+" - Begin Operation")
}
func (process *Process) logOperationEnd(err error) {
func (process *Process) logOperationEnd(operation string, err error) {
var result string
if err == nil {
result = "Success"
@ -104,9 +102,8 @@ func (process *Process) logOperationEnd(err error) {
logOperationEnd(
process.logctx,
"hcsshim::Process - End Operation - "+result,
operation+" - End Operation - "+result,
err)
process.logctx[logfields.HCSOperation] = ""
}
// Signal signals the process with `options`.
@ -116,7 +113,7 @@ func (process *Process) Signal(options guestrequest.SignalProcessOptions) (err e
operation := "hcsshim::Process::Signal"
process.logOperationBegin(operation)
defer func() { process.logOperationEnd(err) }()
defer func() { process.logOperationEnd(operation, err) }()
if process.handle == 0 {
return makeProcessError(process, operation, ErrAlreadyClosed, nil)
@ -148,7 +145,7 @@ func (process *Process) Kill() (err error) {
operation := "hcsshim::Process::Kill"
process.logOperationBegin(operation)
defer func() { process.logOperationEnd(err) }()
defer func() { process.logOperationEnd(operation, err) }()
if process.handle == 0 {
return makeProcessError(process, operation, ErrAlreadyClosed, nil)
@ -170,7 +167,7 @@ func (process *Process) Kill() (err error) {
func (process *Process) Wait() (err error) {
operation := "hcsshim::Process::Wait"
process.logOperationBegin(operation)
defer func() { process.logOperationEnd(err) }()
defer func() { process.logOperationEnd(operation, err) }()
err = waitForNotification(process.callbackNumber, hcsNotificationProcessExited, nil)
if err != nil {
@ -185,7 +182,7 @@ func (process *Process) Wait() (err error) {
func (process *Process) WaitTimeout(timeout time.Duration) (err error) {
operation := "hcssshim::Process::WaitTimeout"
process.logOperationBegin(operation)
defer func() { process.logOperationEnd(err) }()
defer func() { process.logOperationEnd(operation, err) }()
err = waitForNotification(process.callbackNumber, hcsNotificationProcessExited, &timeout)
if err != nil {
@ -202,7 +199,7 @@ func (process *Process) ResizeConsole(width, height uint16) (err error) {
operation := "hcsshim::Process::ResizeConsole"
process.logOperationBegin(operation)
defer func() { process.logOperationEnd(err) }()
defer func() { process.logOperationEnd(operation, err) }()
if process.handle == 0 {
return makeProcessError(process, operation, ErrAlreadyClosed, nil)
@ -239,7 +236,7 @@ func (process *Process) Properties() (_ *ProcessStatus, err error) {
operation := "hcsshim::Process::Properties"
process.logOperationBegin(operation)
defer func() { process.logOperationEnd(err) }()
defer func() { process.logOperationEnd(operation, err) }()
if process.handle == 0 {
return nil, makeProcessError(process, operation, ErrAlreadyClosed, nil)
@ -275,7 +272,7 @@ func (process *Process) Properties() (_ *ProcessStatus, err error) {
func (process *Process) ExitCode() (_ int, err error) {
operation := "hcsshim::Process::ExitCode"
process.logOperationBegin(operation)
defer func() { process.logOperationEnd(err) }()
defer func() { process.logOperationEnd(operation, err) }()
properties, err := process.Properties()
if err != nil {
@ -302,7 +299,7 @@ func (process *Process) Stdio() (_ io.WriteCloser, _ io.ReadCloser, _ io.ReadClo
operation := "hcsshim::Process::Stdio"
process.logOperationBegin(operation)
defer func() { process.logOperationEnd(err) }()
defer func() { process.logOperationEnd(operation, err) }()
if process.handle == 0 {
return nil, nil, nil, makeProcessError(process, operation, ErrAlreadyClosed, nil)
@ -346,7 +343,7 @@ func (process *Process) CloseStdin() (err error) {
operation := "hcsshim::Process::CloseStdin"
process.logOperationBegin(operation)
defer func() { process.logOperationEnd(err) }()
defer func() { process.logOperationEnd(operation, err) }()
if process.handle == 0 {
return makeProcessError(process, operation, ErrAlreadyClosed, nil)
@ -384,7 +381,7 @@ func (process *Process) Close() (err error) {
operation := "hcsshim::Process::Close"
process.logOperationBegin(operation)
defer func() { process.logOperationEnd(err) }()
defer func() { process.logOperationEnd(operation, err) }()
// Don't double free this
if process.handle == 0 {

View File

@ -49,20 +49,18 @@ func newSystem(id string) *System {
return &System{
id: id,
logctx: logrus.Fields{
logfields.HCSOperation: "",
logfields.ContainerID: id,
logfields.ContainerID: id,
},
}
}
func (computeSystem *System) logOperationBegin(operation string) {
computeSystem.logctx[logfields.HCSOperation] = operation
logOperationBegin(
computeSystem.logctx,
"hcsshim::ComputeSystem - Begin Operation")
operation+" - Begin Operation")
}
func (computeSystem *System) logOperationEnd(err error) {
func (computeSystem *System) logOperationEnd(operation string, err error) {
var result string
if err == nil {
result = "Success"
@ -72,9 +70,8 @@ func (computeSystem *System) logOperationEnd(err error) {
logOperationEnd(
computeSystem.logctx,
"hcsshim::ComputeSystem - End Operation - "+result,
operation+" - End Operation - "+result,
err)
computeSystem.logctx[logfields.HCSOperation] = ""
}
// CreateComputeSystem creates a new compute system with the given configuration but does not start it.
@ -83,7 +80,7 @@ func CreateComputeSystem(id string, hcsDocumentInterface interface{}) (_ *System
computeSystem := newSystem(id)
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(err) }()
defer func() { computeSystem.logOperationEnd(operation, err) }()
hcsDocumentB, err := json.Marshal(hcsDocumentInterface)
if err != nil {
@ -135,9 +132,9 @@ func OpenComputeSystem(id string) (_ *System, err error) {
computeSystem.logOperationBegin(operation)
defer func() {
if IsNotExist(err) {
computeSystem.logOperationEnd(nil)
computeSystem.logOperationEnd(operation, nil)
} else {
computeSystem.logOperationEnd(err)
computeSystem.logOperationEnd(operation, err)
}
}()
@ -163,12 +160,10 @@ func OpenComputeSystem(id string) (_ *System, err error) {
// GetComputeSystems gets a list of the compute systems on the system that match the query
func GetComputeSystems(q schema1.ComputeSystemQuery) (_ []schema1.ContainerProperties, err error) {
operation := "hcsshim::GetComputeSystems"
fields := logrus.Fields{
logfields.HCSOperation: operation,
}
fields := logrus.Fields{}
logOperationBegin(
fields,
"hcsshim::ComputeSystem - Begin Operation")
operation+" - Begin Operation")
defer func() {
var result string
@ -180,7 +175,7 @@ func GetComputeSystems(q schema1.ComputeSystemQuery) (_ []schema1.ContainerPrope
logOperationEnd(
fields,
"hcsshim::ComputeSystem - End Operation - "+result,
operation+" - End Operation - "+result,
err)
}()
@ -227,7 +222,7 @@ func (computeSystem *System) Start() (err error) {
operation := "hcsshim::ComputeSystem::Start"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(err) }()
defer func() { computeSystem.logOperationEnd(operation, err) }()
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, "Start", "", ErrAlreadyClosed, nil)
@ -286,9 +281,9 @@ func (computeSystem *System) Shutdown() (err error) {
computeSystem.logOperationBegin(operation)
defer func() {
if IsAlreadyStopped(err) {
computeSystem.logOperationEnd(nil)
computeSystem.logOperationEnd(operation, nil)
} else {
computeSystem.logOperationEnd(err)
computeSystem.logOperationEnd(operation, err)
}
}()
@ -318,9 +313,9 @@ func (computeSystem *System) Terminate() (err error) {
computeSystem.logOperationBegin(operation)
defer func() {
if IsPending(err) {
computeSystem.logOperationEnd(nil)
computeSystem.logOperationEnd(operation, nil)
} else {
computeSystem.logOperationEnd(err)
computeSystem.logOperationEnd(operation, err)
}
}()
@ -344,7 +339,7 @@ func (computeSystem *System) Terminate() (err error) {
func (computeSystem *System) Wait() (err error) {
operation := "hcsshim::ComputeSystem::Wait"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(err) }()
defer func() { computeSystem.logOperationEnd(operation, err) }()
err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, nil)
if err != nil {
@ -359,10 +354,10 @@ func (computeSystem *System) Wait() (err error) {
func (computeSystem *System) WaitExpectedError(expected error) (err error) {
operation := "hcsshim::ComputeSystem::WaitExpectedError"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(err) }()
defer func() { computeSystem.logOperationEnd(operation, err) }()
err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, nil)
if err != nil && err != expected {
if err != nil && getInnerError(err) != expected {
return makeSystemError(computeSystem, "WaitExpectedError", "", err, nil)
}
@ -374,7 +369,7 @@ func (computeSystem *System) WaitExpectedError(expected error) (err error) {
func (computeSystem *System) WaitTimeout(timeout time.Duration) (err error) {
operation := "hcsshim::ComputeSystem::WaitTimeout"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(err) }()
defer func() { computeSystem.logOperationEnd(operation, err) }()
err = waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, &timeout)
if err != nil {
@ -390,7 +385,7 @@ func (computeSystem *System) Properties(types ...schema1.PropertyType) (_ *schem
operation := "hcsshim::ComputeSystem::Properties"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(err) }()
defer func() { computeSystem.logOperationEnd(operation, err) }()
queryj, err := json.Marshal(schema1.PropertyQuery{types})
if err != nil {
@ -429,7 +424,7 @@ func (computeSystem *System) Pause() (err error) {
operation := "hcsshim::ComputeSystem::Pause"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(err) }()
defer func() { computeSystem.logOperationEnd(operation, err) }()
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, "Pause", "", ErrAlreadyClosed, nil)
@ -454,7 +449,7 @@ func (computeSystem *System) Resume() (err error) {
operation := "hcsshim::ComputeSystem::Resume"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(err) }()
defer func() { computeSystem.logOperationEnd(operation, err) }()
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, "Resume", "", ErrAlreadyClosed, nil)
@ -479,7 +474,7 @@ func (computeSystem *System) CreateProcess(c interface{}) (_ *Process, err error
operation := "hcsshim::ComputeSystem::CreateProcess"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(err) }()
defer func() { computeSystem.logOperationEnd(operation, err) }()
var (
processInfo hcsProcessInformation
@ -539,7 +534,7 @@ func (computeSystem *System) OpenProcess(pid int) (_ *Process, err error) {
operation := "hcsshim::ComputeSystem::OpenProcess"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(err) }()
defer func() { computeSystem.logOperationEnd(operation, err) }()
var (
processHandle hcsProcess
@ -573,7 +568,7 @@ func (computeSystem *System) Close() (err error) {
operation := "hcsshim::ComputeSystem::Close"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(err) }()
defer func() { computeSystem.logOperationEnd(operation, err) }()
// Don't double free this
if computeSystem.handle == 0 {
@ -660,7 +655,7 @@ func (computeSystem *System) Modify(config interface{}) (err error) {
operation := "hcsshim::ComputeSystem::Modify"
computeSystem.logOperationBegin(operation)
defer func() { computeSystem.logOperationEnd(err) }()
defer func() { computeSystem.logOperationEnd(operation, err) }()
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, "Modify", "", ErrAlreadyClosed, nil)

View File

@ -1,173 +0,0 @@
// +build windows
package hcsoci
import (
"errors"
"fmt"
"os"
"path/filepath"
"strconv"
"github.com/Microsoft/hcsshim/internal/guid"
"github.com/Microsoft/hcsshim/internal/hcs"
"github.com/Microsoft/hcsshim/internal/schema2"
"github.com/Microsoft/hcsshim/internal/schemaversion"
"github.com/Microsoft/hcsshim/internal/uvm"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
)
// CreateOptions are the set of fields used to call CreateContainer().
// Note: In the spec, the LayerFolders must be arranged in the same way in which
// moby configures them: layern, layern-1,...,layer2,layer1,scratch
// where layer1 is the base read-only layer, layern is the top-most read-only
// layer, and scratch is the RW layer. This is for historical reasons only.
type CreateOptions struct {

	// Common parameters
	ID               string             // Identifier for the container
	Owner            string             // Specifies the owner. Defaults to executable name.
	Spec             *specs.Spec        // Definition of the container or utility VM being created
	SchemaVersion    *hcsschema.Version // Requested Schema Version. Defaults to v2 for RS5, v1 for RS1..RS4
	HostingSystem    *uvm.UtilityVM     // Utility or service VM in which the container is to be created. Non-nil implies a v2 hosted ("Xenon") container.
	NetworkNamespace string             // Host network namespace to use (overrides anything in the spec)

	// This is an advanced debugging parameter. It allows for diagnosibility by leaving a containers
	// resources allocated in case of a failure. Thus you would be able to use tools such as hcsdiag
	// to look at the state of a utility VM to see what resources were allocated. Obviously the caller
	// must a) not tear down the utility VM on failure (or pause in some way) and b) is responsible for
	// performing the ReleaseResources() call themselves.
	DoNotReleaseResourcesOnFailure bool
}
// createOptionsInternal is the set of user-supplied create options, but includes internal
// fields for processing the request once user-supplied stuff has been validated.
type createOptionsInternal struct {
	*CreateOptions

	actualSchemaVersion    *hcsschema.Version // Calculated based on Windows build and optional caller-supplied override
	actualID               string             // Identifier for the container (defaulted to a fresh GUID if the caller omitted one)
	actualOwner            string             // Owner for the container (defaulted to the executable name if the caller omitted one)
	actualNetworkNamespace string             // Namespace actually used: caller override, or one created during CreateContainer
}
// CreateContainer creates a container. It can cope with a wide variety of
// scenarios, including v1 HCS schema calls, as well as more complex v2 HCS schema
// calls. Note we always return the resources that have been allocated, even in the
// case of an error. This provides support for the debugging option not to
// release the resources on failure, so that the client can make the necessary
// call to release resources that have been allocated as part of calling this function.
func CreateContainer(createOptions *CreateOptions) (_ *hcs.System, _ *Resources, err error) {
	logrus.Debugf("hcsshim::CreateContainer options: %+v", createOptions)

	coi := &createOptionsInternal{
		CreateOptions: createOptions,
		actualID:      createOptions.ID,
		actualOwner:   createOptions.Owner,
	}

	// Defaults if omitted by caller.
	if coi.actualID == "" {
		coi.actualID = guid.New().String()
	}
	if coi.actualOwner == "" {
		coi.actualOwner = filepath.Base(os.Args[0])
	}

	// The spec is the one mandatory input; everything else can be defaulted.
	if coi.Spec == nil {
		return nil, nil, fmt.Errorf("Spec must be supplied")
	}

	if coi.HostingSystem != nil {
		// By definition, a hosting system can only be supplied for a v2 Xenon.
		coi.actualSchemaVersion = schemaversion.SchemaV21()
	} else {
		coi.actualSchemaVersion = schemaversion.DetermineSchemaVersion(coi.SchemaVersion)
		logrus.Debugf("hcsshim::CreateContainer using schema %s", schemaversion.String(coi.actualSchemaVersion))
	}

	resources := &Resources{}
	defer func() {
		// On any failure below, undo the allocations made so far — unless the
		// caller asked (for debugging) to keep them alive for inspection.
		if err != nil {
			if !coi.DoNotReleaseResourcesOnFailure {
				ReleaseResources(resources, coi.HostingSystem, true)
			}
		}
	}()

	if coi.HostingSystem != nil {
		// Pick a unique guest-side root for this container inside the utility
		// VM, derived from a per-UVM counter (hex-encoded).
		n := coi.HostingSystem.ContainerCounter()
		if coi.Spec.Linux != nil {
			resources.containerRootInUVM = "/run/gcs/c/" + strconv.FormatUint(n, 16)
		} else {
			resources.containerRootInUVM = `C:\c\` + strconv.FormatUint(n, 16)
		}
	}

	// Create a network namespace if necessary.
	if coi.Spec.Windows != nil &&
		coi.Spec.Windows.Network != nil &&
		schemaversion.IsV21(coi.actualSchemaVersion) {
		if coi.NetworkNamespace != "" {
			// Caller-supplied namespace overrides anything in the spec.
			resources.netNS = coi.NetworkNamespace
		} else {
			err := createNetworkNamespace(coi, resources)
			if err != nil {
				return nil, resources, err
			}
		}
		coi.actualNetworkNamespace = resources.netNS
		if coi.HostingSystem != nil {
			// Hosted (v2 Xenon) container: plumb the namespace's endpoints
			// into the utility VM so the guest can see them.
			endpoints, err := getNamespaceEndpoints(coi.actualNetworkNamespace)
			if err != nil {
				return nil, resources, err
			}
			err = coi.HostingSystem.AddNetNS(coi.actualNetworkNamespace, endpoints)
			if err != nil {
				return nil, resources, err
			}
			resources.addedNetNSToVM = true
		}
	}

	// Allocate storage/resources and build the HCS document (v1 or v2,
	// LCOW or WCOW) that will be handed to CreateComputeSystem.
	var hcsDocument interface{}
	logrus.Debugf("hcsshim::CreateContainer allocating resources")
	if coi.Spec.Linux != nil {
		if schemaversion.IsV10(coi.actualSchemaVersion) {
			return nil, resources, errors.New("LCOW v1 not supported")
		}
		logrus.Debugf("hcsshim::CreateContainer allocateLinuxResources")
		err = allocateLinuxResources(coi, resources)
		if err != nil {
			logrus.Debugf("failed to allocateLinuxResources %s", err)
			return nil, resources, err
		}
		hcsDocument, err = createLinuxContainerDocument(coi, resources.containerRootInUVM)
		if err != nil {
			logrus.Debugf("failed createHCSContainerDocument %s", err)
			return nil, resources, err
		}
	} else {
		err = allocateWindowsResources(coi, resources)
		if err != nil {
			logrus.Debugf("failed to allocateWindowsResources %s", err)
			return nil, resources, err
		}
		logrus.Debugf("hcsshim::CreateContainer creating container document")
		hcsDocument, err = createWindowsContainerDocument(coi)
		if err != nil {
			logrus.Debugf("failed createHCSContainerDocument %s", err)
			return nil, resources, err
		}
	}

	logrus.Debugf("hcsshim::CreateContainer creating compute system")
	system, err := hcs.CreateComputeSystem(coi.actualID, hcsDocument)
	if err != nil {
		logrus.Debugf("failed to CreateComputeSystem %s", err)
		return nil, resources, err
	}
	return system, resources, err
}

View File

@ -1,78 +0,0 @@
// +build windows,functional
//
// These unit tests must run on a system setup to run both Argons and Xenons,
// have docker installed, and have the nanoserver (WCOW) and alpine (LCOW)
// base images installed. The nanoserver image MUST match the build of the
// host.
//
// We rely on docker as the tools to extract a container image aren't
// open source. We use it to find the location of the base image on disk.
//
package hcsoci
//import (
// "bytes"
// "encoding/json"
// "io/ioutil"
// "os"
// "os/exec"
// "path/filepath"
// "strings"
// "testing"
// "github.com/Microsoft/hcsshim/internal/schemaversion"
// _ "github.com/Microsoft/hcsshim/test/assets"
// specs "github.com/opencontainers/runtime-spec/specs-go"
// "github.com/sirupsen/logrus"
//)
//func startUVM(t *testing.T, uvm *UtilityVM) {
// if err := uvm.Start(); err != nil {
// t.Fatalf("UVM %s Failed start: %s", uvm.Id, err)
// }
//}
//// Helper to shoot a utility VM
//func terminateUtilityVM(t *testing.T, uvm *UtilityVM) {
// if err := uvm.Terminate(); err != nil {
// t.Fatalf("Failed terminate utility VM %s", err)
// }
//}
//// TODO: Test UVMResourcesFromContainerSpec
//func TestUVMSizing(t *testing.T) {
// t.Skip("for now - not implemented at all")
//}
//// TestID validates that the requested ID is retrieved
//func TestID(t *testing.T) {
// t.Skip("fornow")
// tempDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(tempDir)
// layers := append(layersNanoserver, tempDir)
// mountPath, err := mountContainerLayers(layers, nil)
// if err != nil {
// t.Fatalf("failed to mount container storage: %s", err)
// }
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
// c, err := CreateContainer(&CreateOptions{
// Id: "gruntbuggly",
// SchemaVersion: schemaversion.SchemaV21(),
// Spec: &specs.Spec{
// Windows: &specs.Windows{LayerFolders: layers},
// Root: &specs.Root{Path: mountPath.(string)},
// },
// })
// if err != nil {
// t.Fatalf("Failed create: %s", err)
// }
// if c.ID() != "gruntbuggly" {
// t.Fatalf("id not set correctly: %s", c.ID())
// }
// c.Terminate()
//}

View File

@ -1,115 +0,0 @@
// +build windows
package hcsoci
import (
"encoding/json"
"github.com/Microsoft/hcsshim/internal/schema2"
"github.com/Microsoft/hcsshim/internal/schemaversion"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
)
// createLCOWSpec produces the OCI specification forwarded to the guest for an
// LCOW (Linux) container. It deep-copies coi.Spec and then strips out
// host-only details and Linux features not supported in this configuration,
// leaving the original spec untouched.
func createLCOWSpec(coi *createOptionsInternal) (*specs.Spec, error) {
	// Remarshal the spec to perform a deep copy.
	j, err := json.Marshal(coi.Spec)
	if err != nil {
		return nil, err
	}
	spec := &specs.Spec{}
	err = json.Unmarshal(j, spec)
	if err != nil {
		return nil, err
	}

	// TODO
	// Translate the mounts. The root has already been translated in
	// allocateLinuxResources.
	/*
		for i := range spec.Mounts {
			spec.Mounts[i].Source = "???"
			spec.Mounts[i].Destination = "???"
		}
	*/

	// Linux containers don't care about Windows aspects of the spec except the
	// network namespace
	spec.Windows = nil
	if coi.Spec.Windows != nil &&
		coi.Spec.Windows.Network != nil &&
		coi.Spec.Windows.Network.NetworkNamespace != "" {
		spec.Windows = &specs.Windows{
			Network: &specs.WindowsNetwork{
				NetworkNamespace: coi.Spec.Windows.Network.NetworkNamespace,
			},
		}
	}

	// Hooks are not supported (they should be run in the host)
	spec.Hooks = nil

	// Clear unsupported features
	if spec.Linux.Resources != nil {
		spec.Linux.Resources.Devices = nil
		spec.Linux.Resources.Memory = nil
		spec.Linux.Resources.Pids = nil
		spec.Linux.Resources.BlockIO = nil
		spec.Linux.Resources.HugepageLimits = nil
		spec.Linux.Resources.Network = nil
	}
	spec.Linux.Seccomp = nil

	// Clear any specified namespaces. Network namespaces are dropped from the
	// list entirely; every other kind is kept but with its Path cleared.
	var namespaces []specs.LinuxNamespace
	for _, ns := range spec.Linux.Namespaces {
		switch ns.Type {
		case specs.NetworkNamespace:
		default:
			ns.Path = ""
			namespaces = append(namespaces, ns)
		}
	}
	spec.Linux.Namespaces = namespaces

	// NOTE(review): spec.Linux is dereferenced unconditionally above — this
	// function assumes coi.Spec.Linux is non-nil; confirm callers guarantee it.
	return spec, nil
}
// This is identical to hcsschema.ComputeSystem but HostedSystem is an LCOW specific type - the schema docs only include WCOW.
// The value is returned as an opaque document and later marshaled to JSON for
// the HCS CreateComputeSystem call.
type linuxComputeSystem struct {
	Owner                             string                    `json:"Owner,omitempty"`
	SchemaVersion                     *hcsschema.Version        `json:"SchemaVersion,omitempty"`
	HostingSystemId                   string                    `json:"HostingSystemId,omitempty"`
	HostedSystem                      *linuxHostedSystem        `json:"HostedSystem,omitempty"`
	Container                         *hcsschema.Container      `json:"Container,omitempty"`
	VirtualMachine                    *hcsschema.VirtualMachine `json:"VirtualMachine,omitempty"`
	ShouldTerminateOnLastHandleClosed bool                      `json:"ShouldTerminateOnLastHandleClosed,omitempty"`
}
// linuxHostedSystem is the LCOW-specific HostedSystem payload: the schema
// version plus the in-guest bundle path and the OCI specification for the
// container.
type linuxHostedSystem struct {
	SchemaVersion    *hcsschema.Version
	OciBundlePath    string
	OciSpecification *specs.Spec
}
// createLinuxContainerDocument builds the v2 HCS document for a hosted
// Linux container whose bundle lives at guestRoot inside the utility VM.
func createLinuxContainerDocument(coi *createOptionsInternal, guestRoot string) (interface{}, error) {
	lcowSpec, err := createLCOWSpec(coi)
	if err != nil {
		return nil, err
	}

	logrus.Debugf("hcsshim::createLinuxContainerDoc: guestRoot:%s", guestRoot)

	hosted := &linuxHostedSystem{
		SchemaVersion:    schemaversion.SchemaV21(),
		OciBundlePath:    guestRoot,
		OciSpecification: lcowSpec,
	}
	doc := &linuxComputeSystem{
		Owner:                             coi.actualOwner,
		SchemaVersion:                     schemaversion.SchemaV21(),
		ShouldTerminateOnLastHandleClosed: true,
		HostingSystemId:                   coi.HostingSystem.ID(),
		HostedSystem:                      hosted,
	}
	return doc, nil
}

View File

@ -1,273 +0,0 @@
// +build windows
package hcsoci
import (
"fmt"
"path/filepath"
"regexp"
"runtime"
"strings"
"github.com/Microsoft/hcsshim/internal/schema1"
"github.com/Microsoft/hcsshim/internal/schema2"
"github.com/Microsoft/hcsshim/internal/schemaversion"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/Microsoft/hcsshim/internal/uvmfolder"
"github.com/Microsoft/hcsshim/internal/wclayer"
"github.com/Microsoft/hcsshim/osversion"
"github.com/sirupsen/logrus"
)
// createWindowsContainerDocument creates a document suitable for calling HCS to create
// a container, both hosted and process isolated. It can create both v1 and v2
// schema, WCOW only. The containers storage should have been mounted already.
func createWindowsContainerDocument(coi *createOptionsInternal) (interface{}, error) {
logrus.Debugf("hcsshim: CreateHCSContainerDocument")
// TODO: Make this safe if exported so no null pointer dereferences.
if coi.Spec == nil {
return nil, fmt.Errorf("cannot create HCS container document - OCI spec is missing")
}
if coi.Spec.Windows == nil {
return nil, fmt.Errorf("cannot create HCS container document - OCI spec Windows section is missing ")
}
v1 := &schema1.ContainerConfig{
SystemType: "Container",
Name: coi.actualID,
Owner: coi.actualOwner,
HvPartition: false,
IgnoreFlushesDuringBoot: coi.Spec.Windows.IgnoreFlushesDuringBoot,
}
// IgnoreFlushesDuringBoot is a property of the SCSI attachment for the scratch. Set when it's hot-added to the utility VM
// ID is a property on the create call in V2 rather than part of the schema.
v2 := &hcsschema.ComputeSystem{
Owner: coi.actualOwner,
SchemaVersion: schemaversion.SchemaV21(),
ShouldTerminateOnLastHandleClosed: true,
}
v2Container := &hcsschema.Container{Storage: &hcsschema.Storage{}}
// TODO: Still want to revisit this.
if coi.Spec.Windows.LayerFolders == nil || len(coi.Spec.Windows.LayerFolders) < 2 {
return nil, fmt.Errorf("invalid spec - not enough layer folders supplied")
}
if coi.Spec.Hostname != "" {
v1.HostName = coi.Spec.Hostname
v2Container.GuestOs = &hcsschema.GuestOs{HostName: coi.Spec.Hostname}
}
if coi.Spec.Windows.Resources != nil {
if coi.Spec.Windows.Resources.CPU != nil {
if coi.Spec.Windows.Resources.CPU.Count != nil ||
coi.Spec.Windows.Resources.CPU.Shares != nil ||
coi.Spec.Windows.Resources.CPU.Maximum != nil {
v2Container.Processor = &hcsschema.Processor{}
}
if coi.Spec.Windows.Resources.CPU.Count != nil {
cpuCount := *coi.Spec.Windows.Resources.CPU.Count
hostCPUCount := uint64(runtime.NumCPU())
if cpuCount > hostCPUCount {
logrus.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount)
cpuCount = hostCPUCount
}
v1.ProcessorCount = uint32(cpuCount)
v2Container.Processor.Count = int32(cpuCount)
}
if coi.Spec.Windows.Resources.CPU.Shares != nil {
v1.ProcessorWeight = uint64(*coi.Spec.Windows.Resources.CPU.Shares)
v2Container.Processor.Weight = int32(v1.ProcessorWeight)
}
if coi.Spec.Windows.Resources.CPU.Maximum != nil {
v1.ProcessorMaximum = int64(*coi.Spec.Windows.Resources.CPU.Maximum)
v2Container.Processor.Maximum = int32(v1.ProcessorMaximum)
}
}
if coi.Spec.Windows.Resources.Memory != nil {
if coi.Spec.Windows.Resources.Memory.Limit != nil {
v1.MemoryMaximumInMB = int64(*coi.Spec.Windows.Resources.Memory.Limit) / 1024 / 1024
v2Container.Memory = &hcsschema.Memory{SizeInMB: int32(v1.MemoryMaximumInMB)}
}
}
if coi.Spec.Windows.Resources.Storage != nil {
if coi.Spec.Windows.Resources.Storage.Bps != nil || coi.Spec.Windows.Resources.Storage.Iops != nil {
v2Container.Storage.QoS = &hcsschema.StorageQoS{}
}
if coi.Spec.Windows.Resources.Storage.Bps != nil {
v1.StorageBandwidthMaximum = *coi.Spec.Windows.Resources.Storage.Bps
v2Container.Storage.QoS.BandwidthMaximum = int32(v1.StorageBandwidthMaximum)
}
if coi.Spec.Windows.Resources.Storage.Iops != nil {
v1.StorageIOPSMaximum = *coi.Spec.Windows.Resources.Storage.Iops
v2Container.Storage.QoS.IopsMaximum = int32(*coi.Spec.Windows.Resources.Storage.Iops)
}
}
}
// TODO V2 networking. Only partial at the moment. v2.Container.Networking.Namespace specifically
if coi.Spec.Windows.Network != nil {
v2Container.Networking = &hcsschema.Networking{}
v1.EndpointList = coi.Spec.Windows.Network.EndpointList
v2Container.Networking.Namespace = coi.actualNetworkNamespace
v1.AllowUnqualifiedDNSQuery = coi.Spec.Windows.Network.AllowUnqualifiedDNSQuery
v2Container.Networking.AllowUnqualifiedDnsQuery = v1.AllowUnqualifiedDNSQuery
if coi.Spec.Windows.Network.DNSSearchList != nil {
v1.DNSSearchList = strings.Join(coi.Spec.Windows.Network.DNSSearchList, ",")
v2Container.Networking.DnsSearchList = v1.DNSSearchList
}
v1.NetworkSharedContainerName = coi.Spec.Windows.Network.NetworkSharedContainerName
v2Container.Networking.NetworkSharedContainerName = v1.NetworkSharedContainerName
}
// // TODO V2 Credentials not in the schema yet.
if cs, ok := coi.Spec.Windows.CredentialSpec.(string); ok {
v1.Credentials = cs
}
if coi.Spec.Root == nil {
return nil, fmt.Errorf("spec is invalid - root isn't populated")
}
if coi.Spec.Root.Readonly {
return nil, fmt.Errorf(`invalid container spec - readonly is not supported for Windows containers`)
}
// Strip off the top-most RW/scratch layer as that's passed in separately to HCS for v1
v1.LayerFolderPath = coi.Spec.Windows.LayerFolders[len(coi.Spec.Windows.LayerFolders)-1]
if (schemaversion.IsV21(coi.actualSchemaVersion) && coi.HostingSystem == nil) ||
(schemaversion.IsV10(coi.actualSchemaVersion) && coi.Spec.Windows.HyperV == nil) {
// Argon v1 or v2.
const volumeGUIDRegex = `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}(|\\)$`
if matched, err := regexp.MatchString(volumeGUIDRegex, coi.Spec.Root.Path); !matched || err != nil {
return nil, fmt.Errorf(`invalid container spec - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, coi.Spec.Root.Path)
}
if coi.Spec.Root.Path[len(coi.Spec.Root.Path)-1] != '\\' {
coi.Spec.Root.Path += `\` // Be nice to clients and make sure well-formed for back-compat
}
v1.VolumePath = coi.Spec.Root.Path[:len(coi.Spec.Root.Path)-1] // Strip the trailing backslash. Required for v1.
v2Container.Storage.Path = coi.Spec.Root.Path
} else {
// A hosting system was supplied, implying v2 Xenon; OR a v1 Xenon.
if schemaversion.IsV10(coi.actualSchemaVersion) {
// V1 Xenon
v1.HvPartition = true
if coi.Spec == nil || coi.Spec.Windows == nil || coi.Spec.Windows.HyperV == nil { // Be resilient to nil de-reference
return nil, fmt.Errorf(`invalid container spec - Spec.Windows.HyperV is nil`)
}
if coi.Spec.Windows.HyperV.UtilityVMPath != "" {
// Client-supplied utility VM path
v1.HvRuntime = &schema1.HvRuntime{ImagePath: coi.Spec.Windows.HyperV.UtilityVMPath}
} else {
// Client was lazy. Let's locate it from the layer folders instead.
uvmImagePath, err := uvmfolder.LocateUVMFolder(coi.Spec.Windows.LayerFolders)
if err != nil {
return nil, err
}
v1.HvRuntime = &schema1.HvRuntime{ImagePath: filepath.Join(uvmImagePath, `UtilityVM`)}
}
} else {
// Hosting system was supplied, so is v2 Xenon.
v2Container.Storage.Path = coi.Spec.Root.Path
if coi.HostingSystem.OS() == "windows" {
layers, err := computeV2Layers(coi.HostingSystem, coi.Spec.Windows.LayerFolders[:len(coi.Spec.Windows.LayerFolders)-1])
if err != nil {
return nil, err
}
v2Container.Storage.Layers = layers
}
}
}
if coi.HostingSystem == nil { // Argon v1 or v2
for _, layerPath := range coi.Spec.Windows.LayerFolders[:len(coi.Spec.Windows.LayerFolders)-1] {
layerID, err := wclayer.LayerID(layerPath)
if err != nil {
return nil, err
}
v1.Layers = append(v1.Layers, schema1.Layer{ID: layerID.String(), Path: layerPath})
v2Container.Storage.Layers = append(v2Container.Storage.Layers, hcsschema.Layer{Id: layerID.String(), Path: layerPath})
}
}
// Add the mounts as mapped directories or mapped pipes
// TODO: Mapped pipes to add in v2 schema.
var (
mdsv1 []schema1.MappedDir
mpsv1 []schema1.MappedPipe
mdsv2 []hcsschema.MappedDirectory
mpsv2 []hcsschema.MappedPipe
)
for _, mount := range coi.Spec.Mounts {
const pipePrefix = `\\.\pipe\`
if mount.Type != "" {
return nil, fmt.Errorf("invalid container spec - Mount.Type '%s' must not be set", mount.Type)
}
if strings.HasPrefix(strings.ToLower(mount.Destination), pipePrefix) {
mpsv1 = append(mpsv1, schema1.MappedPipe{HostPath: mount.Source, ContainerPipeName: mount.Destination[len(pipePrefix):]})
mpsv2 = append(mpsv2, hcsschema.MappedPipe{HostPath: mount.Source, ContainerPipeName: mount.Destination[len(pipePrefix):]})
} else {
readOnly := false
for _, o := range mount.Options {
if strings.ToLower(o) == "ro" {
readOnly = true
}
}
mdv1 := schema1.MappedDir{HostPath: mount.Source, ContainerPath: mount.Destination, ReadOnly: readOnly}
mdv2 := hcsschema.MappedDirectory{ContainerPath: mount.Destination, ReadOnly: readOnly}
if coi.HostingSystem == nil {
mdv2.HostPath = mount.Source
} else {
uvmPath, err := coi.HostingSystem.GetVSMBUvmPath(mount.Source)
if err != nil {
if err == uvm.ErrNotAttached {
// It could also be a scsi mount.
uvmPath, err = coi.HostingSystem.GetScsiUvmPath(mount.Source)
if err != nil {
return nil, err
}
} else {
return nil, err
}
}
mdv2.HostPath = uvmPath
}
mdsv1 = append(mdsv1, mdv1)
mdsv2 = append(mdsv2, mdv2)
}
}
v1.MappedDirectories = mdsv1
v2Container.MappedDirectories = mdsv2
if len(mpsv1) > 0 && osversion.Get().Build < osversion.RS3 {
return nil, fmt.Errorf("named pipe mounts are not supported on this version of Windows")
}
v1.MappedPipes = mpsv1
v2Container.MappedPipes = mpsv2
// Put the v2Container object as a HostedSystem for a Xenon, or directly in the schema for an Argon.
if coi.HostingSystem == nil {
v2.Container = v2Container
} else {
v2.HostingSystemId = coi.HostingSystem.ID()
v2.HostedSystem = &hcsschema.HostedSystem{
SchemaVersion: schemaversion.SchemaV21(),
Container: v2Container,
}
}
if schemaversion.IsV10(coi.actualSchemaVersion) {
return v1, nil
}
return v2, nil
}

View File

@ -1,373 +0,0 @@
// +build windows
package hcsoci
import (
"fmt"
"os"
"path"
"path/filepath"
"github.com/Microsoft/hcsshim/internal/guestrequest"
"github.com/Microsoft/hcsshim/internal/ospath"
"github.com/Microsoft/hcsshim/internal/requesttype"
"github.com/Microsoft/hcsshim/internal/schema2"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/Microsoft/hcsshim/internal/wclayer"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// lcowLayerEntry records a single read-only LCOW layer that has been attached
// to the utility VM, and how it was attached, so that it can be detached again
// on the cleanup path.
type lcowLayerEntry struct {
	hostPath string // host path to the layer's layer.vhd
	uvmPath  string // location the layer is exposed at inside the utility VM
	scsi     bool   // true when attached via SCSI because the VHD exceeded the VPMEM size limit
}
// scratchPath is the directory name, joined under the container's guest root
// inside the utility VM, where the container's scratch space is mounted.
const scratchPath = "scratch"
// MountContainerLayers is a helper for clients to hide all the complexity of layer mounting
// Layer folders are in order: base, [rolayer1..rolayern,] scratch
//
// v1/v2: Argon WCOW: Returns the mount path on the host as a volume GUID.
// v1:    Xenon WCOW: Done internally in HCS, so no point calling doing anything here.
// v2:    Xenon WCOW: Returns a CombinedLayersV2 structure where ContainerRootPath is a folder
//                    inside the utility VM which is a GUID mapping of the scratch folder. Each
//                    of the layers are the VSMB locations where the read-only layers are mounted.
func MountContainerLayers(layerFolders []string, guestRoot string, uvm *uvm.UtilityVM) (interface{}, error) {
	logrus.Debugln("hcsshim::mountContainerLayers", layerFolders)

	if uvm == nil {
		// Host-mounted Argon (v1 or v2): activate and prepare the scratch layer
		// on the host, then return the volume GUID path it is mounted at.
		if len(layerFolders) < 2 {
			return nil, fmt.Errorf("need at least two layers - base and scratch")
		}
		path := layerFolders[len(layerFolders)-1]
		rest := layerFolders[:len(layerFolders)-1]
		logrus.Debugln("hcsshim::mountContainerLayers ActivateLayer", path)
		if err := wclayer.ActivateLayer(path); err != nil {
			return nil, err
		}
		logrus.Debugln("hcsshim::mountContainerLayers Preparelayer", path, rest)
		if err := wclayer.PrepareLayer(path, rest); err != nil {
			// Fix: log the deactivate failure (err2), not the prepare error.
			if err2 := wclayer.DeactivateLayer(path); err2 != nil {
				logrus.Warnf("Failed to Deactivate %s: %s", path, err2)
			}
			return nil, err
		}
		mountPath, err := wclayer.GetLayerMountPath(path)
		if err != nil {
			if err2 := wclayer.UnprepareLayer(path); err2 != nil {
				logrus.Warnf("Failed to Unprepare %s: %s", path, err2)
			}
			// Fix: log the deactivate failure (err2), not the GetLayerMountPath error.
			if err2 := wclayer.DeactivateLayer(path); err2 != nil {
				logrus.Warnf("Failed to Deactivate %s: %s", path, err2)
			}
			return nil, err
		}
		return mountPath, nil
	}

	// V2 UVM
	logrus.Debugf("hcsshim::mountContainerLayers Is a %s V2 UVM", uvm.OS())

	// Add each read-only layer. For Windows, this is a VSMB share with the ResourceUri ending in
	// a GUID based on the folder path. For Linux, this is a VPMEM device, except where it is over
	// the max size supported, where we put it on SCSI instead.
	//
	// Each layer is ref-counted so that multiple containers in the same utility VM can share them.
	var wcowLayersAdded []string
	var lcowlayersAdded []lcowLayerEntry
	attachedSCSIHostPath := ""

	for _, layerPath := range layerFolders[:len(layerFolders)-1] {
		var err error
		if uvm.OS() == "windows" {
			options := &hcsschema.VirtualSmbShareOptions{
				ReadOnly:            true,
				PseudoOplocks:       true,
				TakeBackupPrivilege: true,
				CacheIo:             true,
				ShareRead:           true,
			}
			err = uvm.AddVSMB(layerPath, "", options)
			if err == nil {
				wcowLayersAdded = append(wcowLayersAdded, layerPath)
			}
		} else {
			uvmPath := ""
			hostPath := filepath.Join(layerPath, "layer.vhd")
			var fi os.FileInfo
			fi, err = os.Stat(hostPath)
			if err == nil && uint64(fi.Size()) > uvm.PMemMaxSizeBytes() {
				// Too big for PMEM. Add on SCSI instead (at /tmp/S<C>/<L>).
				var (
					controller int
					lun        int32
				)
				controller, lun, err = uvm.AddSCSILayer(hostPath)
				if err == nil {
					lcowlayersAdded = append(lcowlayersAdded,
						lcowLayerEntry{
							hostPath: hostPath,
							uvmPath:  fmt.Sprintf("/tmp/S%d/%d", controller, lun),
							scsi:     true,
						})
				}
			} else {
				_, uvmPath, err = uvm.AddVPMEM(hostPath, true) // UVM path is calculated. Will be /tmp/vN/
				if err == nil {
					lcowlayersAdded = append(lcowlayersAdded,
						lcowLayerEntry{
							hostPath: hostPath,
							uvmPath:  uvmPath,
						})
				}
			}
		}
		if err != nil {
			// Best-effort detach everything added so far before failing.
			cleanupOnMountFailure(uvm, wcowLayersAdded, lcowlayersAdded, attachedSCSIHostPath)
			return nil, err
		}
	}

	// Add the scratch at an unused SCSI location. The container path inside the
	// utility VM will be C:\<ID>.
	hostPath := filepath.Join(layerFolders[len(layerFolders)-1], "sandbox.vhdx")

	// BUGBUG Rename guestRoot better.
	containerScratchPathInUVM := ospath.Join(uvm.OS(), guestRoot, scratchPath)
	_, _, err := uvm.AddSCSI(hostPath, containerScratchPathInUVM, false)
	if err != nil {
		cleanupOnMountFailure(uvm, wcowLayersAdded, lcowlayersAdded, attachedSCSIHostPath)
		return nil, err
	}
	attachedSCSIHostPath = hostPath

	if uvm.OS() == "windows" {
		// Load the filter at the C:\s<ID> location calculated above. We pass into this request each of the
		// read-only layer folders.
		layers, err := computeV2Layers(uvm, wcowLayersAdded)
		if err != nil {
			cleanupOnMountFailure(uvm, wcowLayersAdded, lcowlayersAdded, attachedSCSIHostPath)
			return nil, err
		}
		guestRequest := guestrequest.CombinedLayers{
			ContainerRootPath: containerScratchPathInUVM,
			Layers:            layers,
		}
		combinedLayersModification := &hcsschema.ModifySettingRequest{
			GuestRequest: guestrequest.GuestRequest{
				Settings:     guestRequest,
				ResourceType: guestrequest.ResourceTypeCombinedLayers,
				RequestType:  requesttype.Add,
			},
		}
		if err := uvm.Modify(combinedLayersModification); err != nil {
			cleanupOnMountFailure(uvm, wcowLayersAdded, lcowlayersAdded, attachedSCSIHostPath)
			return nil, err
		}
		logrus.Debugln("hcsshim::mountContainerLayers Succeeded")
		return guestRequest, nil
	}

	// This is the LCOW layout inside the utilityVM. NNN is the container "number"
	// which increments for each container created in a utility VM.
	//
	// /run/gcs/c/NNN/config.json
	// /run/gcs/c/NNN/rootfs
	// /run/gcs/c/NNN/scratch/upper
	// /run/gcs/c/NNN/scratch/work
	//
	// /dev/sda on /tmp/scratch type ext4 (rw,relatime,block_validity,delalloc,barrier,user_xattr,acl)
	// /dev/pmem0 on /tmp/v0 type ext4 (ro,relatime,block_validity,delalloc,norecovery,barrier,dax,user_xattr,acl)
	// /dev/sdb on /run/gcs/c/NNN/scratch type ext4 (rw,relatime,block_validity,delalloc,barrier,user_xattr,acl)
	// overlay on /run/gcs/c/NNN/rootfs type overlay (rw,relatime,lowerdir=/tmp/v0,upperdir=/run/gcs/c/NNN/scratch/upper,workdir=/run/gcs/c/NNN/scratch/work)
	//
	// Where /dev/sda is the scratch for utility VM itself
	//       /dev/pmemX are read-only layers for containers
	//       /dev/sd(b...) are scratch spaces for each container
	layers := []hcsschema.Layer{}
	for _, l := range lcowlayersAdded {
		layers = append(layers, hcsschema.Layer{Path: l.uvmPath})
	}
	guestRequest := guestrequest.CombinedLayers{
		ContainerRootPath: path.Join(guestRoot, rootfsPath),
		Layers:            layers,
		ScratchPath:       containerScratchPathInUVM,
	}
	combinedLayersModification := &hcsschema.ModifySettingRequest{
		GuestRequest: guestrequest.GuestRequest{
			ResourceType: guestrequest.ResourceTypeCombinedLayers,
			RequestType:  requesttype.Add,
			Settings:     guestRequest,
		},
	}
	if err := uvm.Modify(combinedLayersModification); err != nil {
		cleanupOnMountFailure(uvm, wcowLayersAdded, lcowlayersAdded, attachedSCSIHostPath)
		return nil, err
	}
	logrus.Debugln("hcsshim::mountContainerLayers Succeeded")
	return guestRequest, nil
}
// UnmountOperation is used when calling Unmount() to determine what type of unmount is
// required. In V1 schema, this must be unmountOperationAll. In V2, client can
// be more optimal and only unmount what they need which can be a minor performance
// improvement (eg if you know only one container is running in a utility VM, and
// the UVM is about to be torn down, there's no need to unmount the VSMB shares,
// just SCSI to have a consistent file system).
type UnmountOperation uint

// The operations are bit flags and may be OR'd together.
const (
	UnmountOperationSCSI  UnmountOperation = 0x01 // unmount the SCSI scratch
	UnmountOperationVSMB                   = 0x02 // remove read-only VSMB layer shares (WCOW)
	UnmountOperationVPMEM                  = 0x04 // remove read-only VPMEM/SCSI layers (LCOW)
	UnmountOperationAll                    = UnmountOperationSCSI | UnmountOperationVSMB | UnmountOperationVPMEM
)
// UnmountContainerLayers is a helper for clients to hide all the complexity of layer unmounting.
// For host-mounted (Argon) layers, uvm is nil and op must be UnmountOperationAll. For a utility
// VM, op selects which resource types to unmount. Errors on individual resources are accumulated
// and the combined error returned, so that unmounting is as complete as possible.
func UnmountContainerLayers(layerFolders []string, guestRoot string, uvm *uvm.UtilityVM, op UnmountOperation) error {
	logrus.Debugln("hcsshim::unmountContainerLayers", layerFolders)
	if uvm == nil {
		// Must be an argon - folders are mounted on the host
		if op != UnmountOperationAll {
			return fmt.Errorf("only operation supported for host-mounted folders is unmountOperationAll")
		}
		if len(layerFolders) < 1 {
			return fmt.Errorf("need at least one layer for Unmount")
		}
		path := layerFolders[len(layerFolders)-1]
		logrus.Debugln("hcsshim::Unmount UnprepareLayer", path)
		if err := wclayer.UnprepareLayer(path); err != nil {
			return err
		}
		// TODO Should we try this anyway?
		logrus.Debugln("hcsshim::unmountContainerLayers DeactivateLayer", path)
		return wclayer.DeactivateLayer(path)
	}

	// V2 Xenon
	// Base+Scratch as a minimum. This is different to v1 which only requires the scratch
	if len(layerFolders) < 2 {
		return fmt.Errorf("at least two layers are required for unmount")
	}

	var retError error

	// Unload the storage filter followed by the SCSI scratch
	if (op & UnmountOperationSCSI) == UnmountOperationSCSI {
		containerScratchPathInUVM := ospath.Join(uvm.OS(), guestRoot, scratchPath)
		logrus.Debugf("hcsshim::unmountContainerLayers CombinedLayers %s", containerScratchPathInUVM)
		combinedLayersModification := &hcsschema.ModifySettingRequest{
			GuestRequest: guestrequest.GuestRequest{
				ResourceType: guestrequest.ResourceTypeCombinedLayers,
				RequestType:  requesttype.Remove,
				Settings:     guestrequest.CombinedLayers{ContainerRootPath: containerScratchPathInUVM},
			},
		}
		if err := uvm.Modify(combinedLayersModification); err != nil {
			// Fix: Error, not Errorf - an error string must not be used as a format string.
			logrus.Error(err)
		}

		// Hot remove the scratch from the SCSI controller
		hostScratchFile := filepath.Join(layerFolders[len(layerFolders)-1], "sandbox.vhdx")
		logrus.Debugf("hcsshim::unmountContainerLayers SCSI %s %s", containerScratchPathInUVM, hostScratchFile)
		if err := uvm.RemoveSCSI(hostScratchFile); err != nil {
			e := fmt.Errorf("failed to remove SCSI %s: %s", hostScratchFile, err)
			logrus.Debugln(e)
			if retError == nil {
				retError = e
			} else {
				// Fix: Wrap, not Wrapf - the accumulated message may contain '%'.
				retError = errors.Wrap(retError, e.Error())
			}
		}
	}

	// Remove each of the read-only layers from VSMB. These are ref-counted and
	// only removed once the count drops to zero. This allows multiple containers
	// to share layers.
	if uvm.OS() == "windows" && len(layerFolders) > 1 && (op&UnmountOperationVSMB) == UnmountOperationVSMB {
		for _, layerPath := range layerFolders[:len(layerFolders)-1] {
			if e := uvm.RemoveVSMB(layerPath); e != nil {
				logrus.Debugln(e)
				if retError == nil {
					retError = e
				} else {
					retError = errors.Wrap(retError, e.Error())
				}
			}
		}
	}

	// Remove each of the read-only layers from VPMEM (or SCSI). These are ref-counted
	// and only removed once the count drops to zero. This allows multiple containers to
	// share layers. Note that SCSI is used on large layers.
	if uvm.OS() == "linux" && len(layerFolders) > 1 && (op&UnmountOperationVPMEM) == UnmountOperationVPMEM {
		for _, layerPath := range layerFolders[:len(layerFolders)-1] {
			hostPath := filepath.Join(layerPath, "layer.vhd")
			// Fix: the original tested err != nil, which dereferenced a nil
			// FileInfo when Stat failed (panic) and skipped removal for every
			// layer whose Stat succeeded. Only act when the stat succeeds,
			// mirroring the size-based attach decision made at mount time.
			if fi, err := os.Stat(hostPath); err == nil {
				var e error
				if uint64(fi.Size()) > uvm.PMemMaxSizeBytes() {
					e = uvm.RemoveSCSI(hostPath)
				} else {
					e = uvm.RemoveVPMEM(hostPath)
				}
				if e != nil {
					logrus.Debugln(e)
					if retError == nil {
						retError = e
					} else {
						retError = errors.Wrap(retError, e.Error())
					}
				}
			}
		}
	}

	// TODO (possibly) Consider deleting the container directory in the utility VM
	return retError
}
// cleanupOnMountFailure detaches, best-effort, everything that was attached to
// the utility VM before a mount operation failed. Failures here are only
// warned about, since the mount error itself is what gets returned to the
// caller.
func cleanupOnMountFailure(uvm *uvm.UtilityVM, wcowLayers []string, lcowLayers []lcowLayerEntry, scratchHostPath string) {
	for _, share := range wcowLayers {
		if err := uvm.RemoveVSMB(share); err != nil {
			logrus.Warnf("Possibly leaked vsmbshare on error removal path: %s", err)
		}
	}
	for _, layer := range lcowLayers {
		if !layer.scsi {
			if err := uvm.RemoveVPMEM(layer.hostPath); err != nil {
				logrus.Warnf("Possibly leaked vpmemdevice on error removal path: %s", err)
			}
			continue
		}
		if err := uvm.RemoveSCSI(layer.hostPath); err != nil {
			logrus.Warnf("Possibly leaked SCSI on error removal path: %s", err)
		}
	}
	if scratchHostPath != "" {
		if err := uvm.RemoveSCSI(scratchHostPath); err != nil {
			logrus.Warnf("Possibly leaked SCSI disk on error removal path: %s", err)
		}
	}
}
// computeV2Layers builds the v2 schema layer list for paths, resolving each
// host path to the VSMB location it is exposed at inside vm, keyed by its
// layer ID.
func computeV2Layers(vm *uvm.UtilityVM, paths []string) ([]hcsschema.Layer, error) {
	layers := make([]hcsschema.Layer, 0, len(paths))
	for _, hostPath := range paths {
		guestPath, err := vm.GetVSMBUvmPath(hostPath)
		if err != nil {
			return nil, err
		}
		g, err := wclayer.LayerID(hostPath)
		if err != nil {
			return nil, err
		}
		layers = append(layers, hcsschema.Layer{Id: g.String(), Path: guestPath})
	}
	return layers, nil
}

View File

@ -1,41 +0,0 @@
package hcsoci
import (
"github.com/Microsoft/hcsshim/internal/hns"
"github.com/sirupsen/logrus"
)
// createNetworkNamespace creates an HNS network namespace for the container,
// records it in resources, and attaches every endpoint listed in the spec to
// the new namespace.
func createNetworkNamespace(coi *createOptionsInternal, resources *Resources) error {
	netID, err := hns.CreateNamespace()
	if err != nil {
		return err
	}
	logrus.Infof("created network namespace %s for %s", netID, coi.ID)

	resources.netNS = netID
	resources.createdNetNS = true

	for _, endpointID := range coi.Spec.Windows.Network.EndpointList {
		if err := hns.AddNamespaceEndpoint(netID, endpointID); err != nil {
			return err
		}
		logrus.Infof("added network endpoint %s to namespace %s", endpointID, netID)
		resources.networkEndpoints = append(resources.networkEndpoints, endpointID)
	}
	return nil
}
// getNamespaceEndpoints resolves every endpoint ID attached to the network
// namespace netNS into its full HNSEndpoint record.
func getNamespaceEndpoints(netNS string) ([]*hns.HNSEndpoint, error) {
	ids, err := hns.GetNamespaceEndpoints(netNS)
	if err != nil {
		return nil, err
	}
	endpoints := make([]*hns.HNSEndpoint, 0, len(ids))
	for _, endpointID := range ids {
		ep, err := hns.GetHNSEndpointByID(endpointID)
		if err != nil {
			return nil, err
		}
		endpoints = append(endpoints, ep)
	}
	return endpoints, nil
}

View File

@ -1,127 +0,0 @@
package hcsoci
import (
"os"
"github.com/Microsoft/hcsshim/internal/hns"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/sirupsen/logrus"
)
// NetNS returns the network namespace ID recorded for the container.
func (r *Resources) NetNS() string {
	return r.netNS
}
// Resources is the structure returned as part of creating a container. It holds
// nothing useful to clients, hence everything is lowercased. A client would use
// it in a call to ReleaseResources to ensure everything is cleaned up when a
// container exits.
type Resources struct {
	// containerRootInUVM is the base path in a utility VM where elements relating
	// to a container are exposed. For example, the mounted filesystem; the runtime
	// spec (in the case of LCOW); overlay and scratch (in the case of LCOW).
	//
	// For WCOW, this will be under C:\c\N, and for LCOW this will
	// be under /run/gcs/c/N. N is an atomic counter for each container created
	// in that utility VM. For LCOW this is also the "OCI Bundle Path".
	containerRootInUVM string

	// layers is an array of the layer folder paths which have been mounted either on
	// the host in the case of a WCOW Argon, or in a utility VM for WCOW Xenon and LCOW.
	layers []string

	// vsmbMounts is an array of the host-paths mounted into a utility VM to support
	// (bind-)mounts into a WCOW v2 Xenon.
	vsmbMounts []string

	// plan9Mounts is an array of all the host paths which have been added to
	// an LCOW utility VM
	plan9Mounts []string

	// netNS is the network namespace
	netNS string

	// networkEndpoints is the list of network endpoints used by the container
	networkEndpoints []string

	// createdNetNS indicates if the network namespace has been created
	createdNetNS bool

	// addedNetNSToVM indicates if the network namespace has been added to the containers utility VM
	addedNetNSToVM bool

	// scsiMounts is an array of the host-paths mounted into a utility VM to
	// support scsi device passthrough.
	scsiMounts []string
}
// TODO: Method on the resources?

// ReleaseResources cleans up everything tracked in r: the network namespace,
// mounted layers and, when all is true, the VSMB, Plan9 and SCSI mounts that
// were added to the utility VM. Fields in r are cleared as each resource type
// is successfully released, so a failed call can be retried.
//
// NOTE(review): when all is true, the mount-removal loops call methods on vm
// without a nil check; callers appear to only pass all=true with a live
// utility VM - confirm before relying on vm being nil here.
func ReleaseResources(r *Resources, vm *uvm.UtilityVM, all bool) error {
	if vm != nil && r.addedNetNSToVM {
		err := vm.RemoveNetNS(r.netNS)
		if err != nil {
			// Best-effort - we still proceed with the rest of the teardown.
			logrus.Warn(err)
		}
		r.addedNetNSToVM = false
	}

	if r.createdNetNS {
		// Pop endpoints from the end as they are removed so that r always
		// reflects what is still attached if an error aborts the loop.
		for len(r.networkEndpoints) != 0 {
			endpoint := r.networkEndpoints[len(r.networkEndpoints)-1]
			err := hns.RemoveNamespaceEndpoint(r.netNS, endpoint)
			if err != nil {
				if !os.IsNotExist(err) {
					return err
				}
				// Already gone - treat as removed.
				logrus.Warnf("removing endpoint %s from namespace %s: does not exist", endpoint, r.NetNS())
			}
			r.networkEndpoints = r.networkEndpoints[:len(r.networkEndpoints)-1]
		}
		r.networkEndpoints = nil
		err := hns.RemoveNamespace(r.netNS)
		if err != nil && !os.IsNotExist(err) {
			return err
		}
		r.createdNetNS = false
	}

	if len(r.layers) != 0 {
		// For a host-mounted container, or a full teardown, unmount everything;
		// otherwise only the SCSI scratch.
		op := UnmountOperationSCSI
		if vm == nil || all {
			op = UnmountOperationAll
		}
		err := UnmountContainerLayers(r.layers, r.containerRootInUVM, vm, op)
		if err != nil {
			return err
		}
		r.layers = nil
	}

	if all {
		for len(r.vsmbMounts) != 0 {
			mount := r.vsmbMounts[len(r.vsmbMounts)-1]
			if err := vm.RemoveVSMB(mount); err != nil {
				return err
			}
			r.vsmbMounts = r.vsmbMounts[:len(r.vsmbMounts)-1]
		}

		for len(r.plan9Mounts) != 0 {
			mount := r.plan9Mounts[len(r.plan9Mounts)-1]
			if err := vm.RemovePlan9(mount); err != nil {
				return err
			}
			r.plan9Mounts = r.plan9Mounts[:len(r.plan9Mounts)-1]
		}

		for _, path := range r.scsiMounts {
			if err := vm.RemoveSCSI(path); err != nil {
				return err
			}
		}
		// Fix: clear scsiMounts once, after all removals succeed. The original
		// nil'd it inside the loop, losing track of still-attached mounts if a
		// later RemoveSCSI call failed.
		r.scsiMounts = nil
	}
	return nil
}

View File

@ -1,104 +0,0 @@
// +build windows
package hcsoci
// Contains functions relating to a LCOW container, as opposed to a utility VM
import (
"fmt"
"path"
"strconv"
"strings"
"github.com/Microsoft/hcsshim/internal/guestrequest"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
)
// rootfsPath is the directory name, joined under the container root in the
// utility VM, where the container's root filesystem is exposed.
const rootfsPath = "rootfs"

// mountPathPrefix prefixes the per-mount index when building the in-UVM share
// path for an OCI mount (e.g. "m0", "m1", ...).
const mountPathPrefix = "m"
// allocateLinuxResources sets up the root filesystem and OCI mounts for an
// LCOW container, mutating coi.Spec in place (Root.Path and Mounts entries are
// rewritten to their in-UVM locations) and recording every attached resource
// in resources so it can later be released.
func allocateLinuxResources(coi *createOptionsInternal, resources *Resources) error {
	if coi.Spec.Root == nil {
		coi.Spec.Root = &specs.Root{}
	}
	if coi.Spec.Root.Path == "" {
		// No root supplied: mount the layer folders and use the result as the root.
		logrus.Debugln("hcsshim::allocateLinuxResources mounting storage")
		mcl, err := MountContainerLayers(coi.Spec.Windows.LayerFolders, resources.containerRootInUVM, coi.HostingSystem)
		if err != nil {
			return fmt.Errorf("failed to mount container storage: %s", err)
		}
		if coi.HostingSystem == nil {
			coi.Spec.Root.Path = mcl.(string) // Argon v1 or v2
		} else {
			coi.Spec.Root.Path = mcl.(guestrequest.CombinedLayers).ContainerRootPath // v2 Xenon LCOW
		}
		resources.layers = coi.Spec.Windows.LayerFolders
	} else {
		// This is the "Plan 9" root filesystem.
		// TODO: We need a test for this. Ask @jstarks how you can even lay this out on Windows.
		hostPath := coi.Spec.Root.Path
		uvmPathForContainersFileSystem := path.Join(resources.containerRootInUVM, rootfsPath)
		err := coi.HostingSystem.AddPlan9(hostPath, uvmPathForContainersFileSystem, coi.Spec.Root.Readonly)
		if err != nil {
			return fmt.Errorf("adding plan9 root: %s", err)
		}
		coi.Spec.Root.Path = uvmPathForContainersFileSystem
		resources.plan9Mounts = append(resources.plan9Mounts, hostPath)
	}

	for i, mount := range coi.Spec.Mounts {
		// Only bind, physical-disk and virtual-disk mounts are handled; any
		// other type is silently skipped.
		switch mount.Type {
		case "bind":
		case "physical-disk":
		case "virtual-disk":
		default:
			// Unknown mount type
			continue
		}
		if mount.Destination == "" || mount.Source == "" {
			return fmt.Errorf("invalid OCI spec - a mount must have both source and a destination: %+v", mount)
		}

		if coi.HostingSystem != nil {
			hostPath := mount.Source
			// In-UVM location for this mount: <containerRoot>/m<i>.
			uvmPathForShare := path.Join(resources.containerRootInUVM, mountPathPrefix+strconv.Itoa(i))
			// A mount is read-only when any option equals "ro" (case-insensitive).
			readOnly := false
			for _, o := range mount.Options {
				if strings.ToLower(o) == "ro" {
					readOnly = true
					break
				}
			}
			if mount.Type == "physical-disk" {
				logrus.Debugf("hcsshim::allocateLinuxResources Hot-adding SCSI physical disk for OCI mount %+v", mount)
				_, _, err := coi.HostingSystem.AddSCSIPhysicalDisk(hostPath, uvmPathForShare, readOnly)
				if err != nil {
					return fmt.Errorf("adding SCSI physical disk mount %+v: %s", mount, err)
				}
				resources.scsiMounts = append(resources.scsiMounts, hostPath)
				// The disk is already attached; tell the guest it's a plain path.
				coi.Spec.Mounts[i].Type = "none"
			} else if mount.Type == "virtual-disk" {
				logrus.Debugf("hcsshim::allocateLinuxResources Hot-adding SCSI virtual disk for OCI mount %+v", mount)
				_, _, err := coi.HostingSystem.AddSCSI(hostPath, uvmPathForShare, readOnly)
				if err != nil {
					return fmt.Errorf("adding SCSI virtual disk mount %+v: %s", mount, err)
				}
				resources.scsiMounts = append(resources.scsiMounts, hostPath)
				coi.Spec.Mounts[i].Type = "none"
			} else {
				logrus.Debugf("hcsshim::allocateLinuxResources Hot-adding Plan9 for OCI mount %+v", mount)
				err := coi.HostingSystem.AddPlan9(hostPath, uvmPathForShare, readOnly)
				if err != nil {
					return fmt.Errorf("adding plan9 mount %+v: %s", mount, err)
				}
				resources.plan9Mounts = append(resources.plan9Mounts, hostPath)
			}
			// Point the spec at the in-UVM location of the share.
			coi.Spec.Mounts[i].Source = uvmPathForShare
		}
	}

	return nil
}

View File

@ -1,127 +0,0 @@
// +build windows
package hcsoci
// Contains functions relating to a WCOW container, as opposed to a utility VM
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/Microsoft/hcsshim/internal/guestrequest"
"github.com/Microsoft/hcsshim/internal/schema2"
"github.com/Microsoft/hcsshim/internal/schemaversion"
"github.com/Microsoft/hcsshim/internal/wclayer"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
)
// allocateWindowsResources sets up the scratch layer, root filesystem and OCI
// mounts for a WCOW container, mutating coi.Spec in place and recording every
// attached resource in resources so it can later be released.
func allocateWindowsResources(coi *createOptionsInternal, resources *Resources) error {
	if coi.Spec == nil || coi.Spec.Windows == nil || coi.Spec.Windows.LayerFolders == nil {
		return fmt.Errorf("field 'Spec.Windows.Layerfolders' is not populated")
	}
	scratchFolder := coi.Spec.Windows.LayerFolders[len(coi.Spec.Windows.LayerFolders)-1]
	logrus.Debugf("hcsshim::allocateWindowsResources scratch folder: %s", scratchFolder)

	// TODO: Remove this code for auto-creation. Make the caller responsible.
	// Create the directory for the RW scratch layer if it doesn't exist
	if _, err := os.Stat(scratchFolder); os.IsNotExist(err) {
		logrus.Debugf("hcsshim::allocateWindowsResources container scratch folder does not exist so creating: %s ", scratchFolder)
		if err := os.MkdirAll(scratchFolder, 0777); err != nil {
			return fmt.Errorf("failed to auto-create container scratch folder %s: %s", scratchFolder, err)
		}
	}

	// Create sandbox.vhdx if it doesn't exist in the scratch folder. It's called sandbox.vhdx
	// rather than scratch.vhdx as in the v1 schema, it's hard-coded in HCS.
	if _, err := os.Stat(filepath.Join(scratchFolder, "sandbox.vhdx")); os.IsNotExist(err) {
		logrus.Debugf("hcsshim::allocateWindowsResources container sandbox.vhdx does not exist so creating in %s ", scratchFolder)
		if err := wclayer.CreateScratchLayer(scratchFolder, coi.Spec.Windows.LayerFolders[:len(coi.Spec.Windows.LayerFolders)-1]); err != nil {
			return fmt.Errorf("failed to CreateSandboxLayer %s", err)
		}
	}

	if coi.Spec.Root == nil {
		coi.Spec.Root = &specs.Root{}
	}

	if coi.Spec.Root.Path == "" && (coi.HostingSystem != nil || coi.Spec.Windows.HyperV == nil) {
		logrus.Debugln("hcsshim::allocateWindowsResources mounting storage")
		mcl, err := MountContainerLayers(coi.Spec.Windows.LayerFolders, resources.containerRootInUVM, coi.HostingSystem)
		if err != nil {
			return fmt.Errorf("failed to mount container storage: %s", err)
		}
		if coi.HostingSystem == nil {
			coi.Spec.Root.Path = mcl.(string) // Argon v1 or v2
		} else {
			coi.Spec.Root.Path = mcl.(guestrequest.CombinedLayers).ContainerRootPath // v2 Xenon WCOW
		}
		resources.layers = coi.Spec.Windows.LayerFolders
	}

	// Validate each of the mounts. If this is a V2 Xenon, we have to add them as
	// VSMB shares to the utility VM. For V1 Xenon and Argons, there's nothing for
	// us to do as it's done by HCS.
	for i, mount := range coi.Spec.Mounts {
		if mount.Destination == "" || mount.Source == "" {
			return fmt.Errorf("invalid OCI spec - a mount must have both source and a destination: %+v", mount)
		}
		switch mount.Type {
		case "":
		case "physical-disk":
		case "virtual-disk":
		default:
			return fmt.Errorf("invalid OCI spec - Type '%s' not supported", mount.Type)
		}

		if coi.HostingSystem != nil && schemaversion.IsV21(coi.actualSchemaVersion) {
			uvmPath := fmt.Sprintf("C:\\%s\\%d", coi.actualID, i)
			// A mount is read-only when any option equals "ro" (case-insensitive).
			readOnly := false
			for _, o := range mount.Options {
				if strings.ToLower(o) == "ro" {
					readOnly = true
					break
				}
			}
			if mount.Type == "physical-disk" {
				logrus.Debugf("hcsshim::allocateWindowsResources Hot-adding SCSI physical disk for OCI mount %+v", mount)
				_, _, err := coi.HostingSystem.AddSCSIPhysicalDisk(mount.Source, uvmPath, readOnly)
				if err != nil {
					return fmt.Errorf("adding SCSI physical disk mount %+v: %s", mount, err)
				}
				coi.Spec.Mounts[i].Type = ""
				resources.scsiMounts = append(resources.scsiMounts, mount.Source)
			} else if mount.Type == "virtual-disk" {
				logrus.Debugf("hcsshim::allocateWindowsResources Hot-adding SCSI virtual disk for OCI mount %+v", mount)
				_, _, err := coi.HostingSystem.AddSCSI(mount.Source, uvmPath, readOnly)
				if err != nil {
					return fmt.Errorf("adding SCSI virtual disk mount %+v: %s", mount, err)
				}
				coi.Spec.Mounts[i].Type = ""
				resources.scsiMounts = append(resources.scsiMounts, mount.Source)
			} else {
				logrus.Debugf("hcsshim::allocateWindowsResources Hot-adding VSMB share for OCI mount %+v", mount)
				options := &hcsschema.VirtualSmbShareOptions{}
				if readOnly {
					options.ReadOnly = true
					options.CacheIo = true
					options.ShareRead = true
					options.ForceLevelIIOplocks = true
					// Fix: the original had a stray 'break' here which exited
					// the mounts loop entirely - the read-only share was never
					// added via AddVSMB and all remaining mounts were silently
					// skipped.
				}
				err := coi.HostingSystem.AddVSMB(mount.Source, "", options)
				if err != nil {
					return fmt.Errorf("failed to add VSMB share to utility VM for mount %+v: %s", mount, err)
				}
				resources.vsmbMounts = append(resources.vsmbMounts, mount.Source)
			}
		}
	}

	return nil
}

View File

@ -1,260 +0,0 @@
// +build windows,functional
package hcsoci
//import (
// "os"
// "path/filepath"
// "testing"
// "github.com/Microsoft/hcsshim/internal/schemaversion"
// specs "github.com/opencontainers/runtime-spec/specs-go"
//)
//// --------------------------------
//// W C O W A R G O N V 1
//// --------------------------------
//// A v1 Argon with a single base layer. It also validates hostname functionality is propagated.
//func TestV1Argon(t *testing.T) {
// t.Skip("fornow")
// tempDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(tempDir)
// layers := append(layersNanoserver, tempDir)
// mountPath, err := mountContainerLayers(layers, nil)
// if err != nil {
// t.Fatalf("failed to mount container storage: %s", err)
// }
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
// c, err := CreateContainer(&CreateOptions{
// SchemaVersion: schemaversion.SchemaV10(),
// Id: "TestV1Argon",
// Owner: "unit-test",
// Spec: &specs.Spec{
// Hostname: "goofy",
// Windows: &specs.Windows{LayerFolders: layers},
// Root: &specs.Root{Path: mountPath.(string)},
// },
// })
// if err != nil {
// t.Fatalf("Failed create: %s", err)
// }
// startContainer(t, c)
// runCommand(t, c, "cmd /s /c echo Hello", `c:\`, "Hello")
// runCommand(t, c, "cmd /s /c hostname", `c:\`, "goofy")
// stopContainer(t, c)
// c.Terminate()
//}
//// A v1 Argon with a single base layer which uses the auto-mount capability
//func TestV1ArgonAutoMount(t *testing.T) {
// t.Skip("fornow")
// tempDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(tempDir)
// layers := append(layersBusybox, tempDir)
// c, err := CreateContainer(&CreateOptions{
// Id: "TestV1ArgonAutoMount",
// SchemaVersion: schemaversion.SchemaV10(),
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layers}},
// })
// if err != nil {
// t.Fatalf("Failed create: %s", err)
// }
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
// startContainer(t, c)
// runCommand(t, c, "cmd /s /c echo Hello", `c:\`, "Hello")
// stopContainer(t, c)
// c.Terminate()
//}
//// A v1 Argon with multiple layers which uses the auto-mount capability
//func TestV1ArgonMultipleBaseLayersAutoMount(t *testing.T) {
// t.Skip("fornow")
// // This is the important bit for this test. It's deleted here. We call the helper only to allocate a temporary directory
// containerScratchDir := createTempDir(t)
// os.RemoveAll(containerScratchDir)
// defer os.RemoveAll(containerScratchDir) // As auto-created
// layers := append(layersBusybox, containerScratchDir)
// c, err := CreateContainer(&CreateOptions{
// Id: "TestV1ArgonMultipleBaseLayersAutoMount",
// SchemaVersion: schemaversion.SchemaV10(),
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layers}},
// })
// if err != nil {
// t.Fatalf("Failed create: %s", err)
// }
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
// startContainer(t, c)
// runCommand(t, c, "cmd /s /c echo Hello", `c:\`, "Hello")
// stopContainer(t, c)
// c.Terminate()
//}
//// A v1 Argon with a single mapped directory.
//func TestV1ArgonSingleMappedDirectory(t *testing.T) {
// t.Skip("fornow")
// tempDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(tempDir)
// layers := append(layersNanoserver, tempDir)
// // Create a temp folder containing foo.txt which will be used for the bind-mount test.
// source := createTempDir(t)
// defer os.RemoveAll(source)
// mount := specs.Mount{
// Source: source,
// Destination: `c:\foo`,
// }
// f, err := os.OpenFile(filepath.Join(source, "foo.txt"), os.O_RDWR|os.O_CREATE, 0755)
// f.Close()
// c, err := CreateContainer(&CreateOptions{
// SchemaVersion: schemaversion.SchemaV10(),
// Spec: &specs.Spec{
// Windows: &specs.Windows{LayerFolders: layers},
// Mounts: []specs.Mount{mount},
// },
// })
// if err != nil {
// t.Fatalf("Failed create: %s", err)
// }
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
// startContainer(t, c)
// runCommand(t, c, `cmd /s /c dir /b c:\foo`, `c:\`, "foo.txt")
// stopContainer(t, c)
// c.Terminate()
//}
//// --------------------------------
//// W C O W A R G O N V 2
//// --------------------------------
//// A v2 Argon with a single base layer. It also validates hostname functionality is propagated.
//// It also uses an auto-generated ID.
//func TestV2Argon(t *testing.T) {
// t.Skip("fornow")
// tempDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(tempDir)
// layers := append(layersNanoserver, tempDir)
// mountPath, err := mountContainerLayers(layers, nil)
// if err != nil {
// t.Fatalf("failed to mount container storage: %s", err)
// }
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
// c, err := CreateContainer(&CreateOptions{
// SchemaVersion: schemaversion.SchemaV21(),
// Spec: &specs.Spec{
// Hostname: "mickey",
// Windows: &specs.Windows{LayerFolders: layers},
// Root: &specs.Root{Path: mountPath.(string)},
// },
// })
// if err != nil {
// t.Fatalf("Failed create: %s", err)
// }
// startContainer(t, c)
// runCommand(t, c, "cmd /s /c echo Hello", `c:\`, "Hello")
// runCommand(t, c, "cmd /s /c hostname", `c:\`, "mickey")
// stopContainer(t, c)
// c.Terminate()
//}
//// A v2 Argon with multiple layers
//func TestV2ArgonMultipleBaseLayers(t *testing.T) {
// t.Skip("fornow")
// tempDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(tempDir)
// layers := append(layersBusybox, tempDir)
// mountPath, err := mountContainerLayers(layers, nil)
// if err != nil {
// t.Fatalf("failed to mount container storage: %s", err)
// }
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
// c, err := CreateContainer(&CreateOptions{
// SchemaVersion: schemaversion.SchemaV21(),
// Id: "TestV2ArgonMultipleBaseLayers",
// Spec: &specs.Spec{
// Windows: &specs.Windows{LayerFolders: layers},
// Root: &specs.Root{Path: mountPath.(string)},
// },
// })
// if err != nil {
// t.Fatalf("Failed create: %s", err)
// }
// startContainer(t, c)
// runCommand(t, c, "cmd /s /c echo Hello", `c:\`, "Hello")
// stopContainer(t, c)
// c.Terminate()
//}
//// A v2 Argon with multiple layers which uses the auto-mount capability and auto-create
//func TestV2ArgonAutoMountMultipleBaseLayers(t *testing.T) {
// t.Skip("fornow")
// // This is the important bit for this test. It's deleted here. We call the helper only to allocate a temporary directory
// containerScratchDir := createTempDir(t)
// os.RemoveAll(containerScratchDir)
// defer os.RemoveAll(containerScratchDir) // As auto-created
// layers := append(layersBusybox, containerScratchDir)
// c, err := CreateContainer(&CreateOptions{
// SchemaVersion: schemaversion.SchemaV21(),
// Id: "TestV2ArgonAutoMountMultipleBaseLayers",
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layers}},
// })
// if err != nil {
// t.Fatalf("Failed create: %s", err)
// }
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
// startContainer(t, c)
// runCommand(t, c, "cmd /s /c echo Hello", `c:\`, "Hello")
// stopContainer(t, c)
// c.Terminate()
//}
//// A v2 Argon with a single mapped directory.
//func TestV2ArgonSingleMappedDirectory(t *testing.T) {
// t.Skip("fornow")
// tempDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(tempDir)
// layers := append(layersNanoserver, tempDir)
// // Create a temp folder containing foo.txt which will be used for the bind-mount test.
// source := createTempDir(t)
// defer os.RemoveAll(source)
// mount := specs.Mount{
// Source: source,
// Destination: `c:\foo`,
// }
// f, err := os.OpenFile(filepath.Join(source, "foo.txt"), os.O_RDWR|os.O_CREATE, 0755)
// f.Close()
// c, err := CreateContainer(&CreateOptions{
// SchemaVersion: schemaversion.SchemaV21(),
// Spec: &specs.Spec{
// Windows: &specs.Windows{LayerFolders: layers},
// Mounts: []specs.Mount{mount},
// },
// })
// if err != nil {
// t.Fatalf("Failed create: %s", err)
// }
// defer unmountContainerLayers(layers, nil, unmountOperationAll)
// startContainer(t, c)
// runCommand(t, c, `cmd /s /c dir /b c:\foo`, `c:\`, "foo.txt")
// stopContainer(t, c)
// c.Terminate()
//}

View File

@ -1,365 +0,0 @@
// +build windows,functional
package hcsoci
//import (
// "fmt"
// "os"
// "path/filepath"
// "testing"
// "github.com/Microsoft/hcsshim/internal/schemaversion"
// specs "github.com/opencontainers/runtime-spec/specs-go"
//)
//// --------------------------------
//// W C O W X E N O N V 2
//// --------------------------------
//// A single WCOW xenon. Note in this test, neither the UVM or the
//// containers are supplied IDs - they will be autogenerated for us.
//// This is the minimum set of parameters needed to create a V2 WCOW xenon.
//func TestV2XenonWCOW(t *testing.T) {
// t.Skip("Skipping for now")
// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "", nil)
// defer os.RemoveAll(uvmScratchDir)
// defer uvm.Terminate()
// // Create the container hosted inside the utility VM
// containerScratchDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(containerScratchDir)
// layerFolders := append(layersNanoserver, containerScratchDir)
// hostedContainer, err := CreateContainer(&CreateOptions{
// HostingSystem: uvm,
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layerFolders}},
// })
// if err != nil {
// t.Fatalf("CreateContainer failed: %s", err)
// }
// defer unmountContainerLayers(layerFolders, uvm, unmountOperationAll)
// // Start/stop the container
// startContainer(t, hostedContainer)
// runCommand(t, hostedContainer, "cmd /s /c echo TestV2XenonWCOW", `c:\`, "TestV2XenonWCOW")
// stopContainer(t, hostedContainer)
// hostedContainer.Terminate()
//}
//// TODO: Have a similar test where the UVM scratch folder does not exist.
//// A single WCOW xenon but where the container sandbox folder is not pre-created by the client
//func TestV2XenonWCOWContainerSandboxFolderDoesNotExist(t *testing.T) {
// t.Skip("Skipping for now")
// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "TestV2XenonWCOWContainerSandboxFolderDoesNotExist_UVM", nil)
// defer os.RemoveAll(uvmScratchDir)
// defer uvm.Terminate()
// // This is the important bit for this test. It's deleted here. We call the helper only to allocate a temporary directory
// containerScratchDir := createTempDir(t)
// os.RemoveAll(containerScratchDir)
// defer os.RemoveAll(containerScratchDir) // As auto-created
// layerFolders := append(layersBusybox, containerScratchDir)
// hostedContainer, err := CreateContainer(&CreateOptions{
// Id: "container",
// HostingSystem: uvm,
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layerFolders}},
// })
// if err != nil {
// t.Fatalf("CreateContainer failed: %s", err)
// }
// defer unmountContainerLayers(layerFolders, uvm, unmountOperationAll)
// // Start/stop the container
// startContainer(t, hostedContainer)
// runCommand(t, hostedContainer, "cmd /s /c echo TestV2XenonWCOW", `c:\`, "TestV2XenonWCOW")
// stopContainer(t, hostedContainer)
// hostedContainer.Terminate()
//}
//// TODO What about mount. Test with the client doing the mount.
//// TODO Test as above, but where sandbox for UVM is entirely created by a client to show how it's done.
//// Two v2 WCOW containers in the same UVM, each with a single base layer
//func TestV2XenonWCOWTwoContainers(t *testing.T) {
// t.Skip("Skipping for now")
// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "TestV2XenonWCOWTwoContainers_UVM", nil)
// defer os.RemoveAll(uvmScratchDir)
// defer uvm.Terminate()
// // First hosted container
// firstContainerScratchDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(firstContainerScratchDir)
// firstLayerFolders := append(layersNanoserver, firstContainerScratchDir)
// firstHostedContainer, err := CreateContainer(&CreateOptions{
// Id: "FirstContainer",
// HostingSystem: uvm,
// SchemaVersion: schemaversion.SchemaV21(),
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: firstLayerFolders}},
// })
// if err != nil {
// t.Fatalf("CreateContainer failed: %s", err)
// }
// defer unmountContainerLayers(firstLayerFolders, uvm, unmountOperationAll)
// // Second hosted container
// secondContainerScratchDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(firstContainerScratchDir)
// secondLayerFolders := append(layersNanoserver, secondContainerScratchDir)
// secondHostedContainer, err := CreateContainer(&CreateOptions{
// Id: "SecondContainer",
// HostingSystem: uvm,
// SchemaVersion: schemaversion.SchemaV21(),
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: secondLayerFolders}},
// })
// if err != nil {
// t.Fatalf("CreateContainer failed: %s", err)
// }
// defer unmountContainerLayers(secondLayerFolders, uvm, unmountOperationAll)
// startContainer(t, firstHostedContainer)
// runCommand(t, firstHostedContainer, "cmd /s /c echo FirstContainer", `c:\`, "FirstContainer")
// startContainer(t, secondHostedContainer)
// runCommand(t, secondHostedContainer, "cmd /s /c echo SecondContainer", `c:\`, "SecondContainer")
// stopContainer(t, firstHostedContainer)
// stopContainer(t, secondHostedContainer)
// firstHostedContainer.Terminate()
// secondHostedContainer.Terminate()
//}
////// This verifies the container storage is unmounted correctly so that a second
////// container can be started from the same storage.
////func TestV2XenonWCOWWithRemount(t *testing.T) {
////// //t.Skip("Skipping for now")
//// uvmID := "Testv2XenonWCOWWithRestart_UVM"
//// uvmScratchDir, err := ioutil.TempDir("", "uvmScratch")
//// if err != nil {
//// t.Fatalf("Failed create temporary directory: %s", err)
//// }
//// if err := CreateWCOWSandbox(layersNanoserver[0], uvmScratchDir, uvmID); err != nil {
//// t.Fatalf("Failed create Windows UVM Sandbox: %s", err)
//// }
//// defer os.RemoveAll(uvmScratchDir)
//// uvm, err := CreateContainer(&CreateOptions{
//// Id: uvmID,
//// Owner: "unit-test",
//// SchemaVersion: SchemaV21(),
//// IsHostingSystem: true,
//// Spec: &specs.Spec{
//// Windows: &specs.Windows{
//// LayerFolders: []string{uvmScratchDir},
//// HyperV: &specs.WindowsHyperV{UtilityVMPath: filepath.Join(layersNanoserver[0], `UtilityVM\Files`)},
//// },
//// },
//// })
//// if err != nil {
//// t.Fatalf("Failed create UVM: %s", err)
//// }
//// defer uvm.Terminate()
//// if err := uvm.Start(); err != nil {
//// t.Fatalf("Failed start utility VM: %s", err)
//// }
//// // Mount the containers storage in the utility VM
//// containerScratchDir := createWCOWTempDirWithSandbox(t)
//// layerFolders := append(layersNanoserver, containerScratchDir)
//// cls, err := Mount(layerFolders, uvm, SchemaV21())
//// if err != nil {
//// t.Fatalf("failed to mount container storage: %s", err)
//// }
//// combinedLayers := cls.(CombinedLayersV2)
//// mountedLayers := &ContainersResourcesStorageV2{
//// Layers: combinedLayers.Layers,
//// Path: combinedLayers.ContainerRootPath,
//// }
//// defer func() {
//// if err := Unmount(layerFolders, uvm, SchemaV21(), unmountOperationAll); err != nil {
//// t.Fatalf("failed to unmount container storage: %s", err)
//// }
//// }()
//// // Create the first container
//// defer os.RemoveAll(containerScratchDir)
//// xenon, err := CreateContainer(&CreateOptions{
//// Id: "container",
//// Owner: "unit-test",
//// HostingSystem: uvm,
//// SchemaVersion: SchemaV21(),
//// Spec: &specs.Spec{Windows: &specs.Windows{}}, // No layerfolders as we mounted them ourself.
//// })
//// if err != nil {
//// t.Fatalf("CreateContainer failed: %s", err)
//// }
//// // Start/stop the first container
//// startContainer(t, xenon)
//// runCommand(t, xenon, "cmd /s /c echo TestV2XenonWCOWFirstStart", `c:\`, "TestV2XenonWCOWFirstStart")
//// stopContainer(t, xenon)
//// xenon.Terminate()
//// // Now unmount and remount to exactly the same places
//// if err := Unmount(layerFolders, uvm, SchemaV21(), unmountOperationAll); err != nil {
//// t.Fatalf("failed to unmount container storage: %s", err)
//// }
//// if _, err = Mount(layerFolders, uvm, SchemaV21()); err != nil {
//// t.Fatalf("failed to mount container storage: %s", err)
//// }
//// // Create an identical second container and verify it works too.
//// xenon2, err := CreateContainer(&CreateOptions{
//// Id: "container",
//// Owner: "unit-test",
//// HostingSystem: uvm,
//// SchemaVersion: SchemaV21(),
//// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layerFolders}},
//// MountedLayers: mountedLayers,
//// })
//// if err != nil {
//// t.Fatalf("CreateContainer failed: %s", err)
//// }
//// startContainer(t, xenon2)
//// runCommand(t, xenon2, "cmd /s /c echo TestV2XenonWCOWAfterRemount", `c:\`, "TestV2XenonWCOWAfterRemount")
//// stopContainer(t, xenon2)
//// xenon2.Terminate()
////}
//// Lots of v2 WCOW containers in the same UVM, each with a single base layer. Containers aren't
//// actually started, but it stresses the SCSI controller hot-add logic.
//func TestV2XenonWCOWCreateLots(t *testing.T) {
// t.Skip("Skipping for now")
// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "TestV2XenonWCOWCreateLots", nil)
// defer os.RemoveAll(uvmScratchDir)
// defer uvm.Terminate()
// // 63 as 0:0 is already taken as the UVMs scratch. So that leaves us with 64-1 left for container scratches on SCSI
// for i := 0; i < 63; i++ {
// containerScratchDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(containerScratchDir)
// layerFolders := append(layersNanoserver, containerScratchDir)
// hostedContainer, err := CreateContainer(&CreateOptions{
// Id: fmt.Sprintf("container%d", i),
// HostingSystem: uvm,
// SchemaVersion: schemaversion.SchemaV21(),
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: layerFolders}},
// })
// if err != nil {
// t.Fatalf("CreateContainer failed: %s", err)
// }
// defer hostedContainer.Terminate()
// defer unmountContainerLayers(layerFolders, uvm, unmountOperationAll)
// }
// // TODO: Should check the internal structures here for VSMB and SCSI
// // TODO: Push it over 63 now and will get a failure.
//}
//// Helper for the v2 Xenon tests to create a utility VM. Returns the UtilityVM
//// object; folder used as its scratch
//func createv2WCOWUVM(t *testing.T, uvmLayers []string, uvmId string, resources *specs.WindowsResources) (*UtilityVM, string) {
// scratchDir := createTempDir(t)
// uvm := UtilityVM{
// OperatingSystem: "windows",
// LayerFolders: append(uvmLayers, scratchDir),
// Resources: resources,
// }
// if uvmId != "" {
// uvm.Id = uvmId
// }
// if err := uvm.Create(); err != nil {
// t.Fatalf("Failed create WCOW v2 UVM: %s", err)
// }
// if err := uvm.Start(); err != nil {
// t.Fatalf("Failed start WCOW v2UVM: %s", err)
// }
// return &uvm, scratchDir
//}
//// TestV2XenonWCOWMultiLayer creates a V2 Xenon having multiple image layers
//func TestV2XenonWCOWMultiLayer(t *testing.T) {
// t.Skip("for now")
// uvmMemory := uint64(1 * 1024 * 1024 * 1024)
// uvmCPUCount := uint64(2)
// resources := &specs.WindowsResources{
// Memory: &specs.WindowsMemoryResources{
// Limit: &uvmMemory,
// },
// CPU: &specs.WindowsCPUResources{
// Count: &uvmCPUCount,
// },
// }
// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "TestV2XenonWCOWMultiLayer_UVM", resources)
// defer os.RemoveAll(uvmScratchDir)
// defer uvm.Terminate()
// // Create a sandbox for the hosted container
// containerScratchDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(containerScratchDir)
// // Create the container. Note that this will auto-mount for us.
// containerLayers := append(layersBusybox, containerScratchDir)
// xenon, err := CreateContainer(&CreateOptions{
// Id: "container",
// HostingSystem: uvm,
// Spec: &specs.Spec{Windows: &specs.Windows{LayerFolders: containerLayers}},
// })
// if err != nil {
// t.Fatalf("CreateContainer failed: %s", err)
// }
// // Start/stop the container
// startContainer(t, xenon)
// runCommand(t, xenon, "echo Container", `c:\`, "Container")
// stopContainer(t, xenon)
// xenon.Terminate()
// // TODO Move this to a defer function to fail if it fails.
// if err := unmountContainerLayers(containerLayers, uvm, unmountOperationAll); err != nil {
// t.Fatalf("unmount failed: %s", err)
// }
//}
//// TestV2XenonWCOWSingleMappedDirectory tests a V2 Xenon WCOW with a single mapped directory
//func TestV2XenonWCOWSingleMappedDirectory(t *testing.T) {
// t.Skip("Skipping for now")
// uvm, uvmScratchDir := createv2WCOWUVM(t, layersNanoserver, "", nil)
// defer os.RemoveAll(uvmScratchDir)
// defer uvm.Terminate()
// // Create the container hosted inside the utility VM
// containerScratchDir := createWCOWTempDirWithSandbox(t)
// defer os.RemoveAll(containerScratchDir)
// layerFolders := append(layersNanoserver, containerScratchDir)
// // Create a temp folder containing foo.txt which will be used for the bind-mount test.
// source := createTempDir(t)
// defer os.RemoveAll(source)
// mount := specs.Mount{
// Source: source,
// Destination: `c:\foo`,
// }
// f, err := os.OpenFile(filepath.Join(source, "foo.txt"), os.O_RDWR|os.O_CREATE, 0755)
// f.Close()
// hostedContainer, err := CreateContainer(&CreateOptions{
// HostingSystem: uvm,
// Spec: &specs.Spec{
// Windows: &specs.Windows{LayerFolders: layerFolders},
// Mounts: []specs.Mount{mount},
// },
// })
// if err != nil {
// t.Fatalf("CreateContainer failed: %s", err)
// }
// defer unmountContainerLayers(layerFolders, uvm, unmountOperationAll)
// // TODO BUGBUG NEED TO UNMOUNT TO VSMB SHARE FOR THE CONTAINER
// // Start/stop the container
// startContainer(t, hostedContainer)
// runCommand(t, hostedContainer, `cmd /s /c dir /b c:\foo`, `c:\`, "foo.txt")
// stopContainer(t, hostedContainer)
// hostedContainer.Terminate()
//}

View File

@ -1,9 +0,0 @@
package lcow
const (
	// DefaultScratchSizeGB is the size of the default LCOW scratch disk in GB.
	// CreateScratch rounds any smaller requested size up to this value, as a
	// scratch disk can only be expanded, never shrunk.
	DefaultScratchSizeGB = 20

	// defaultVhdxBlockSizeMB is the block-size for the scratch VHDx's this package can create.
	defaultVhdxBlockSizeMB = 1
)

View File

@ -1,55 +0,0 @@
package lcow
//func debugCommand(s string) string {
// return fmt.Sprintf(`echo -e 'DEBUG COMMAND: %s\\n--------------\\n';%s;echo -e '\\n\\n';`, s, s)
//}
// DebugLCOWGCS extracts logs from the GCS in LCOW. It's a useful hack for debugging,
// but not necessarily optimal, but all that is available to us in RS3.
//func (container *container) DebugLCOWGCS() {
// if logrus.GetLevel() < logrus.DebugLevel || len(os.Getenv("HCSSHIM_LCOW_DEBUG_ENABLE")) == 0 {
// return
// }
// var out bytes.Buffer
// cmd := os.Getenv("HCSSHIM_LCOW_DEBUG_COMMAND")
// if cmd == "" {
// cmd = `sh -c "`
// cmd += debugCommand("kill -10 `pidof gcs`") // SIGUSR1 for stackdump
// cmd += debugCommand("ls -l /tmp")
// cmd += debugCommand("cat /tmp/gcs.log")
// cmd += debugCommand("cat /tmp/gcs/gcs-stacks*")
// cmd += debugCommand("cat /tmp/gcs/paniclog*")
// cmd += debugCommand("ls -l /tmp/gcs")
// cmd += debugCommand("ls -l /tmp/gcs/*")
// cmd += debugCommand("cat /tmp/gcs/*/config.json")
// cmd += debugCommand("ls -lR /var/run/gcsrunc")
// cmd += debugCommand("cat /tmp/gcs/global-runc.log")
// cmd += debugCommand("cat /tmp/gcs/*/runc.log")
// cmd += debugCommand("ps -ef")
// cmd += `"`
// }
// proc, _, err := container.CreateProcessEx(
// &CreateProcessEx{
// OCISpecification: &specs.Spec{
// Process: &specs.Process{Args: []string{cmd}},
// Linux: &specs.Linux{},
// },
// CreateInUtilityVm: true,
// Stdout: &out,
// })
// defer func() {
// if proc != nil {
// proc.Kill()
// proc.Close()
// }
// }()
// if err != nil {
// logrus.Debugln("benign failure getting gcs logs: ", err)
// }
// if proc != nil {
// proc.WaitTimeout(time.Duration(int(time.Second) * 30))
// }
// logrus.Debugf("GCS Debugging:\n%s\n\nEnd GCS Debugging", strings.TrimSpace(out.String()))
//}

View File

@ -1,161 +0,0 @@
package lcow
import (
"fmt"
"io"
"strings"
"time"
"github.com/Microsoft/hcsshim/internal/copywithtimeout"
"github.com/Microsoft/hcsshim/internal/hcs"
"github.com/Microsoft/hcsshim/internal/schema2"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
)
// ByteCounts are the number of bytes copied to/from standard handles. Note
// this is int64 rather than uint64 to match the golang io.Copy() signature.
type ByteCounts struct {
	In  int64 // bytes copied to the process's stdin
	Out int64 // bytes copied from the process's stdout
	Err int64 // bytes copied from the process's stderr
}
// ProcessOptions are the set of options which are passed to CreateProcess()
// to create a process in a compute system (either a container's init process,
// or a command in a utility VM).
type ProcessOptions struct {
	HCSSystem         *hcs.System    // The compute system in which the process is created
	Process           *specs.Process // OCI process spec; required when CreateInUtilityVm is set
	Stdin             io.Reader      // Optional reader for sending on to the processes stdin stream
	Stdout            io.Writer      // Optional writer for returning the processes stdout stream
	Stderr            io.Writer      // Optional writer for returning the processes stderr stream
	CopyTimeout       time.Duration  // Timeout for the copy
	CreateInUtilityVm bool           // If the compute system is a utility VM
	ByteCounts        ByteCounts     // How much data to copy on each stream if they are supplied. 0 means to io.EOF.
}
// CreateProcess creates a process either in an LCOW utility VM, or for starting
// the init process. TODO: Potentially extend for exec'd processes.
//
// It's essentially a glorified wrapper around hcs.ComputeSystem CreateProcess used
// for internal purposes.
//
// This is used on LCOW to run processes for remote filesystem commands, utilities,
// and debugging.
//
// It optionally performs IO copies with timeout between the pipes provided as input,
// and the pipes in the process.
//
// In the ProcessOptions structure, if byte-counts are non-zero, a maximum of those
// bytes are copied to the appropriate standard IO reader/writer. When zero,
// it copies until EOF. It also returns byte-counts indicating how much data
// was sent/received from the process.
//
// It is the responsibility of the caller to call Close() on the process returned.
// On error, no process is returned and any process created internally has been
// killed and closed.
func CreateProcess(opts *ProcessOptions) (*hcs.Process, *ByteCounts, error) {
	// Validate before doing any work.
	if opts == nil {
		return nil, nil, fmt.Errorf("no options supplied")
	}
	if opts.HCSSystem == nil {
		return nil, nil, fmt.Errorf("no HCS system supplied")
	}
	if opts.CreateInUtilityVm && opts.Process == nil {
		return nil, nil, fmt.Errorf("process must be supplied for UVM process")
	}

	copiedByteCounts := &ByteCounts{}
	environment := make(map[string]string)

	// Don't pass a process in if this is an LCOW container. This will start the init process.
	if opts.Process != nil {
		for _, v := range opts.Process.Env {
			s := strings.SplitN(v, "=", 2)
			if len(s) == 2 && len(s[1]) > 0 {
				environment[s[0]] = s[1]
			}
		}
		if _, ok := environment["PATH"]; !ok {
			environment["PATH"] = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:"
		}
	}

	processConfig := &ProcessParameters{
		ProcessParameters: hcsschema.ProcessParameters{
			CreateStdInPipe:  (opts.Stdin != nil),
			CreateStdOutPipe: (opts.Stdout != nil),
			CreateStdErrPipe: (opts.Stderr != nil),
			EmulateConsole:   false,
		},
		CreateInUtilityVm: opts.CreateInUtilityVm,
	}

	if opts.Process != nil {
		processConfig.Environment = environment
		processConfig.CommandLine = strings.Join(opts.Process.Args, " ")
		processConfig.WorkingDirectory = opts.Process.Cwd
		if processConfig.WorkingDirectory == "" {
			processConfig.WorkingDirectory = `/`
		}
	}

	proc, err := opts.HCSSystem.CreateProcess(processConfig)
	if err != nil {
		logrus.Debugf("failed to create process: %s", err)
		return nil, nil, err
	}

	// On any failure past this point, kill and close the process so neither it
	// nor its stdio pipes are leaked. (The original only did this for the
	// Stdio() failure path, leaking the process on copy failures.)
	cleanup := func() {
		proc.Kill() // Should this have a timeout?
		proc.Close()
	}

	processStdin, processStdout, processStderr, err := proc.Stdio()
	if err != nil {
		cleanup()
		return nil, nil, fmt.Errorf("failed to get stdio pipes for process %+v: %s", processConfig, err)
	}

	// Send the data into the process's stdin
	if opts.Stdin != nil {
		if copiedByteCounts.In, err = copywithtimeout.Copy(processStdin,
			opts.Stdin,
			opts.ByteCounts.In,
			"stdin",
			opts.CopyTimeout); err != nil {
			cleanup()
			return nil, nil, err
		}

		// Don't need stdin now we've sent everything. This signals GCS that we are finished sending data.
		if err := proc.CloseStdin(); err != nil && !hcs.IsNotExist(err) && !hcs.IsAlreadyClosed(err) {
			// This error will occur if the compute system is currently shutting down
			if perr, ok := err.(*hcs.ProcessError); ok && perr.Err != hcs.ErrVmcomputeOperationInvalidState {
				cleanup()
				return nil, nil, err
			}
		}
	}

	// Copy the data back from stdout
	if opts.Stdout != nil {
		if copiedByteCounts.Out, err = copywithtimeout.Copy(opts.Stdout,
			processStdout,
			opts.ByteCounts.Out,
			"stdout",
			opts.CopyTimeout); err != nil {
			cleanup()
			return nil, nil, err
		}
	}

	// Copy the data back from stderr
	if opts.Stderr != nil {
		if copiedByteCounts.Err, err = copywithtimeout.Copy(opts.Stderr,
			processStderr,
			opts.ByteCounts.Err,
			"stderr",
			opts.CopyTimeout); err != nil {
			cleanup()
			return nil, nil, err
		}
	}

	return proc, copiedByteCounts, nil
}

View File

@ -1,168 +0,0 @@
package lcow
import (
"bytes"
"fmt"
"os"
"strings"
"time"
"github.com/Microsoft/go-winio/vhd"
"github.com/Microsoft/hcsshim/internal/copyfile"
"github.com/Microsoft/hcsshim/internal/timeout"
"github.com/Microsoft/hcsshim/internal/uvm"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
)
// CreateScratch uses a utility VM to create an empty scratch disk of a requested size.
// It has a caching capability. If the cacheFile exists, and the request is for a default
// size, a copy of that is made to the target. If the size is non-default, or the cache file
// does not exist, it uses a utility VM to create target. It is the responsibility of the
// caller to synchronise simultaneous attempts to create the cache file.
//
// NOTE(review): vmID is not referenced by this implementation — confirm whether
// it can be dropped from callers or is reserved for future use.
func CreateScratch(lcowUVM *uvm.UtilityVM, destFile string, sizeGB uint32, cacheFile string, vmID string) error {
	if lcowUVM == nil {
		return fmt.Errorf("no uvm")
	}
	if lcowUVM.OS() != "linux" {
		return fmt.Errorf("CreateLCOWScratch requires a linux utility VM to operate!")
	}

	// Smallest we can accept is the default scratch size as we can't size down, only expand.
	if sizeGB < DefaultScratchSizeGB {
		sizeGB = DefaultScratchSizeGB
	}

	logrus.Debugf("hcsshim::CreateLCOWScratch: Dest:%s size:%dGB cache:%s", destFile, sizeGB, cacheFile)

	// Retrieve from cache if the default size and already on disk
	if cacheFile != "" && sizeGB == DefaultScratchSizeGB {
		if _, err := os.Stat(cacheFile); err == nil {
			if err := copyfile.CopyFile(cacheFile, destFile, false); err != nil {
				return fmt.Errorf("failed to copy cached file '%s' to '%s': %s", cacheFile, destFile, err)
			}
			logrus.Debugf("hcsshim::CreateLCOWScratch: %s fulfilled from cache (%s)", destFile, cacheFile)
			return nil
		}
	}

	// Create the VHDX
	if err := vhd.CreateVhdx(destFile, sizeGB, defaultVhdxBlockSizeMB); err != nil {
		return fmt.Errorf("failed to create VHDx %s: %s", destFile, err)
	}

	controller, lun, err := lcowUVM.AddSCSI(destFile, "", false) // No destination as not formatted
	if err != nil {
		return err
	}
	logrus.Debugf("hcsshim::CreateLCOWScratch: %s at C=%d L=%d", destFile, controller, lun)

	// Validate /sys/bus/scsi/devices/C:0:0:L exists as a directory
	startTime := time.Now()
	for {
		testdCommand := []string{"test", "-d", fmt.Sprintf("/sys/bus/scsi/devices/%d:0:0:%d", controller, lun)}
		testdProc, _, err := CreateProcess(&ProcessOptions{
			HCSSystem:         lcowUVM.ComputeSystem(),
			CreateInUtilityVm: true,
			CopyTimeout:       timeout.ExternalCommandToStart,
			Process:           &specs.Process{Args: testdCommand},
		})
		if err != nil {
			lcowUVM.RemoveSCSI(destFile)
			return fmt.Errorf("failed to run %+v following hot-add %s to utility VM: %s", testdCommand, destFile, err)
		}
		testdProc.WaitTimeout(timeout.ExternalCommandToComplete)
		testdExitCode, err := testdProc.ExitCode()
		// Close immediately rather than defer: a defer inside this retry loop
		// would accumulate one pending Close per iteration until the function
		// returns, holding process handles open for the lifetime of the call.
		testdProc.Close()
		if err != nil {
			lcowUVM.RemoveSCSI(destFile)
			return fmt.Errorf("failed to get exit code from %+v following hot-add %s to utility VM: %s", testdCommand, destFile, err)
		}
		if testdExitCode == 0 {
			// Device directory is present - hot-add completed.
			break
		}
		if time.Since(startTime) > timeout.TestDRetryLoop {
			lcowUVM.RemoveSCSI(destFile)
			return fmt.Errorf("`%+v` return non-zero exit code (%d) following hot-add %s to utility VM", testdCommand, testdExitCode, destFile)
		}
		time.Sleep(time.Millisecond * 10)
	}

	// Get the device from under the block subdirectory by doing a simple ls. This will come back as (eg) `sda`
	var lsOutput bytes.Buffer
	lsCommand := []string{"ls", fmt.Sprintf("/sys/bus/scsi/devices/%d:0:0:%d/block", controller, lun)}
	lsProc, _, err := CreateProcess(&ProcessOptions{
		HCSSystem:         lcowUVM.ComputeSystem(),
		CreateInUtilityVm: true,
		CopyTimeout:       timeout.ExternalCommandToStart,
		Process:           &specs.Process{Args: lsCommand},
		Stdout:            &lsOutput,
	})
	if err != nil {
		lcowUVM.RemoveSCSI(destFile)
		return fmt.Errorf("failed to `%+v` following hot-add %s to utility VM: %s", lsCommand, destFile, err)
	}
	defer lsProc.Close()
	lsProc.WaitTimeout(timeout.ExternalCommandToComplete)
	lsExitCode, err := lsProc.ExitCode()
	if err != nil {
		lcowUVM.RemoveSCSI(destFile)
		return fmt.Errorf("failed to get exit code from `%+v` following hot-add %s to utility VM: %s", lsCommand, destFile, err)
	}
	if lsExitCode != 0 {
		lcowUVM.RemoveSCSI(destFile)
		return fmt.Errorf("`%+v` return non-zero exit code (%d) following hot-add %s to utility VM", lsCommand, lsExitCode, destFile)
	}
	device := fmt.Sprintf(`/dev/%s`, strings.TrimSpace(lsOutput.String()))
	logrus.Debugf("hcsshim: CreateExt4Vhdx: %s: device at %s", destFile, device)

	// Format it ext4
	mkfsCommand := []string{"mkfs.ext4", "-q", "-E", "lazy_itable_init=1", "-O", `^has_journal,sparse_super2,uninit_bg,^resize_inode`, device}
	var mkfsStderr bytes.Buffer
	mkfsProc, _, err := CreateProcess(&ProcessOptions{
		HCSSystem:         lcowUVM.ComputeSystem(),
		CreateInUtilityVm: true,
		CopyTimeout:       timeout.ExternalCommandToStart,
		Process:           &specs.Process{Args: mkfsCommand},
		Stderr:            &mkfsStderr,
	})
	if err != nil {
		lcowUVM.RemoveSCSI(destFile)
		return fmt.Errorf("failed to `%+v` following hot-add %s to utility VM: %s", mkfsCommand, destFile, err)
	}
	defer mkfsProc.Close()
	mkfsProc.WaitTimeout(timeout.ExternalCommandToComplete)
	mkfsExitCode, err := mkfsProc.ExitCode()
	if err != nil {
		lcowUVM.RemoveSCSI(destFile)
		return fmt.Errorf("failed to get exit code from `%+v` following hot-add %s to utility VM: %s", mkfsCommand, destFile, err)
	}
	if mkfsExitCode != 0 {
		lcowUVM.RemoveSCSI(destFile)
		return fmt.Errorf("`%+v` return non-zero exit code (%d) following hot-add %s to utility VM: %s", mkfsCommand, mkfsExitCode, destFile, strings.TrimSpace(mkfsStderr.String()))
	}

	// Hot-Remove before we copy it
	if err := lcowUVM.RemoveSCSI(destFile); err != nil {
		return fmt.Errorf("failed to hot-remove: %s", err)
	}

	// Populate the cache. Note the message args: we seed cacheFile from destFile.
	if cacheFile != "" && (sizeGB == DefaultScratchSizeGB) {
		if err := copyfile.CopyFile(destFile, cacheFile, true); err != nil {
			return fmt.Errorf("failed to seed cache '%s' from '%s': %s", cacheFile, destFile, err)
		}
	}

	logrus.Debugf("hcsshim::CreateLCOWScratch: %s created (non-cache)", destFile)
	return nil
}

View File

@ -1,46 +0,0 @@
package lcow
import (
"fmt"
"io"
"os"
"time"
"github.com/Microsoft/hcsshim/internal/uvm"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/sirupsen/logrus"
)
// TarToVhd streams a tarstream contained in an io.Reader to a fixed vhd file.
// It returns the number of bytes written to the target VHD. On failure the
// partially-written target file is removed so a corrupt VHD is not left on
// disk (this was the "BUGBUG Delete the file on failure" in the original).
func TarToVhd(lcowUVM *uvm.UtilityVM, targetVHDFile string, reader io.Reader) (int64, error) {
	logrus.Debugf("hcsshim: TarToVhd: %s", targetVHDFile)

	if lcowUVM == nil {
		return 0, fmt.Errorf("no utility VM passed")
	}

	//defer uvm.DebugLCOWGCS()

	outFile, err := os.Create(targetVHDFile)
	if err != nil {
		return 0, fmt.Errorf("tar2vhd failed to create %s: %s", targetVHDFile, err)
	}
	// Close the file, then remove it if we did not succeed. Close-before-remove
	// ordering matters: an open file cannot be deleted on Windows.
	success := false
	defer func() {
		outFile.Close()
		if !success {
			os.Remove(targetVHDFile)
		}
	}()

	tar2vhd, byteCounts, err := CreateProcess(&ProcessOptions{
		HCSSystem:         lcowUVM.ComputeSystem(),
		Process:           &specs.Process{Args: []string{"tar2vhd"}},
		CreateInUtilityVm: true,
		Stdin:             reader,
		Stdout:            outFile,
		CopyTimeout:       2 * time.Minute,
	})
	if err != nil {
		return 0, fmt.Errorf("failed to start tar2vhd for %s: %s", targetVHDFile, err)
	}
	defer tar2vhd.Close()

	logrus.Debugf("hcsshim: TarToVhd: %s created, %d bytes", targetVHDFile, byteCounts.Out)
	success = true
	// err is nil here (checked above); return literal nil for clarity.
	return byteCounts.Out, nil
}

View File

@ -1,11 +0,0 @@
package lcow
import "github.com/Microsoft/hcsshim/internal/schema2"
// ProcessParameters adds the additional fields to hcsschema.ProcessParameters
// that are used by LCOW when creating a process.
type ProcessParameters struct {
	hcsschema.ProcessParameters

	// CreateInUtilityVm indicates the process should run in the utility VM
	// itself rather than inside a container.
	CreateInUtilityVm bool `json:",omitempty"`
	// OCIProcess carries the OCI runtime-spec process description, serialized
	// under the "OciProcess" key.
	OCIProcess interface{} `json:"OciProcess,omitempty"`
}

View File

@ -1,75 +0,0 @@
package lcow
import (
"fmt"
"io"
// "os"
"github.com/Microsoft/hcsshim/internal/uvm"
// specs "github.com/opencontainers/runtime-spec/specs-go"
// "github.com/sirupsen/logrus"
)
// VhdToTar does what is says - it exports a VHD in a specified
// folder (either a read-only layer.vhd, or a read-write scratch vhdx) to a
// ReadCloser containing a tar-stream of the layers contents.
//
// NOTE(review): this is currently an unimplemented stub; the intended
// implementation is preserved below as commented-out code for reference.
func VhdToTar(lcowUVM *uvm.UtilityVM, vhdFile string, uvmMountPath string, isContainerScratch bool, vhdSize int64) (io.ReadCloser, error) {
	// Always fails today; all parameters are unused until this is implemented.
	return nil, fmt.Errorf("not implemented yet")

	// logrus.Debugf("hcsshim: VhdToTar: %s isScratch: %t", vhdFile, isContainerScratch)
	// if lcowUVM == nil {
	// 	return nil, fmt.Errorf("cannot VhdToTar as no utility VM is in configuration")
	// }
	// //defer uvm.DebugLCOWGCS()
	// vhdHandle, err := os.Open(vhdFile)
	// if err != nil {
	// 	return nil, fmt.Errorf("hcsshim: VhdToTar: failed to open %s: %s", vhdFile, err)
	// }
	// defer vhdHandle.Close()
	// logrus.Debugf("hcsshim: VhdToTar: exporting %s, size %d, isScratch %t", vhdHandle.Name(), vhdSize, isContainerScratch)
	// // Different binary depending on whether a RO layer or a RW scratch
	// command := "vhd2tar"
	// if isContainerScratch {
	// 	command = fmt.Sprintf("exportSandbox -path %s", uvmMountPath)
	// }
	// // tar2vhd, byteCounts, err := lcowUVM.CreateProcess(&uvm.ProcessOptions{
	// // 	Process: &specs.Process{Args: []string{"tar2vhd"}},
	// // 	Stdin:   reader,
	// // 	Stdout:  outFile,
	// // })
	// // Start the binary in the utility VM
	// proc, stdin, stdout, _, err := config.createLCOWUVMProcess(command)
	// if err != nil {
	// 	return nil, fmt.Errorf("hcsshim: VhdToTar: %s: failed to create utils process %s: %s", vhdHandle.Name(), command, err)
	// }
	// if !isContainerScratch {
	// 	// Send the VHD contents to the utility VM processes stdin handle if not a container scratch
	// 	logrus.Debugf("hcsshim: VhdToTar: copying the layer VHD into the utility VM")
	// 	if _, err = copyWithTimeout(stdin, vhdHandle, vhdSize, processOperationTimeoutSeconds, fmt.Sprintf("vhdtotarstream: sending %s to %s", vhdHandle.Name(), command)); err != nil {
	// 		proc.Close()
	// 		return nil, fmt.Errorf("hcsshim: VhdToTar: %s: failed to copyWithTimeout on the stdin pipe (to utility VM): %s", vhdHandle.Name(), err)
	// 	}
	// }
	// // Start a goroutine which copies the stdout (ie the tar stream)
	// reader, writer := io.Pipe()
	// go func() {
	// 	defer writer.Close()
	// 	defer proc.Close()
	// 	logrus.Debugf("hcsshim: VhdToTar: copying tar stream back from the utility VM")
	// 	bytes, err := copyWithTimeout(writer, stdout, vhdSize, processOperationTimeoutSeconds, fmt.Sprintf("vhdtotarstream: copy tarstream from %s", command))
	// 	if err != nil {
	// 		logrus.Errorf("hcsshim: VhdToTar: %s: copyWithTimeout on the stdout pipe (from utility VM) failed: %s", vhdHandle.Name(), err)
	// 	}
	// 	logrus.Debugf("hcsshim: VhdToTar: copied %d bytes of the tarstream of %s from the utility VM", bytes, vhdHandle.Name())
	// }()
	// // Return the read-side of the pipe connected to the goroutine which is reading from the stdout of the process in the utility VM
	// return reader, nil
}

View File

@ -26,11 +26,6 @@ const (
Uint32 = "uint32"
Uint64 = "uint64"
// HCS
HCSOperation = "hcs-op"
HCSOperationResult = "hcs-op-result"
// runhcs
VMShimOperation = "vmshim-op"

View File

@ -1,79 +0,0 @@
// Package ociwclayer provides functions for importing and exporting Windows
// container layers from and to their OCI tar representation.
package ociwclayer
import (
"io"
"path/filepath"
"github.com/Microsoft/go-winio/archive/tar"
"github.com/Microsoft/go-winio/backuptar"
"github.com/Microsoft/hcsshim"
)
// driverInfo is the zero-value driver descriptor shared by all operations in
// this package.
var driverInfo = hcsshim.DriverInfo{}

// ExportLayer writes an OCI layer tar stream from the provided on-disk layer.
// The caller must specify the parent layers, if any, ordered from lowest to
// highest layer.
//
// The layer will be mounted for this process, so the caller should ensure that
// it is not currently mounted.
func ExportLayer(w io.Writer, path string, parentLayerPaths []string) error {
	if err := hcsshim.ActivateLayer(driverInfo, path); err != nil {
		return err
	}
	defer hcsshim.DeactivateLayer(driverInfo, path)

	// Prepare and unprepare the layer to ensure that it has been initialized.
	if err := hcsshim.PrepareLayer(driverInfo, path, parentLayerPaths); err != nil {
		return err
	}
	if err := hcsshim.UnprepareLayer(driverInfo, path); err != nil {
		return err
	}

	r, err := hcsshim.NewLayerReader(driverInfo, path, parentLayerPaths)
	if err != nil {
		return err
	}

	// Always close the reader; report the write error first if both fail.
	writeErr := writeTarFromLayer(r, w)
	closeErr := r.Close()
	if writeErr != nil {
		return writeErr
	}
	return closeErr
}
// writeTarFromLayer drains the layer reader r into a tar stream written to w.
// Entries with no file info are emitted as whiteout files.
func writeTarFromLayer(r hcsshim.LayerReader, w io.Writer) error {
	tw := tar.NewWriter(w)
	for {
		name, size, fileInfo, err := r.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		if fileInfo != nil {
			// Regular entry: stream the backup data into the tar.
			if err := backuptar.WriteTarFileFromBackupStream(tw, r, name, size, fileInfo); err != nil {
				return err
			}
			continue
		}
		// Write a whiteout file.
		whiteout := filepath.Join(filepath.Dir(name), whiteoutPrefix+filepath.Base(name))
		if err := tw.WriteHeader(&tar.Header{Name: filepath.ToSlash(whiteout)}); err != nil {
			return err
		}
	}
	return tw.Close()
}

View File

@ -1,141 +0,0 @@
package ociwclayer
import (
"bufio"
"io"
"os"
"path"
"path/filepath"
"strings"
winio "github.com/Microsoft/go-winio"
"github.com/Microsoft/go-winio/archive/tar"
"github.com/Microsoft/go-winio/backuptar"
"github.com/Microsoft/hcsshim"
)
// whiteoutPrefix marks a tar entry as a whiteout: the named file should be
// removed from the layer rather than created.
const whiteoutPrefix = ".wh."

var (
	// mutatedFiles is a list of files that are mutated by the import process
	// and must be backed up and restored. Keys are layer-relative paths and
	// values are the backup file names created under the layer root.
	mutatedFiles = map[string]string{
		"UtilityVM/Files/EFI/Microsoft/Boot/BCD":      "bcd.bak",
		"UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG":  "bcd.log.bak",
		"UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG1": "bcd.log1.bak",
		"UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG2": "bcd.log2.bak",
	}
)
// ImportLayer reads a layer from an OCI layer tar stream and extracts it to the
// specified path. The caller must specify the parent layers, if any, ordered
// from lowest to highest layer.
//
// The caller must ensure that the thread or process has acquired backup and
// restore privileges.
//
// This function returns the total size of the layer's files, in bytes.
func ImportLayer(r io.Reader, path string, parentLayerPaths []string) (int64, error) {
	if err := os.MkdirAll(path, 0); err != nil {
		return 0, err
	}
	w, err := hcsshim.NewLayerWriter(hcsshim.DriverInfo{}, path, parentLayerPaths)
	if err != nil {
		return 0, err
	}

	// Always close the writer; the write error takes precedence over the
	// close error, and either one makes the import fail.
	size, writeErr := writeLayerFromTar(r, w, path)
	closeErr := w.Close()
	switch {
	case writeErr != nil:
		return 0, writeErr
	case closeErr != nil:
		return 0, closeErr
	}
	return size, nil
}
// writeLayerFromTar extracts the tar stream r into the layer writer w,
// handling whiteout entries (as removals), hard links, and regular
// files/directories. root is the on-disk layer path, used when backing up
// mutated files. It returns the cumulative size of the file entries written.
func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64, error) {
	t := tar.NewReader(r)
	hdr, err := t.Next()
	totalSize := int64(0)
	buf := bufio.NewWriter(nil)
	// The loop threads (hdr, err) through each branch; it exits when any
	// t.Next() (or helper) returns a non-nil error, io.EOF being success.
	for err == nil {
		base := path.Base(hdr.Name)
		if strings.HasPrefix(base, whiteoutPrefix) {
			// Whiteout entry: remove the named file from the layer.
			name := path.Join(path.Dir(hdr.Name), base[len(whiteoutPrefix):])
			err = w.Remove(filepath.FromSlash(name))
			if err != nil {
				return 0, err
			}
			hdr, err = t.Next()
		} else if hdr.Typeflag == tar.TypeLink {
			err = w.AddLink(filepath.FromSlash(hdr.Name), filepath.FromSlash(hdr.Linkname))
			if err != nil {
				return 0, err
			}
			hdr, err = t.Next()
		} else {
			var (
				name     string
				size     int64
				fileInfo *winio.FileBasicInfo
			)
			name, size, fileInfo, err = backuptar.FileInfoFromHeader(hdr)
			if err != nil {
				return 0, err
			}
			err = w.Add(filepath.FromSlash(name), fileInfo)
			if err != nil {
				return 0, err
			}
			// The helper consumes this entry's data (backing up mutated
			// files as needed) and returns the next header.
			hdr, err = writeBackupStreamFromTarAndSaveMutatedFiles(buf, w, t, hdr, root)
			totalSize += size
		}
	}
	if err != io.EOF {
		return 0, err
	}
	return totalSize, nil
}
// writeBackupStreamFromTarAndSaveMutatedFiles reads data from a tar stream and
// writes it to a backup stream, and also saves any files that will be mutated
// by the import layer process to a backup location.
//
// The deferred closures capture close/flush errors into the named return err
// (only if err is still nil), so their registration order matters: the buffer
// is flushed first, then the backup writer and file are closed.
func writeBackupStreamFromTarAndSaveMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) {
	var bcdBackup *os.File
	var bcdBackupWriter *winio.BackupFileWriter
	if backupPath, ok := mutatedFiles[hdr.Name]; ok {
		// This entry will be mutated later; tee a copy to a backup file under
		// the layer root so it can be restored.
		bcdBackup, err = os.Create(filepath.Join(root, backupPath))
		if err != nil {
			return nil, err
		}
		defer func() {
			cerr := bcdBackup.Close()
			if err == nil {
				err = cerr
			}
		}()

		bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false)
		defer func() {
			cerr := bcdBackupWriter.Close()
			if err == nil {
				err = cerr
			}
		}()

		buf.Reset(io.MultiWriter(w, bcdBackupWriter))
	} else {
		buf.Reset(w)
	}

	defer func() {
		ferr := buf.Flush()
		if err == nil {
			err = ferr
		}
	}()

	return backuptar.WriteBackupStreamFromTarFile(buf, t, hdr)
}

View File

@ -1,14 +0,0 @@
package ospath
import (
"path"
"path/filepath"
)
// Join joins paths using the target OS's path separator: backslash-style
// filepath semantics for "windows", forward-slash path semantics otherwise.
func Join(os string, elem ...string) string {
	switch os {
	case "windows":
		return filepath.Join(elem...)
	default:
		return path.Join(elem...)
	}
}

View File

@ -1,185 +0,0 @@
package regstate
import (
"os"
"testing"
)
// testKey is the registry state key used by all tests in this file.
var testKey = "runhcs-test-test-key"

// prepTest removes any state left behind by a previous run so each test
// starts from an empty key. A "does not exist" error is tolerated.
func prepTest(t *testing.T) {
	err := RemoveAll(testKey, true)
	if err != nil && !os.IsNotExist(err) {
		t.Fatal(err)
	}
}
// TestLifetime exercises the full lifecycle of a state entry: Set/Get before
// Create must fail, Create succeeds, values round-trip through Get/Set, and
// Remove succeeds exactly once.
func TestLifetime(t *testing.T) {
	prepTest(t)
	k, err := Open(testKey, true)
	if err != nil {
		t.Fatal(err)
	}
	// The key starts empty.
	ids, err := k.Enumerate()
	if err != nil {
		t.Fatal(err)
	}
	if len(ids) != 0 {
		t.Fatal("wrong count", len(ids))
	}
	id := "a/b/c"
	key := "key"
	// Set and Get must fail before the id exists.
	err = k.Set(id, key, 1)
	if err == nil {
		t.Fatal("expected error")
	}
	var i int
	err = k.Get(id, key, &i)
	if err == nil {
		t.Fatal("expected error")
	}
	err = k.Create(id, key, 2)
	if err != nil {
		t.Fatal(err)
	}
	// The created id must be enumerable.
	ids, err = k.Enumerate()
	if err != nil {
		t.Fatal(err)
	}
	if len(ids) != 1 {
		t.Fatal("wrong count", len(ids))
	}
	if ids[0] != id {
		t.Fatal("wrong value", ids[0])
	}
	// Initial value round-trips.
	err = k.Get(id, key, &i)
	if err != nil {
		t.Fatal(err)
	}
	if i != 2 {
		t.Fatal("got wrong value", i)
	}
	// Updated value round-trips.
	err = k.Set(id, key, 3)
	if err != nil {
		t.Fatal(err)
	}
	err = k.Get(id, key, &i)
	if err != nil {
		t.Fatal(err)
	}
	if i != 3 {
		t.Fatal("got wrong value", i)
	}
	// Remove works once; a second Remove must fail.
	err = k.Remove(id)
	if err != nil {
		t.Fatal(err)
	}
	err = k.Remove(id)
	if err == nil {
		t.Fatal("expected error")
	}
	ids, err = k.Enumerate()
	if err != nil {
		t.Fatal(err)
	}
	if len(ids) != 0 {
		t.Fatal("wrong count", len(ids))
	}
}
// TestBool verifies that a bool value marshals and unmarshals correctly.
func TestBool(t *testing.T) {
	prepTest(t)
	k, err := Open(testKey, true)
	if err != nil {
		t.Fatal(err)
	}
	id := "x"
	key := "y"
	err = k.Create(id, key, true)
	if err != nil {
		t.Fatal(err)
	}
	b := false
	err = k.Get(id, key, &b)
	if err != nil {
		t.Fatal(err)
	}
	if !b {
		t.Fatal("value did not marshal correctly")
	}
}

// TestInt verifies that an int value marshals and unmarshals correctly.
func TestInt(t *testing.T) {
	prepTest(t)
	k, err := Open(testKey, true)
	if err != nil {
		t.Fatal(err)
	}
	id := "x"
	key := "y"
	err = k.Create(id, key, 10)
	if err != nil {
		t.Fatal(err)
	}
	v := 0
	err = k.Get(id, key, &v)
	if err != nil {
		t.Fatal(err)
	}
	if v != 10 {
		t.Fatal("value did not marshal correctly")
	}
}

// TestString verifies that a string value marshals and unmarshals correctly.
func TestString(t *testing.T) {
	prepTest(t)
	k, err := Open(testKey, true)
	if err != nil {
		t.Fatal(err)
	}
	id := "x"
	key := "y"
	err = k.Create(id, key, "blah")
	if err != nil {
		t.Fatal(err)
	}
	v := ""
	err = k.Get(id, key, &v)
	if err != nil {
		t.Fatal(err)
	}
	if v != "blah" {
		t.Fatal("value did not marshal correctly")
	}
}

// TestJson verifies that a struct value marshals and unmarshals correctly
// (presumably via JSON encoding — the codec is implemented elsewhere).
func TestJson(t *testing.T) {
	prepTest(t)
	k, err := Open(testKey, true)
	if err != nil {
		t.Fatal(err)
	}
	id := "x"
	key := "y"
	v := struct{ X int }{5}
	err = k.Create(id, key, &v)
	if err != nil {
		t.Fatal(err)
	}
	// Zero the field before reading back to prove Get repopulates it.
	v.X = 0
	err = k.Get(id, key, &v)
	if err != nil {
		t.Fatal(err)
	}
	if v.X != 5 {
		t.Fatal("value did not marshal correctly: ", v)
	}
}

View File

@ -1,10 +0,0 @@
package requesttype
// These are constants for v2 schema modify requests.

// RequestType constants describing the operation carried by a modify request.
const (
	Add    = "Add"
	Remove = "Remove"
	PreAdd = "PreAdd" // For networking
)

View File

@ -1,17 +0,0 @@
package runhcs
import (
"testing"
)
func Test_SafePipePath(t *testing.T) {
tests := []string{"test", "test with spaces", "test/with\\\\.\\slashes", "test.with..dots..."}
expected := []string{"test", "test%20with%20spaces", "test%2Fwith%5C%5C.%5Cslashes", "test.with..dots..."}
for i, test := range tests {
actual := SafePipePath(test)
e := SafePipePrefix + expected[i]
if actual != e {
t.Fatalf("SafePipePath: actual '%s' != '%s'", actual, expected[i])
}
}
}

View File

@ -87,7 +87,7 @@ func OpenRoot(path string) (*os.File, error) {
func ntRelativePath(path string) ([]uint16, error) {
path = filepath.Clean(path)
if strings.Contains(":", path) {
if strings.Contains(path, ":") {
// Since alternate data streams must follow the file they
// are attached to, finding one here (out of order) is invalid.
return nil, errors.New("path contains invalid character `:`")

View File

@ -1,125 +0,0 @@
// +build admin
package safefile
import (
"os"
"path/filepath"
"syscall"
"testing"
)
// TestOpenRelative verifies that the *Relative helpers confine all file
// operations (open, mkdir, link, remove) to the given root handle and cannot
// be escaped via symlinks into a second "bad" root or via "..".
func TestOpenRelative(t *testing.T) {
	// badroot is the directory that operations must never reach.
	badroot, err := tempRoot()
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(badroot.Name())
	defer badroot.Close()

	root, err := tempRoot()
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(root.Name())
	defer root.Close()

	// Create a file
	f, err := OpenRelative("foo", root, 0, syscall.FILE_SHARE_READ, FILE_CREATE, 0)
	if err != nil {
		t.Fatal(err)
	}
	f.Close()

	// Create a directory
	err = MkdirRelative("dir", root)
	if err != nil {
		t.Fatal(err)
	}

	// Create a file in the bad root
	f, err = os.Create(filepath.Join(badroot.Name(), "badfile"))
	if err != nil {
		t.Fatal(err)
	}
	f.Close()

	// Create a directory symlink to the bad root
	err = os.Symlink(badroot.Name(), filepath.Join(root.Name(), "dsymlink"))
	if err != nil {
		t.Fatal(err)
	}

	// Create a file symlink to the bad file
	err = os.Symlink(filepath.Join(badroot.Name(), "badfile"), filepath.Join(root.Name(), "symlink"))
	if err != nil {
		t.Fatal(err)
	}

	// Make sure opens cannot happen through the symlink
	f, err = OpenRelative("dsymlink/foo", root, 0, syscall.FILE_SHARE_READ, FILE_CREATE, 0)
	if err == nil {
		f.Close()
		t.Fatal("created file in wrong tree!")
	}
	t.Log(err)

	// Check again using EnsureNotReparsePointRelative
	err = EnsureNotReparsePointRelative("dsymlink", root)
	if err == nil {
		t.Fatal("reparse check should have failed")
	}
	t.Log(err)

	// Make sure links work
	err = LinkRelative("foo", root, "hardlink", root)
	if err != nil {
		t.Fatal(err)
	}

	// Even inside directories
	err = LinkRelative("foo", root, "dir/bar", root)
	if err != nil {
		t.Fatal(err)
	}

	// Make sure links cannot happen through the symlink
	err = LinkRelative("foo", root, "dsymlink/hardlink", root)
	if err == nil {
		f.Close()
		t.Fatal("created link in wrong tree!")
	}
	t.Log(err)

	// In either direction
	err = LinkRelative("dsymlink/badfile", root, "bar", root)
	if err == nil {
		f.Close()
		t.Fatal("created link in wrong tree!")
	}
	t.Log(err)

	// Make sure remove cannot happen through the symlink
	err = RemoveRelative("symlink/badfile", root)
	if err == nil {
		t.Fatal("remove in wrong tree!")
	}

	// Remove the symlink
	err = RemoveAllRelative("symlink", root)
	if err != nil {
		t.Fatal(err)
	}

	// Make sure it's not possible to escape with .. (NT doesn't support .. at the kernel level)
	f, err = OpenRelative("..", root, syscall.GENERIC_READ, syscall.FILE_SHARE_READ, FILE_OPEN, 0)
	if err == nil {
		t.Fatal("escaped the directory")
	}
	t.Log(err)

	// Should not have touched the other directory
	if _, err = os.Lstat(filepath.Join(badroot.Name(), "badfile")); err != nil {
		t.Fatal(err)
	}
}

View File

@ -1,53 +0,0 @@
package safefile
import (
"io/ioutil"
"os"
"path/filepath"
"syscall"
"testing"
winio "github.com/Microsoft/go-winio"
)
// tempRoot creates a temporary directory and opens it as a root handle for
// use with the *Relative helpers. The caller is responsible for closing the
// returned handle and removing the directory.
func tempRoot() (*os.File, error) {
	dir, err := ioutil.TempDir("", "hcsshim-test")
	if err != nil {
		return nil, err
	}
	root, err := OpenRoot(dir)
	if err != nil {
		// Don't leave the directory behind if we can't open it as a root.
		os.Remove(dir)
		return nil, err
	}
	return root, nil
}
// TestRemoveRelativeReadOnly verifies that RemoveRelative can delete a file
// even after it has been marked read-only.
func TestRemoveRelativeReadOnly(t *testing.T) {
	root, err := tempRoot()
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(root.Name())
	defer root.Close()

	p := filepath.Join(root.Name(), "foo")
	f, err := os.Create(p)
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()

	// Mark the file read-only before attempting the removal.
	bi := winio.FileBasicInfo{}
	bi.FileAttributes = syscall.FILE_ATTRIBUTE_READONLY
	err = winio.SetFileBasicInfo(f, &bi)
	if err != nil {
		t.Fatal(err)
	}
	f.Close()

	err = RemoveRelative("foo", root)
	if err != nil {
		t.Fatal(err)
	}
}

View File

@ -1,81 +0,0 @@
// +build windows
package schemaversion
import (
"encoding/json"
"fmt"
"github.com/Microsoft/hcsshim/internal/schema2"
"github.com/Microsoft/hcsshim/osversion"
"github.com/sirupsen/logrus"
)
// SchemaV10 makes it easy for callers to get a v1.0 schema version object.
func SchemaV10() *hcsschema.Version {
	return &hcsschema.Version{Major: 1, Minor: 0}
}
// SchemaV21 makes it easy for callers to get a v2.1 schema version object.
func SchemaV21() *hcsschema.Version {
	return &hcsschema.Version{Major: 2, Minor: 1}
}
// IsSupported determines if a given schema version is supported on this
// Windows build. v1.0 is always supported; v2.1 requires RS5 or later.
func IsSupported(sv *hcsschema.Version) error {
	if IsV10(sv) {
		return nil
	}
	if IsV21(sv) {
		if osversion.Get().Build < osversion.RS5 {
			return fmt.Errorf("unsupported on this Windows build")
		}
		return nil
	}
	return fmt.Errorf("unknown schema version %s", String(sv))
}
// IsV10 determines if a given schema version object is 1.0. This was the only thing
// supported in RS1..3. It lives on in RS5, but will be deprecated in a future release.
func IsV10(sv *hcsschema.Version) bool {
	return sv.Major == 1 && sv.Minor == 0
}
// IsV21 determines if a given schema version object is 2.1. This was introduced in
// RS4, but not fully implemented. Recommended for applications using HCS in RS5
// onwards.
func IsV21(sv *hcsschema.Version) bool {
	return sv.Major == 2 && sv.Minor == 1
}
// String returns a JSON encoding of a schema version object, or the empty
// string if it cannot be marshalled.
func String(sv *hcsschema.Version) string {
	b, err := json.Marshal(sv)
	if err != nil {
		return ""
	}
	// string(b) rather than string(b[:]) — the full-slice expression was
	// redundant.
	return string(b)
}
// DetermineSchemaVersion works out what schema version to use based on build
// and requested option: the platform default (v2.1 on RS5+, else v1.0) unless
// the caller requested a supported version explicitly.
func DetermineSchemaVersion(requestedSV *hcsschema.Version) *hcsschema.Version {
	platformDefault := SchemaV10()
	if osversion.Get().Build >= osversion.RS5 {
		platformDefault = SchemaV21()
	}
	if requestedSV == nil {
		return platformDefault
	}
	if err := IsSupported(requestedSV); err != nil {
		logrus.Warnf("Ignoring unsupported requested schema version %+v", requestedSV)
		return platformDefault
	}
	return requestedSV
}

View File

@ -1,63 +0,0 @@
package schemaversion
import (
"io/ioutil"
"testing"
"github.com/Microsoft/hcsshim/internal/schema2"
"github.com/Microsoft/hcsshim/osversion"
_ "github.com/Microsoft/hcsshim/test/functional/manifest"
"github.com/sirupsen/logrus"
)
// Silence logrus output (DetermineSchemaVersion warns on unsupported requests).
func init() {
	logrus.SetOutput(ioutil.Discard)
}

// TestDetermineSchemaVersion verifies version selection and support checks on
// both sides of the RS5 boundary: RS5+ defaults to v2.1 and honors explicit
// requests; pre-RS5 always resolves to v1.0 and rejects v2.1 as unsupported.
func TestDetermineSchemaVersion(t *testing.T) {
	osv := osversion.Get()

	if osv.Build >= osversion.RS5 {
		if sv := DetermineSchemaVersion(nil); !IsV21(sv) {
			t.Fatalf("expected v2")
		}
		if sv := DetermineSchemaVersion(SchemaV21()); !IsV21(sv) {
			t.Fatalf("expected requested v2")
		}
		if sv := DetermineSchemaVersion(SchemaV10()); !IsV10(sv) {
			t.Fatalf("expected requested v1")
		}
		// An unknown (zero) version falls back to the platform default.
		if sv := DetermineSchemaVersion(&hcsschema.Version{}); !IsV21(sv) {
			t.Fatalf("expected requested v2")
		}
		if err := IsSupported(SchemaV21()); err != nil {
			t.Fatalf("v2 expected to be supported")
		}
		if err := IsSupported(SchemaV10()); err != nil {
			t.Fatalf("v1 expected to be supported")
		}
	} else {
		if sv := DetermineSchemaVersion(nil); !IsV10(sv) {
			t.Fatalf("expected v1")
		}
		// Pre RS5 will downgrade to v1 even if request v2
		if sv := DetermineSchemaVersion(SchemaV21()); !IsV10(sv) {
			t.Fatalf("expected requested v1")
		}
		if sv := DetermineSchemaVersion(SchemaV10()); !IsV10(sv) {
			t.Fatalf("expected requested v1")
		}
		if sv := DetermineSchemaVersion(&hcsschema.Version{}); !IsV10(sv) {
			t.Fatalf("expected requested v1")
		}
		if err := IsSupported(SchemaV21()); err == nil {
			t.Fatalf("didn't expect v2 to be supported")
		}
		if err := IsSupported(SchemaV10()); err != nil {
			t.Fatalf("v1 expected to be supported")
		}
	}
}

View File

@ -1,19 +0,0 @@
package uvm
import "fmt"
const (
	// MaxVPMEMCount is the maximum number of VPMem devices that may be added to an LCOW
	// utility VM
	MaxVPMEMCount = 128

	// DefaultVPMEMCount is the default number of VPMem devices that may be added to an LCOW
	// utility VM if the create request doesn't specify how many.
	DefaultVPMEMCount = 64

	// DefaultVPMemSizeBytes is the default size of a VPMem device if the create request
	// doesn't specify.
	DefaultVPMemSizeBytes = 4 * 1024 * 1024 * 1024 // 4GB
)

// errNotSupported — presumably returned by UVM operations that are not
// available for the guest OS; verify at call sites.
var errNotSupported = fmt.Errorf("not supported")

View File

@ -1,11 +0,0 @@
package uvm
import (
"sync/atomic"
)
// ContainerCounter is used for where we layout things for a container in
// a utility VM. For WCOW it'll be C:\c\N\. For LCOW it'll be /run/gcs/c/N/.
// Each call atomically increments the counter and returns the new value.
func (uvm *UtilityVM) ContainerCounter() uint64 {
	return atomic.AddUint64(&uvm.containerCounter, 1)
}

View File

@ -1,62 +0,0 @@
package uvm
import (
"runtime"
)
// Options are the set of options passed to Create() to create a utility vm.
type Options struct {
	ID    string // Identifier for the uvm. Defaults to generated GUID.
	Owner string // Specifies the owner. Defaults to executable name.

	// AdditionHCSDocumentJSON is optional additional JSON to merge into the
	// HCS document prior to creation — confirm exact merge point at call site.
	AdditionHCSDocumentJSON string

	// MemorySizeInMB sets the UVM memory. If `0` will default to platform
	// default.
	MemorySizeInMB int32

	// Memory for UVM. Defaults to true. For physical backed memory, set to
	// false.
	AllowOvercommit bool

	// Memory for UVM. Defaults to false. For virtual memory with deferred
	// commit, set to true.
	EnableDeferredCommit bool

	// ProcessorCount sets the number of vCPU's. If `0` will default to platform
	// default.
	ProcessorCount int32
}
// ID returns the ID of the VM's compute system.
func (uvm *UtilityVM) ID() string {
	return uvm.hcsSystem.ID()
}
// OS returns the operating system of the utility VM ("linux" or "windows").
func (uvm *UtilityVM) OS() string {
	return uvm.operatingSystem
}
// Close terminates and releases resources associated with the utility VM.
func (uvm *UtilityVM) Close() error {
	// NOTE(review): the error from Terminate is intentionally discarded here;
	// hcsSystem.Close below is the authoritative cleanup — confirm.
	uvm.Terminate()

	// outputListener will only be nil for a Create -> Stop without a Start. In
	// this case we have no goroutine processing output so its safe to close the
	// channel here.
	if uvm.outputListener != nil {
		close(uvm.outputProcessingDone)
		uvm.outputListener.Close()
		uvm.outputListener = nil
	}
	err := uvm.hcsSystem.Close()
	uvm.hcsSystem = nil
	return err
}
// defaultProcessorCount picks the vCPU count used when the caller does not
// specify one: a single-CPU host gets one vCPU, every other host gets two.
func defaultProcessorCount() int32 {
	if runtime.NumCPU() > 1 {
		return 2
	}
	return 1
}

View File

@ -1,361 +0,0 @@
package uvm
import (
"encoding/binary"
"fmt"
"io"
"net"
"os"
"path/filepath"
"strings"
"github.com/Microsoft/hcsshim/internal/guid"
"github.com/Microsoft/hcsshim/internal/hcs"
"github.com/Microsoft/hcsshim/internal/mergemaps"
"github.com/Microsoft/hcsshim/internal/schema2"
"github.com/Microsoft/hcsshim/internal/schemaversion"
"github.com/Microsoft/hcsshim/internal/wclayer"
"github.com/Microsoft/hcsshim/osversion"
"github.com/linuxkit/virtsock/pkg/hvsock"
"github.com/sirupsen/logrus"
)
// PreferredRootFSType indicates how the LCOW utility VM's root file system is
// provided: as an initrd image or as a VHD.
type PreferredRootFSType int

const (
	PreferredRootFSTypeInitRd PreferredRootFSType = iota
	PreferredRootFSTypeVHD
)

// OutputHandler is used to process the output from the program run in the UVM.
type OutputHandler func(io.Reader)

const (
	// InitrdFile is the default file name for an initrd.img used to boot LCOW.
	InitrdFile = "initrd.img"
	// VhdFile is the default file name for a rootfs.vhd used to boot LCOW.
	VhdFile = "rootfs.vhd"
)
// OptionsLCOW are the set of options passed to CreateLCOW() to create a utility vm.
// It embeds the common *Options and adds the Linux-specific boot settings.
type OptionsLCOW struct {
	*Options

	BootFilesPath         string              // Folder in which kernel and root file system reside. Defaults to \Program Files\Linux Containers
	KernelFile            string              // Filename under `BootFilesPath` for the kernel. Defaults to `kernel`
	KernelDirect          bool                // Skip UEFI and boot directly to `kernel`
	RootFSFile            string              // Filename under `BootFilesPath` for the UVMs root file system. Defaults to `InitrdFile`
	KernelBootOptions     string              // Additional boot options for the kernel
	EnableGraphicsConsole bool                // If true, enable a graphics console for the utility VM
	ConsolePipe           string              // The named pipe path to use for the serial console. eg \\.\pipe\vmpipe
	SCSIControllerCount   uint32              // The number of SCSI controllers. Defaults to 1. Currently we only support 0 or 1.
	UseGuestConnection    bool                // Whether the HCS should connect to the UVM's GCS. Defaults to true
	ExecCommandLine       string              // The command line to exec from init. Defaults to GCS
	ForwardStdout         bool                // Whether stdout will be forwarded from the executed program. Defaults to false
	ForwardStderr         bool                // Whether stderr will be forwarded from the executed program. Defaults to true
	OutputHandler         OutputHandler       `json:"-"` // Controls how output received over HVSocket from the UVM is handled. Defaults to parsing output as logrus messages
	VPMemDeviceCount      uint32              // Number of VPMem devices. Defaults to `DefaultVPMEMCount`. Limit at 128. If booting UVM from VHD, device 0 is taken.
	VPMemSizeBytes        uint64              // Size of the VPMem devices. Defaults to `DefaultVPMemSizeBytes`.
	PreferredRootFSType   PreferredRootFSType // If `KernelFile` is `InitrdFile` use `PreferredRootFSTypeInitRd`. If `KernelFile` is `VhdFile` use `PreferredRootFSTypeVHD`
}
// NewDefaultOptionsLCOW creates the default options for a bootable version of
// LCOW.
//
// `id` the ID of the compute system. If not passed will generate a new GUID.
//
// `owner` the owner of the compute system. If not passed will use the
// executable files name.
func NewDefaultOptionsLCOW(id, owner string) *OptionsLCOW {
	opts := &OptionsLCOW{
		Options: &Options{
			ID:                   id,
			Owner:                owner,
			MemorySizeInMB:       1024,
			AllowOvercommit:      true,
			EnableDeferredCommit: false,
			ProcessorCount:       defaultProcessorCount(),
		},
		BootFilesPath:         filepath.Join(os.Getenv("ProgramFiles"), "Linux Containers"),
		KernelFile:            "kernel",
		KernelDirect:          osversion.Get().Build >= 18286, // Use KernelDirect boot by default on all builds that support it.
		RootFSFile:            InitrdFile,
		KernelBootOptions:     "",
		EnableGraphicsConsole: false,
		ConsolePipe:           "",
		SCSIControllerCount:   1,
		UseGuestConnection:    true,
		ExecCommandLine:       fmt.Sprintf("/bin/gcs -log-format json -loglevel %s", logrus.StandardLogger().Level.String()),
		ForwardStdout:         false,
		ForwardStderr:         true,
		OutputHandler:         parseLogrus,
		VPMemDeviceCount:      DefaultVPMEMCount,
		VPMemSizeBytes:        DefaultVPMemSizeBytes,
		PreferredRootFSType:   PreferredRootFSTypeInitRd,
	}

	// Fill in the ID and owner defaults when the caller left them empty.
	if opts.ID == "" {
		opts.ID = guid.New().String()
	}
	if opts.Owner == "" {
		opts.Owner = filepath.Base(os.Args[0])
	}

	if _, err := os.Stat(filepath.Join(opts.BootFilesPath, VhdFile)); err == nil {
		// We have a rootfs.vhd in the boot files path. Use it over an initrd.img
		opts.RootFSFile = VhdFile
		opts.PreferredRootFSType = PreferredRootFSTypeVHD
	}

	return opts
}
// linuxLogVsockPort is the vsock port the guest writes its log output to.
const linuxLogVsockPort = 109

// CreateLCOW creates an HCS compute system representing a utility VM.
func CreateLCOW(opts *OptionsLCOW) (_ *UtilityVM, err error) {
	logrus.Debugf("uvm::CreateLCOW %+v", opts)

	// We don't serialize OutputHandler so if it is missing we need to put it back to the default.
	if opts.OutputHandler == nil {
		opts.OutputHandler = parseLogrus
	}

	uvm := &UtilityVM{
		id:                  opts.ID,
		owner:               opts.Owner,
		operatingSystem:     "linux",
		scsiControllerCount: opts.SCSIControllerCount,
		vpmemMaxCount:       opts.VPMemDeviceCount,
		vpmemMaxSizeBytes:   opts.VPMemSizeBytes,
	}

	// Validate that the boot files exist before doing anything expensive.
	kernelFullPath := filepath.Join(opts.BootFilesPath, opts.KernelFile)
	if _, err := os.Stat(kernelFullPath); os.IsNotExist(err) {
		return nil, fmt.Errorf("kernel: '%s' not found", kernelFullPath)
	}
	rootfsFullPath := filepath.Join(opts.BootFilesPath, opts.RootFSFile)
	if _, err := os.Stat(rootfsFullPath); os.IsNotExist(err) {
		return nil, fmt.Errorf("boot file: '%s' not found", rootfsFullPath)
	}

	if opts.SCSIControllerCount > 1 {
		return nil, fmt.Errorf("SCSI controller count must be 0 or 1") // Future extension here for up to 4
	}
	if opts.VPMemDeviceCount > MaxVPMEMCount {
		return nil, fmt.Errorf("vpmem device count cannot be greater than %d", MaxVPMEMCount)
	}
	if uvm.vpmemMaxCount > 0 {
		if opts.VPMemSizeBytes%4096 != 0 {
			return nil, fmt.Errorf("opts.VPMemSizeBytes must be a multiple of 4096")
		}
	} else {
		if opts.PreferredRootFSType == PreferredRootFSTypeVHD {
			return nil, fmt.Errorf("PreferredRootFSTypeVHD requires at least one VPMem device")
		}
	}
	if opts.KernelDirect && osversion.Get().Build < 18286 {
		return nil, fmt.Errorf("KernelDirectBoot is not support on builds older than 18286")
	}

	doc := &hcsschema.ComputeSystem{
		Owner:                             uvm.owner,
		SchemaVersion:                     schemaversion.SchemaV21(),
		ShouldTerminateOnLastHandleClosed: true,
		VirtualMachine: &hcsschema.VirtualMachine{
			StopOnReset: true,
			Chipset:     &hcsschema.Chipset{},
			ComputeTopology: &hcsschema.Topology{
				Memory: &hcsschema.Memory2{
					SizeInMB:             opts.MemorySizeInMB,
					AllowOvercommit:      opts.AllowOvercommit,
					EnableDeferredCommit: opts.EnableDeferredCommit,
				},
				Processor: &hcsschema.Processor2{
					Count: opts.ProcessorCount,
				},
			},
			Devices: &hcsschema.Devices{
				HvSocket: &hcsschema.HvSocket2{
					HvSocketConfig: &hcsschema.HvSocketSystemConfig{
						// Allow administrators and SYSTEM to bind to vsock sockets
						// so that we can create a GCS log socket.
						DefaultBindSecurityDescriptor: "D:P(A;;FA;;;SY)(A;;FA;;;BA)",
					},
				},
			},
		},
	}

	if opts.UseGuestConnection {
		doc.VirtualMachine.GuestConnection = &hcsschema.GuestConnection{
			UseVsock:            true,
			UseConnectedSuspend: true,
		}
	}

	if uvm.scsiControllerCount > 0 {
		// TODO: JTERRY75 - this should enumerate scsicount and add an entry per value.
		doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{
			"0": {
				Attachments: make(map[string]hcsschema.Attachment),
			},
		}
	}
	if uvm.vpmemMaxCount > 0 {
		doc.VirtualMachine.Devices.VirtualPMem = &hcsschema.VirtualPMemController{
			MaximumCount:     uvm.vpmemMaxCount,
			MaximumSizeBytes: uvm.vpmemMaxSizeBytes,
		}
	}

	var kernelArgs string
	switch opts.PreferredRootFSType {
	case PreferredRootFSTypeInitRd:
		if !opts.KernelDirect {
			kernelArgs = "initrd=/" + opts.RootFSFile
		}
	case PreferredRootFSTypeVHD:
		// Support for VPMem VHD(X) booting rather than initrd..
		kernelArgs = "root=/dev/pmem0 ro init=/init"
		imageFormat := "Vhd1"
		// BUGFIX: filepath.Ext returns the extension including the leading
		// dot, so the comparison must be against ".vhdx" — the previous
		// comparison against "vhdx" could never match and VHDX images were
		// always registered as "Vhd1".
		if strings.ToLower(filepath.Ext(opts.RootFSFile)) == ".vhdx" {
			imageFormat = "Vhdx"
		}
		doc.VirtualMachine.Devices.VirtualPMem.Devices = map[string]hcsschema.VirtualPMemDevice{
			"0": {
				HostPath:    rootfsFullPath,
				ReadOnly:    true,
				ImageFormat: imageFormat,
			},
		}
		if err := wclayer.GrantVmAccess(uvm.id, rootfsFullPath); err != nil {
			return nil, fmt.Errorf("failed to grantvmaccess to %s: %s", rootfsFullPath, err)
		}
		// Add to our internal structure
		uvm.vpmemDevices[0] = vpmemInfo{
			hostPath: opts.RootFSFile,
			uvmPath:  "/",
			refCount: 1,
		}
	}

	vmDebugging := false
	if opts.ConsolePipe != "" {
		vmDebugging = true
		kernelArgs += " 8250_core.nr_uarts=1 8250_core.skip_txen_test=1 console=ttyS0,115200"
		doc.VirtualMachine.Devices.ComPorts = map[string]hcsschema.ComPort{
			"0": { // Which is actually COM1
				NamedPipe: opts.ConsolePipe,
			},
		}
	} else {
		kernelArgs += " 8250_core.nr_uarts=0"
	}

	if opts.EnableGraphicsConsole {
		vmDebugging = true
		kernelArgs += " console=tty"
		doc.VirtualMachine.Devices.Keyboard = &hcsschema.Keyboard{}
		doc.VirtualMachine.Devices.EnhancedModeVideo = &hcsschema.EnhancedModeVideo{}
		doc.VirtualMachine.Devices.VideoMonitor = &hcsschema.VideoMonitor{}
	}

	if !vmDebugging {
		// Terminate the VM if there is a kernel panic.
		kernelArgs += " panic=-1 quiet"
	}

	if opts.KernelBootOptions != "" {
		kernelArgs += " " + opts.KernelBootOptions
	}

	// With default options, run GCS with stderr pointing to the vsock port
	// created below in order to forward guest logs to logrus.
	initArgs := "/bin/vsockexec"

	if opts.ForwardStdout {
		initArgs += fmt.Sprintf(" -o %d", linuxLogVsockPort)
	}

	if opts.ForwardStderr {
		initArgs += fmt.Sprintf(" -e %d", linuxLogVsockPort)
	}

	initArgs += " " + opts.ExecCommandLine

	if vmDebugging {
		// Launch a shell on the console.
		initArgs = `sh -c "` + initArgs + ` & exec sh"`
	}

	kernelArgs += ` pci=off brd.rd_nr=0 pmtmr=0 -- ` + initArgs

	if !opts.KernelDirect {
		doc.VirtualMachine.Chipset.Uefi = &hcsschema.Uefi{
			BootThis: &hcsschema.UefiBootEntry{
				DevicePath:    `\` + opts.KernelFile,
				DeviceType:    "VmbFs",
				VmbFsRootPath: opts.BootFilesPath,
				OptionalData:  kernelArgs,
			},
		}
	} else {
		doc.VirtualMachine.Chipset.LinuxKernelDirect = &hcsschema.LinuxKernelDirect{
			KernelFilePath: kernelFullPath,
			KernelCmdLine:  kernelArgs,
		}
		if opts.PreferredRootFSType == PreferredRootFSTypeInitRd {
			doc.VirtualMachine.Chipset.LinuxKernelDirect.InitRdPath = rootfsFullPath
		}
	}

	fullDoc, err := mergemaps.MergeJSON(doc, ([]byte)(opts.AdditionHCSDocumentJSON))
	if err != nil {
		return nil, fmt.Errorf("failed to merge additional JSON '%s': %s", opts.AdditionHCSDocumentJSON, err)
	}

	hcsSystem, err := hcs.CreateComputeSystem(uvm.id, fullDoc)
	if err != nil {
		logrus.Debugln("failed to create UVM: ", err)
		return nil, err
	}

	uvm.hcsSystem = hcsSystem
	defer func() {
		if err != nil {
			uvm.Close()
		}
	}()

	// Create a socket that the executed program can send to. This is usually
	// used by GCS to send log data.
	if opts.ForwardStdout || opts.ForwardStderr {
		uvm.outputHandler = opts.OutputHandler
		uvm.outputProcessingDone = make(chan struct{})
		uvm.outputListener, err = uvm.listenVsock(linuxLogVsockPort)
		if err != nil {
			return nil, err
		}
	}

	return uvm, nil
}
// listenVsock opens a hyper-v socket listener on the given vsock port of
// this utility VM, used to receive data (typically GCS log output) sent
// from inside the guest.
func (uvm *UtilityVM) listenVsock(port uint32) (net.Listener, error) {
	props, err := uvm.hcsSystem.Properties()
	if err != nil {
		return nil, err
	}
	vmGUID, err := hvsock.GUIDFromString(props.RuntimeID)
	if err != nil {
		return nil, err
	}
	// The template service GUID is a constant, so parsing cannot fail; the
	// error is deliberately discarded. The first four bytes are then
	// overwritten with the little-endian port number to derive the
	// per-port service ID.
	svcGUID, _ := hvsock.GUIDFromString("00000000-facb-11e6-bd58-64006a7986d3")
	binary.LittleEndian.PutUint32(svcGUID[0:4], port)
	addr := hvsock.Addr{VMID: vmGUID, ServiceID: svcGUID}
	return hvsock.Listen(addr)
}
// PMemMaxSizeBytes returns the maximum size, in bytes, of a PMEM layer
// attachable to this utility VM (LCOW only; WCOW utility VMs do not use
// VPMem devices).
func (uvm *UtilityVM) PMemMaxSizeBytes() uint64 {
	return uvm.vpmemMaxSizeBytes
}

View File

@ -1,25 +0,0 @@
package uvm
import (
"testing"
)
// Unit tests for negative testing of input to uvm.Create()

// TestCreateBadBootFilesPath verifies that CreateLCOW fails with the
// expected error when the boot-files path does not exist.
func TestCreateBadBootFilesPath(t *testing.T) {
	const wantErr = `kernel: 'c:\does\not\exist\I\hope\kernel' not found`
	opts := NewDefaultOptionsLCOW(t.Name(), "")
	opts.BootFilesPath = `c:\does\not\exist\I\hope`
	if _, err := CreateLCOW(opts); err == nil || err.Error() != wantErr {
		t.Fatal(err)
	}
}
// TestCreateWCOWBadLayerFolders verifies that CreateWCOW rejects options
// that carry fewer than two layer folders.
func TestCreateWCOWBadLayerFolders(t *testing.T) {
	opts := NewDefaultOptionsWCOW(t.Name(), "")
	_, err := CreateWCOW(opts)
	// Short-circuit evaluation guarantees err is non-nil when the second
	// operand runs, so no separate nil guard is needed there.
	if err == nil || err.Error() != `at least 2 LayerFolders must be supplied` {
		t.Fatal(err)
	}
}

View File

@ -1,186 +0,0 @@
package uvm
import (
"fmt"
"os"
"path/filepath"
"github.com/Microsoft/hcsshim/internal/guid"
"github.com/Microsoft/hcsshim/internal/hcs"
"github.com/Microsoft/hcsshim/internal/mergemaps"
"github.com/Microsoft/hcsshim/internal/schema2"
"github.com/Microsoft/hcsshim/internal/schemaversion"
"github.com/Microsoft/hcsshim/internal/uvmfolder"
"github.com/Microsoft/hcsshim/internal/wcow"
"github.com/sirupsen/logrus"
)
// OptionsWCOW are the set of options passed to CreateWCOW() to create a utility vm.
type OptionsWCOW struct {
	*Options

	// LayerFolders is the set of folders for base layers and scratch,
	// ordered from the top-most read-only layer down through the base
	// read-only layer, followed by the scratch folder.
	LayerFolders []string
}
// NewDefaultOptionsWCOW creates the default options for a bootable version of
// WCOW. The caller `MUST` set the `LayerFolders` path on the returned value.
//
// `id` the ID of the compute system. If not passed will generate a new GUID.
//
// `owner` the owner of the compute system. If not passed will use the
// executable files name.
func NewDefaultOptionsWCOW(id, owner string) *OptionsWCOW {
	// Normalize the identity fields up front so the options struct is
	// built in a single expression.
	if id == "" {
		id = guid.New().String()
	}
	if owner == "" {
		owner = filepath.Base(os.Args[0])
	}
	return &OptionsWCOW{
		Options: &Options{
			ID:                   id,
			Owner:                owner,
			MemorySizeInMB:       1024,
			AllowOvercommit:      true,
			EnableDeferredCommit: false,
			ProcessorCount:       defaultProcessorCount(),
		},
	}
}
// CreateWCOW creates an HCS compute system representing a utility VM.
//
// WCOW Notes:
//   - The scratch is always attached to SCSI 0:0
//
// Returns the in-memory UtilityVM wrapper; the compute system has been
// created in HCS but not started.
func CreateWCOW(opts *OptionsWCOW) (_ *UtilityVM, err error) {
	logrus.Debugf("uvm::CreateWCOW %+v", opts)

	if opts.Options == nil {
		opts.Options = &Options{}
	}

	uvm := &UtilityVM{
		id:                  opts.ID,
		owner:               opts.Owner,
		operatingSystem:     "windows",
		scsiControllerCount: 1,
		vsmbShares:          make(map[string]*vsmbShare),
	}

	// Need at least one read-only layer plus the writable scratch.
	if len(opts.LayerFolders) < 2 {
		return nil, fmt.Errorf("at least 2 LayerFolders must be supplied")
	}
	uvmFolder, err := uvmfolder.LocateUVMFolder(opts.LayerFolders)
	if err != nil {
		return nil, fmt.Errorf("failed to locate utility VM folder from layer folders: %s", err)
	}

	// TODO: BUGBUG Remove this. @jhowardmsft
	// It should be the responsiblity of the caller to do the creation and population.
	// - Update runhcs too (vm.go).
	// - Remove comment in function header
	// - Update tests that rely on this current behaviour.
	// Create the RW scratch in the top-most layer folder, creating the folder if it doesn't already exist.
	scratchFolder := opts.LayerFolders[len(opts.LayerFolders)-1]
	logrus.Debugf("uvm::CreateWCOW scratch folder: %s", scratchFolder)

	// Create the directory if it doesn't exist
	if _, err := os.Stat(scratchFolder); os.IsNotExist(err) {
		logrus.Debugf("uvm::CreateWCOW creating folder: %s ", scratchFolder)
		if err := os.MkdirAll(scratchFolder, 0777); err != nil {
			return nil, fmt.Errorf("failed to create utility VM scratch folder: %s", err)
		}
	}

	// Create sandbox.vhdx in the scratch folder based on the template, granting the correct permissions to it
	scratchPath := filepath.Join(scratchFolder, "sandbox.vhdx")
	if _, err := os.Stat(scratchPath); os.IsNotExist(err) {
		if err := wcow.CreateUVMScratch(uvmFolder, scratchFolder, uvm.id); err != nil {
			return nil, fmt.Errorf("failed to create scratch: %s", err)
		}
	}

	// Honour the caller-supplied processor count (NewDefaultOptionsWCOW sets
	// it deliberately). Previously this was ignored and defaultProcessorCount()
	// was always used. Fall back to the default only when the options were
	// built by hand and left the count unset (zero value).
	processorCount := opts.ProcessorCount
	if processorCount == 0 {
		processorCount = defaultProcessorCount()
	}

	doc := &hcsschema.ComputeSystem{
		Owner:                             uvm.owner,
		SchemaVersion:                     schemaversion.SchemaV21(),
		ShouldTerminateOnLastHandleClosed: true,
		VirtualMachine: &hcsschema.VirtualMachine{
			StopOnReset: true,
			Chipset: &hcsschema.Chipset{
				Uefi: &hcsschema.Uefi{
					BootThis: &hcsschema.UefiBootEntry{
						DevicePath: `\EFI\Microsoft\Boot\bootmgfw.efi`,
						DeviceType: "VmbFs",
					},
				},
			},
			ComputeTopology: &hcsschema.Topology{
				Memory: &hcsschema.Memory2{
					SizeInMB:        opts.MemorySizeInMB,
					AllowOvercommit: opts.AllowOvercommit,
					// EnableHotHint is not compatible with physical.
					EnableHotHint:        opts.AllowOvercommit,
					EnableDeferredCommit: opts.EnableDeferredCommit,
				},
				Processor: &hcsschema.Processor2{
					Count: processorCount,
				},
			},
			GuestConnection: &hcsschema.GuestConnection{},
			Devices: &hcsschema.Devices{
				Scsi: map[string]hcsschema.Scsi{
					"0": {
						Attachments: map[string]hcsschema.Attachment{
							"0": {
								Path:  scratchPath,
								Type_: "VirtualDisk",
							},
						},
					},
				},
				HvSocket: &hcsschema.HvSocket2{
					HvSocketConfig: &hcsschema.HvSocketSystemConfig{
						// Allow administrators and SYSTEM to bind to vsock sockets
						// so that we can create a GCS log socket.
						DefaultBindSecurityDescriptor: "D:P(A;;FA;;;SY)(A;;FA;;;BA)",
					},
				},
				VirtualSmb: &hcsschema.VirtualSmb{
					DirectFileMappingInMB: 1024, // Sensible default, but could be a tuning parameter somewhere
					Shares: []hcsschema.VirtualSmbShare{
						{
							Name: "os",
							Path: filepath.Join(uvmFolder, `UtilityVM\Files`),
							Options: &hcsschema.VirtualSmbShareOptions{
								ReadOnly:            true,
								PseudoOplocks:       true,
								TakeBackupPrivilege: true,
								CacheIo:             true,
								ShareRead:           true,
							},
						},
					},
				},
			},
		},
	}

	// Record the scratch attachment so SCSI slot 0:0 is known to be in use.
	uvm.scsiLocations[0][0].hostPath = doc.VirtualMachine.Devices.Scsi["0"].Attachments["0"].Path

	fullDoc, err := mergemaps.MergeJSON(doc, ([]byte)(opts.AdditionHCSDocumentJSON))
	if err != nil {
		return nil, fmt.Errorf("failed to merge additional JSON '%s': %s", opts.AdditionHCSDocumentJSON, err)
	}

	hcsSystem, err := hcs.CreateComputeSystem(uvm.id, fullDoc)
	if err != nil {
		logrus.Debugln("failed to create UVM: ", err)
		return nil, err
	}

	uvm.hcsSystem = hcsSystem
	return uvm, nil
}

View File

@ -1,6 +0,0 @@
package uvm
// Modify modifies the utility VM's compute system by forwarding the given
// modification document to HCS. The document is passed through opaquely;
// its shape is whatever the HCS schema expects for the resource being
// changed.
func (uvm *UtilityVM) Modify(hcsModificationDocument interface{}) error {
	return uvm.hcsSystem.Modify(hcsModificationDocument)
}

Some files were not shown because too many files have changed in this diff Show More