Windows: Updates Windows Vendoring

Updates Windows-dependent libraries for vendoring.
This commit is contained in:
Nathan Gieseker
2019-01-23 18:43:18 -08:00
parent a686cc4bd8
commit 9a429d8d25
839 changed files with 282895 additions and 774 deletions

View File

@ -0,0 +1,19 @@
package uvm
import "fmt"
const (
// MaxVPMEMCount is the maximum number of VPMem devices that may be added to an LCOW
// utility VM
MaxVPMEMCount = 128
// DefaultVPMEMCount is the default number of VPMem devices that may be added to an LCOW
// utility VM if the create request doesn't specify how many.
DefaultVPMEMCount = 64
// DefaultVPMemSizeBytes is the default size of a VPMem device if the create request
// doesn't specify.
DefaultVPMemSizeBytes = 4 * 1024 * 1024 * 1024 // 4GB
)
var errNotSupported = fmt.Errorf("not supported")

View File

@ -0,0 +1,11 @@
package uvm
import (
"sync/atomic"
)
// ContainerCounter returns the next counter value used to decide where things
// are laid out for a container in a utility VM. For WCOW the path will be
// C:\c\N\. For LCOW it will be /run/gcs/c/N/.
func (uvm *UtilityVM) ContainerCounter() uint64 {
return atomic.AddUint64(&uvm.containerCounter, 1)
}
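
A hypothetical helper (not part of this diff; assumes "fmt" is imported) sketching how a caller might turn the counter into the per-container layout path described above:

func containerRootPath(vm *UtilityVM) string {
    n := vm.ContainerCounter()
    if vm.OS() == "windows" {
        return fmt.Sprintf(`C:\c\%d\`, n)
    }
    return fmt.Sprintf("/run/gcs/c/%d/", n)
}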

View File

@ -0,0 +1,62 @@
package uvm
import (
"runtime"
)
// Options are the set of options passed to Create() to create a utility vm.
type Options struct {
ID string // Identifier for the uvm. Defaults to generated GUID.
Owner string // Specifies the owner. Defaults to executable name.
AdditionHCSDocumentJSON string // Optional additional JSON to merge into the HCS document prior to creation
// MemorySizeInMB sets the UVM memory. If `0`, defaults to the platform
// default.
MemorySizeInMB int32
// AllowOvercommit enables virtually backed (overcommitted) memory for the
// UVM. Defaults to true. For physically backed memory, set to false.
AllowOvercommit bool
// EnableDeferredCommit enables virtual memory with deferred commit for the
// UVM. Defaults to false. Set to true to defer the commit.
EnableDeferredCommit bool
// ProcessorCount sets the number of vCPUs. If `0`, defaults to the platform
// default.
ProcessorCount int32
}
// ID returns the ID of the VM's compute system.
func (uvm *UtilityVM) ID() string {
return uvm.hcsSystem.ID()
}
// OS returns the operating system of the utility VM.
func (uvm *UtilityVM) OS() string {
return uvm.operatingSystem
}
// Close terminates and releases resources associated with the utility VM.
func (uvm *UtilityVM) Close() error {
uvm.Terminate()
// outputListener will only be nil for a Create -> Stop without a Start. In
// this case we have no goroutine processing output so it's safe to close the
// channel here.
if uvm.outputListener != nil {
close(uvm.outputProcessingDone)
uvm.outputListener.Close()
uvm.outputListener = nil
}
err := uvm.hcsSystem.Close()
uvm.hcsSystem = nil
return err
}
func defaultProcessorCount() int32 {
if runtime.NumCPU() == 1 {
return 1
}
return 2
}
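
A minimal lifecycle sketch, not part of this diff (the uvm package is internal to hcsshim, so this assumes in-repo code; NewDefaultOptionsLCOW, CreateLCOW and Start are defined in later files of this commit):

func exampleLifecycle() error {
    vm, err := CreateLCOW(NewDefaultOptionsLCOW("example-uvm", ""))
    if err != nil {
        return err
    }
    // Close is safe whether or not Start was ever called, per the
    // outputListener handling in Close above.
    defer vm.Close()
    return vm.Start()
}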

View File

@ -0,0 +1,361 @@
package uvm
import (
"encoding/binary"
"fmt"
"io"
"net"
"os"
"path/filepath"
"strings"
"github.com/Microsoft/hcsshim/internal/guid"
"github.com/Microsoft/hcsshim/internal/hcs"
"github.com/Microsoft/hcsshim/internal/mergemaps"
"github.com/Microsoft/hcsshim/internal/schema2"
"github.com/Microsoft/hcsshim/internal/schemaversion"
"github.com/Microsoft/hcsshim/internal/wclayer"
"github.com/Microsoft/hcsshim/osversion"
"github.com/linuxkit/virtsock/pkg/hvsock"
"github.com/sirupsen/logrus"
)
type PreferredRootFSType int
const (
PreferredRootFSTypeInitRd PreferredRootFSType = iota
PreferredRootFSTypeVHD
)
// OutputHandler is used to process the output from the program run in the UVM.
type OutputHandler func(io.Reader)
const (
// InitrdFile is the default file name for an initrd.img used to boot LCOW.
InitrdFile = "initrd.img"
// VhdFile is the default file name for a rootfs.vhd used to boot LCOW.
VhdFile = "rootfs.vhd"
)
// OptionsLCOW are the set of options passed to CreateLCOW() to create a utility vm.
type OptionsLCOW struct {
*Options
BootFilesPath string // Folder in which kernel and root file system reside. Defaults to \Program Files\Linux Containers
KernelFile string // Filename under `BootFilesPath` for the kernel. Defaults to `kernel`
KernelDirect bool // Skip UEFI and boot directly to `kernel`
RootFSFile string // Filename under `BootFilesPath` for the UVM's root file system. Defaults to `InitrdFile`
KernelBootOptions string // Additional boot options for the kernel
EnableGraphicsConsole bool // If true, enable a graphics console for the utility VM
ConsolePipe string // The named pipe path to use for the serial console. eg \\.\pipe\vmpipe
SCSIControllerCount uint32 // The number of SCSI controllers. Defaults to 1. Currently we only support 0 or 1.
UseGuestConnection bool // Whether the HCS should connect to the UVM's GCS. Defaults to true
ExecCommandLine string // The command line to exec from init. Defaults to GCS
ForwardStdout bool // Whether stdout will be forwarded from the executed program. Defaults to false
ForwardStderr bool // Whether stderr will be forwarded from the executed program. Defaults to true
OutputHandler OutputHandler `json:"-"` // Controls how output received over HVSocket from the UVM is handled. Defaults to parsing output as logrus messages
VPMemDeviceCount uint32 // Number of VPMem devices. Defaults to `DefaultVPMEMCount`. Limit at 128. If booting UVM from VHD, device 0 is taken.
VPMemSizeBytes uint64 // Size of the VPMem devices. Defaults to `DefaultVPMemSizeBytes`.
PreferredRootFSType PreferredRootFSType // If `KernelFile` is `InitrdFile` use `PreferredRootFSTypeInitRd`. If `KernelFile` is `VhdFile` use `PreferredRootFSTypeVHD`
}
// NewDefaultOptionsLCOW creates the default options for a bootable version of
// LCOW.
//
// `id` the ID of the compute system. If not passed will generate a new GUID.
//
// `owner` the owner of the compute system. If not passed will use the
// executable file's name.
func NewDefaultOptionsLCOW(id, owner string) *OptionsLCOW {
opts := &OptionsLCOW{
Options: &Options{
ID: id,
Owner: owner,
MemorySizeInMB: 1024,
AllowOvercommit: true,
EnableDeferredCommit: false,
ProcessorCount: defaultProcessorCount(),
},
BootFilesPath: filepath.Join(os.Getenv("ProgramFiles"), "Linux Containers"),
KernelFile: "kernel",
KernelDirect: osversion.Get().Build >= 18286, // Use KernelDirect boot by default on all builds that support it.
RootFSFile: InitrdFile,
KernelBootOptions: "",
EnableGraphicsConsole: false,
ConsolePipe: "",
SCSIControllerCount: 1,
UseGuestConnection: true,
ExecCommandLine: fmt.Sprintf("/bin/gcs -log-format json -loglevel %s", logrus.StandardLogger().Level.String()),
ForwardStdout: false,
ForwardStderr: true,
OutputHandler: parseLogrus,
VPMemDeviceCount: DefaultVPMEMCount,
VPMemSizeBytes: DefaultVPMemSizeBytes,
PreferredRootFSType: PreferredRootFSTypeInitRd,
}
if opts.ID == "" {
opts.ID = guid.New().String()
}
if opts.Owner == "" {
opts.Owner = filepath.Base(os.Args[0])
}
if _, err := os.Stat(filepath.Join(opts.BootFilesPath, VhdFile)); err == nil {
// We have a rootfs.vhd in the boot files path. Use it over an initrd.img
opts.RootFSFile = VhdFile
opts.PreferredRootFSType = PreferredRootFSTypeVHD
}
return opts
}
const linuxLogVsockPort = 109
// CreateLCOW creates an HCS compute system representing a utility VM.
func CreateLCOW(opts *OptionsLCOW) (_ *UtilityVM, err error) {
logrus.Debugf("uvm::CreateLCOW %+v", opts)
// We don't serialize OutputHandler, so if it is missing we need to reset it to the default.
if opts.OutputHandler == nil {
opts.OutputHandler = parseLogrus
}
uvm := &UtilityVM{
id: opts.ID,
owner: opts.Owner,
operatingSystem: "linux",
scsiControllerCount: opts.SCSIControllerCount,
vpmemMaxCount: opts.VPMemDeviceCount,
vpmemMaxSizeBytes: opts.VPMemSizeBytes,
}
kernelFullPath := filepath.Join(opts.BootFilesPath, opts.KernelFile)
if _, err := os.Stat(kernelFullPath); os.IsNotExist(err) {
return nil, fmt.Errorf("kernel: '%s' not found", kernelFullPath)
}
rootfsFullPath := filepath.Join(opts.BootFilesPath, opts.RootFSFile)
if _, err := os.Stat(rootfsFullPath); os.IsNotExist(err) {
return nil, fmt.Errorf("boot file: '%s' not found", rootfsFullPath)
}
if opts.SCSIControllerCount > 1 {
return nil, fmt.Errorf("SCSI controller count must be 0 or 1") // Future extension here for up to 4
}
if opts.VPMemDeviceCount > MaxVPMEMCount {
return nil, fmt.Errorf("vpmem device count cannot be greater than %d", MaxVPMEMCount)
}
if uvm.vpmemMaxCount > 0 {
if opts.VPMemSizeBytes%4096 != 0 {
return nil, fmt.Errorf("opts.VPMemSizeBytes must be a multiple of 4096")
}
} else {
if opts.PreferredRootFSType == PreferredRootFSTypeVHD {
return nil, fmt.Errorf("PreferredRootFSTypeVHD requires at least one VPMem device")
}
}
if opts.KernelDirect && osversion.Get().Build < 18286 {
return nil, fmt.Errorf("KernelDirectBoot is not support on builds older than 18286")
}
doc := &hcsschema.ComputeSystem{
Owner: uvm.owner,
SchemaVersion: schemaversion.SchemaV21(),
ShouldTerminateOnLastHandleClosed: true,
VirtualMachine: &hcsschema.VirtualMachine{
StopOnReset: true,
Chipset: &hcsschema.Chipset{},
ComputeTopology: &hcsschema.Topology{
Memory: &hcsschema.Memory2{
SizeInMB: opts.MemorySizeInMB,
AllowOvercommit: opts.AllowOvercommit,
EnableDeferredCommit: opts.EnableDeferredCommit,
},
Processor: &hcsschema.Processor2{
Count: opts.ProcessorCount,
},
},
Devices: &hcsschema.Devices{
HvSocket: &hcsschema.HvSocket2{
HvSocketConfig: &hcsschema.HvSocketSystemConfig{
// Allow administrators and SYSTEM to bind to vsock sockets
// so that we can create a GCS log socket.
DefaultBindSecurityDescriptor: "D:P(A;;FA;;;SY)(A;;FA;;;BA)",
},
},
},
},
}
if opts.UseGuestConnection {
doc.VirtualMachine.GuestConnection = &hcsschema.GuestConnection{
UseVsock: true,
UseConnectedSuspend: true,
}
}
if uvm.scsiControllerCount > 0 {
// TODO: JTERRY75 - this should enumerate scsicount and add an entry per value.
doc.VirtualMachine.Devices.Scsi = map[string]hcsschema.Scsi{
"0": {
Attachments: make(map[string]hcsschema.Attachment),
},
}
}
if uvm.vpmemMaxCount > 0 {
doc.VirtualMachine.Devices.VirtualPMem = &hcsschema.VirtualPMemController{
MaximumCount: uvm.vpmemMaxCount,
MaximumSizeBytes: uvm.vpmemMaxSizeBytes,
}
}
var kernelArgs string
switch opts.PreferredRootFSType {
case PreferredRootFSTypeInitRd:
if !opts.KernelDirect {
kernelArgs = "initrd=/" + opts.RootFSFile
}
case PreferredRootFSTypeVHD:
// Support for VPMem VHD(X) booting rather than initrd.
kernelArgs = "root=/dev/pmem0 ro init=/init"
imageFormat := "Vhd1"
if strings.ToLower(filepath.Ext(opts.RootFSFile)) == ".vhdx" { // filepath.Ext includes the leading dot
imageFormat = "Vhdx"
}
doc.VirtualMachine.Devices.VirtualPMem.Devices = map[string]hcsschema.VirtualPMemDevice{
"0": {
HostPath: rootfsFullPath,
ReadOnly: true,
ImageFormat: imageFormat,
},
}
if err := wclayer.GrantVmAccess(uvm.id, rootfsFullPath); err != nil {
return nil, fmt.Errorf("failed to grantvmaccess to %s: %s", rootfsFullPath, err)
}
// Add to our internal structure
uvm.vpmemDevices[0] = vpmemInfo{
hostPath: opts.RootFSFile,
uvmPath: "/",
refCount: 1,
}
}
vmDebugging := false
if opts.ConsolePipe != "" {
vmDebugging = true
kernelArgs += " 8250_core.nr_uarts=1 8250_core.skip_txen_test=1 console=ttyS0,115200"
doc.VirtualMachine.Devices.ComPorts = map[string]hcsschema.ComPort{
"0": { // Which is actually COM1
NamedPipe: opts.ConsolePipe,
},
}
} else {
kernelArgs += " 8250_core.nr_uarts=0"
}
if opts.EnableGraphicsConsole {
vmDebugging = true
kernelArgs += " console=tty"
doc.VirtualMachine.Devices.Keyboard = &hcsschema.Keyboard{}
doc.VirtualMachine.Devices.EnhancedModeVideo = &hcsschema.EnhancedModeVideo{}
doc.VirtualMachine.Devices.VideoMonitor = &hcsschema.VideoMonitor{}
}
if !vmDebugging {
// Terminate the VM if there is a kernel panic.
kernelArgs += " panic=-1 quiet"
}
if opts.KernelBootOptions != "" {
kernelArgs += " " + opts.KernelBootOptions
}
// With default options, run GCS with stderr pointing to the vsock port
// created below in order to forward guest logs to logrus.
initArgs := "/bin/vsockexec"
if opts.ForwardStdout {
initArgs += fmt.Sprintf(" -o %d", linuxLogVsockPort)
}
if opts.ForwardStderr {
initArgs += fmt.Sprintf(" -e %d", linuxLogVsockPort)
}
initArgs += " " + opts.ExecCommandLine
if vmDebugging {
// Launch a shell on the console.
initArgs = `sh -c "` + initArgs + ` & exec sh"`
}
kernelArgs += ` pci=off brd.rd_nr=0 pmtmr=0 -- ` + initArgs
if !opts.KernelDirect {
doc.VirtualMachine.Chipset.Uefi = &hcsschema.Uefi{
BootThis: &hcsschema.UefiBootEntry{
DevicePath: `\` + opts.KernelFile,
DeviceType: "VmbFs",
VmbFsRootPath: opts.BootFilesPath,
OptionalData: kernelArgs,
},
}
} else {
doc.VirtualMachine.Chipset.LinuxKernelDirect = &hcsschema.LinuxKernelDirect{
KernelFilePath: kernelFullPath,
KernelCmdLine: kernelArgs,
}
if opts.PreferredRootFSType == PreferredRootFSTypeInitRd {
doc.VirtualMachine.Chipset.LinuxKernelDirect.InitRdPath = rootfsFullPath
}
}
fullDoc, err := mergemaps.MergeJSON(doc, ([]byte)(opts.AdditionHCSDocumentJSON))
if err != nil {
return nil, fmt.Errorf("failed to merge additional JSON '%s': %s", opts.AdditionHCSDocumentJSON, err)
}
hcsSystem, err := hcs.CreateComputeSystem(uvm.id, fullDoc)
if err != nil {
logrus.Debugln("failed to create UVM: ", err)
return nil, err
}
uvm.hcsSystem = hcsSystem
defer func() {
if err != nil {
uvm.Close()
}
}()
// Create a socket that the executed program can send to. This is usually
// used by GCS to send log data.
if opts.ForwardStdout || opts.ForwardStderr {
uvm.outputHandler = opts.OutputHandler
uvm.outputProcessingDone = make(chan struct{})
uvm.outputListener, err = uvm.listenVsock(linuxLogVsockPort)
if err != nil {
return nil, err
}
}
return uvm, nil
}
func (uvm *UtilityVM) listenVsock(port uint32) (net.Listener, error) {
properties, err := uvm.hcsSystem.Properties()
if err != nil {
return nil, err
}
vmID, err := hvsock.GUIDFromString(properties.RuntimeID)
if err != nil {
return nil, err
}
serviceID, _ := hvsock.GUIDFromString("00000000-facb-11e6-bd58-64006a7986d3")
binary.LittleEndian.PutUint32(serviceID[0:4], port)
return hvsock.Listen(hvsock.Addr{VMID: vmID, ServiceID: serviceID})
}
// PMemMaxSizeBytes returns the maximum size of a PMEM layer (LCOW)
func (uvm *UtilityVM) PMemMaxSizeBytes() uint64 {
return uvm.vpmemMaxSizeBytes
}
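
A hedged usage sketch for the LCOW path (hypothetical function; assumes the default boot files exist under %ProgramFiles%\Linux Containers):

func bootLCOW() (*UtilityVM, error) {
    opts := NewDefaultOptionsLCOW("lcow-example", "")
    opts.MemorySizeInMB = 2048 // override the 1024 default
    vm, err := CreateLCOW(opts)
    if err != nil {
        return nil, err
    }
    // Start (defined later in this commit) begins processing the vsock log
    // listener created above when ForwardStdout/ForwardStderr are set.
    if err := vm.Start(); err != nil {
        vm.Close()
        return nil, err
    }
    return vm, nil
}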

View File

@ -0,0 +1,25 @@
package uvm
import (
"testing"
)
// Unit tests for negative testing of input to uvm.Create()
func TestCreateBadBootFilesPath(t *testing.T) {
opts := NewDefaultOptionsLCOW(t.Name(), "")
opts.BootFilesPath = `c:\does\not\exist\I\hope`
_, err := CreateLCOW(opts)
if err == nil || err.Error() != `kernel: 'c:\does\not\exist\I\hope\kernel' not found` {
t.Fatal(err)
}
}
func TestCreateWCOWBadLayerFolders(t *testing.T) {
opts := NewDefaultOptionsWCOW(t.Name(), "")
_, err := CreateWCOW(opts)
if err == nil || err.Error() != `at least 2 LayerFolders must be supplied` {
t.Fatal(err)
}
}

View File

@ -0,0 +1,186 @@
package uvm
import (
"fmt"
"os"
"path/filepath"
"github.com/Microsoft/hcsshim/internal/guid"
"github.com/Microsoft/hcsshim/internal/hcs"
"github.com/Microsoft/hcsshim/internal/mergemaps"
"github.com/Microsoft/hcsshim/internal/schema2"
"github.com/Microsoft/hcsshim/internal/schemaversion"
"github.com/Microsoft/hcsshim/internal/uvmfolder"
"github.com/Microsoft/hcsshim/internal/wcow"
"github.com/sirupsen/logrus"
)
// OptionsWCOW are the set of options passed to CreateWCOW() to create a utility vm.
type OptionsWCOW struct {
*Options
LayerFolders []string // Set of folders for base layers and scratch. Ordered from top most read-only through base read-only layer, followed by scratch
}
// NewDefaultOptionsWCOW creates the default options for a bootable version of
// WCOW. The caller `MUST` set the `LayerFolders` path on the returned value.
//
// `id` the ID of the compute system. If not passed will generate a new GUID.
//
// `owner` the owner of the compute system. If not passed will use the
// executable file's name.
func NewDefaultOptionsWCOW(id, owner string) *OptionsWCOW {
opts := &OptionsWCOW{
Options: &Options{
ID: id,
Owner: owner,
MemorySizeInMB: 1024,
AllowOvercommit: true,
EnableDeferredCommit: false,
ProcessorCount: defaultProcessorCount(),
},
}
if opts.ID == "" {
opts.ID = guid.New().String()
}
if opts.Owner == "" {
opts.Owner = filepath.Base(os.Args[0])
}
return opts
}
// CreateWCOW creates an HCS compute system representing a utility VM.
//
// WCOW Notes:
// - The scratch is always attached to SCSI 0:0
//
func CreateWCOW(opts *OptionsWCOW) (_ *UtilityVM, err error) {
logrus.Debugf("uvm::CreateWCOW %+v", opts)
if opts.Options == nil {
opts.Options = &Options{}
}
uvm := &UtilityVM{
id: opts.ID,
owner: opts.Owner,
operatingSystem: "windows",
scsiControllerCount: 1,
vsmbShares: make(map[string]*vsmbShare),
}
if len(opts.LayerFolders) < 2 {
return nil, fmt.Errorf("at least 2 LayerFolders must be supplied")
}
uvmFolder, err := uvmfolder.LocateUVMFolder(opts.LayerFolders)
if err != nil {
return nil, fmt.Errorf("failed to locate utility VM folder from layer folders: %s", err)
}
// TODO: BUGBUG Remove this. @jhowardmsft
// It should be the responsibility of the caller to do the creation and population.
// - Update runhcs too (vm.go).
// - Remove comment in function header
// - Update tests that rely on this current behaviour.
// Create the RW scratch in the top-most layer folder, creating the folder if it doesn't already exist.
scratchFolder := opts.LayerFolders[len(opts.LayerFolders)-1]
logrus.Debugf("uvm::CreateWCOW scratch folder: %s", scratchFolder)
// Create the directory if it doesn't exist
if _, err := os.Stat(scratchFolder); os.IsNotExist(err) {
logrus.Debugf("uvm::CreateWCOW creating folder: %s ", scratchFolder)
if err := os.MkdirAll(scratchFolder, 0777); err != nil {
return nil, fmt.Errorf("failed to create utility VM scratch folder: %s", err)
}
}
// Create sandbox.vhdx in the scratch folder based on the template, granting the correct permissions to it
scratchPath := filepath.Join(scratchFolder, "sandbox.vhdx")
if _, err := os.Stat(scratchPath); os.IsNotExist(err) {
if err := wcow.CreateUVMScratch(uvmFolder, scratchFolder, uvm.id); err != nil {
return nil, fmt.Errorf("failed to create scratch: %s", err)
}
}
doc := &hcsschema.ComputeSystem{
Owner: uvm.owner,
SchemaVersion: schemaversion.SchemaV21(),
ShouldTerminateOnLastHandleClosed: true,
VirtualMachine: &hcsschema.VirtualMachine{
StopOnReset: true,
Chipset: &hcsschema.Chipset{
Uefi: &hcsschema.Uefi{
BootThis: &hcsschema.UefiBootEntry{
DevicePath: `\EFI\Microsoft\Boot\bootmgfw.efi`,
DeviceType: "VmbFs",
},
},
},
ComputeTopology: &hcsschema.Topology{
Memory: &hcsschema.Memory2{
SizeInMB: opts.MemorySizeInMB,
AllowOvercommit: opts.AllowOvercommit,
// EnableHotHint is not compatible with physical.
EnableHotHint: opts.AllowOvercommit,
EnableDeferredCommit: opts.EnableDeferredCommit,
},
Processor: &hcsschema.Processor2{
Count: defaultProcessorCount(),
},
},
GuestConnection: &hcsschema.GuestConnection{},
Devices: &hcsschema.Devices{
Scsi: map[string]hcsschema.Scsi{
"0": {
Attachments: map[string]hcsschema.Attachment{
"0": {
Path: scratchPath,
Type_: "VirtualDisk",
},
},
},
},
HvSocket: &hcsschema.HvSocket2{
HvSocketConfig: &hcsschema.HvSocketSystemConfig{
// Allow administrators and SYSTEM to bind to vsock sockets
// so that we can create a GCS log socket.
DefaultBindSecurityDescriptor: "D:P(A;;FA;;;SY)(A;;FA;;;BA)",
},
},
VirtualSmb: &hcsschema.VirtualSmb{
DirectFileMappingInMB: 1024, // Sensible default, but could be a tuning parameter somewhere
Shares: []hcsschema.VirtualSmbShare{
{
Name: "os",
Path: filepath.Join(uvmFolder, `UtilityVM\Files`),
Options: &hcsschema.VirtualSmbShareOptions{
ReadOnly: true,
PseudoOplocks: true,
TakeBackupPrivilege: true,
CacheIo: true,
ShareRead: true,
},
},
},
},
},
},
}
uvm.scsiLocations[0][0].hostPath = doc.VirtualMachine.Devices.Scsi["0"].Attachments["0"].Path
fullDoc, err := mergemaps.MergeJSON(doc, ([]byte)(opts.AdditionHCSDocumentJSON))
if err != nil {
return nil, fmt.Errorf("failed to merge additional JSON '%s': %s", opts.AdditionHCSDocumentJSON, err)
}
hcsSystem, err := hcs.CreateComputeSystem(uvm.id, fullDoc)
if err != nil {
logrus.Debugln("failed to create UVM: ", err)
return nil, err
}
uvm.hcsSystem = hcsSystem
return uvm, nil
}
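
A corresponding WCOW sketch (hypothetical function and layer paths; LayerFolders is ordered top-most read-only layer first, scratch last):

func bootWCOW() (*UtilityVM, error) {
    opts := NewDefaultOptionsWCOW("wcow-example", "")
    opts.LayerFolders = []string{
        `C:\layers\base`,    // read-only layer containing UtilityVM\Files
        `C:\layers\scratch`, // scratch; sandbox.vhdx is created here if absent
    }
    vm, err := CreateWCOW(opts)
    if err != nil {
        return nil, err
    }
    if err := vm.Start(); err != nil {
        vm.Close()
        return nil, err
    }
    return vm, nil
}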

View File

@ -0,0 +1,6 @@
package uvm
// Modify modifies the compute system by sending a request to HCS.
func (uvm *UtilityVM) Modify(hcsModificationDocument interface{}) error {
return uvm.hcsSystem.Modify(hcsModificationDocument)
}

View File

@ -0,0 +1,251 @@
package uvm
import (
"fmt"
"path"
"github.com/Microsoft/hcsshim/hcn"
"github.com/Microsoft/hcsshim/internal/guestrequest"
"github.com/Microsoft/hcsshim/internal/guid"
"github.com/Microsoft/hcsshim/internal/hns"
"github.com/Microsoft/hcsshim/internal/requesttype"
"github.com/Microsoft/hcsshim/internal/schema1"
"github.com/Microsoft/hcsshim/internal/schema2"
"github.com/Microsoft/hcsshim/osversion"
"github.com/sirupsen/logrus"
)
// AddNetNS adds a network namespace inside the guest and adds endpoints to the guest in that namespace
func (uvm *UtilityVM) AddNetNS(id string, endpoints []*hns.HNSEndpoint) (err error) {
uvm.m.Lock()
defer uvm.m.Unlock()
ns := uvm.namespaces[id]
if ns == nil {
ns = &namespaceInfo{}
if uvm.isNetworkNamespaceSupported() {
// Add a Guest Network namespace. On LCOW we add the adapters
// dynamically.
if uvm.operatingSystem == "windows" {
hcnNamespace, err := hcn.GetNamespaceByID(id)
if err != nil {
return err
}
guestNamespace := hcsschema.ModifySettingRequest{
GuestRequest: guestrequest.GuestRequest{
ResourceType: guestrequest.ResourceTypeNetworkNamespace,
RequestType: requesttype.Add,
Settings: hcnNamespace,
},
}
if err := uvm.Modify(&guestNamespace); err != nil {
return err
}
}
}
defer func() {
if err != nil {
if e := uvm.removeNamespaceNICs(ns); e != nil {
logrus.Warnf("failed to undo NIC add: %v", e)
}
}
}()
for _, endpoint := range endpoints {
nicID := guid.New()
err = uvm.addNIC(nicID, endpoint)
if err != nil {
return err
}
ns.nics = append(ns.nics, nicInfo{nicID, endpoint})
}
if uvm.namespaces == nil {
uvm.namespaces = make(map[string]*namespaceInfo)
}
uvm.namespaces[id] = ns
}
ns.refCount++
return nil
}
// RemoveNetNS removes the namespace information
func (uvm *UtilityVM) RemoveNetNS(id string) error {
uvm.m.Lock()
defer uvm.m.Unlock()
ns := uvm.namespaces[id]
if ns == nil || ns.refCount <= 0 {
panic(fmt.Errorf("removed a namespace that was not added: %s", id))
}
ns.refCount--
// Remove the Guest Network namespace
if uvm.isNetworkNamespaceSupported() {
if uvm.operatingSystem == "windows" {
hcnNamespace, err := hcn.GetNamespaceByID(id)
if err != nil {
return err
}
guestNamespace := hcsschema.ModifySettingRequest{
GuestRequest: guestrequest.GuestRequest{
ResourceType: guestrequest.ResourceTypeNetworkNamespace,
RequestType: requesttype.Remove,
Settings: hcnNamespace,
},
}
if err := uvm.Modify(&guestNamespace); err != nil {
return err
}
}
}
var err error
if ns.refCount == 0 {
err = uvm.removeNamespaceNICs(ns)
delete(uvm.namespaces, id)
}
return err
}
// isNetworkNamespaceSupported returns true if network namespaces are supported inside the guest
func (uvm *UtilityVM) isNetworkNamespaceSupported() bool {
p, err := uvm.ComputeSystem().Properties(schema1.PropertyTypeGuestConnection)
if err == nil {
return p.GuestConnectionInfo.GuestDefinedCapabilities.NamespaceAddRequestSupported
}
return false
}
func (uvm *UtilityVM) removeNamespaceNICs(ns *namespaceInfo) error {
for len(ns.nics) != 0 {
nic := ns.nics[len(ns.nics)-1]
err := uvm.removeNIC(nic.ID, nic.Endpoint)
if err != nil {
return err
}
ns.nics = ns.nics[:len(ns.nics)-1]
}
return nil
}
func getNetworkModifyRequest(adapterID string, requestType string, settings interface{}) interface{} {
if osversion.Get().Build >= osversion.RS5 {
return guestrequest.NetworkModifyRequest{
AdapterId: adapterID,
RequestType: requestType,
Settings: settings,
}
}
return guestrequest.RS4NetworkModifyRequest{
AdapterInstanceId: adapterID,
RequestType: requestType,
Settings: settings,
}
}
func (uvm *UtilityVM) addNIC(id guid.GUID, endpoint *hns.HNSEndpoint) error {
// First a pre-add. This is a guest-only request and is only done on Windows.
if uvm.operatingSystem == "windows" {
preAddRequest := hcsschema.ModifySettingRequest{
GuestRequest: guestrequest.GuestRequest{
ResourceType: guestrequest.ResourceTypeNetwork,
RequestType: requesttype.Add,
Settings: getNetworkModifyRequest(
id.String(),
requesttype.PreAdd,
endpoint),
},
}
if err := uvm.Modify(&preAddRequest); err != nil {
return err
}
}
// Then the Add itself
request := hcsschema.ModifySettingRequest{
RequestType: requesttype.Add,
ResourcePath: path.Join("VirtualMachine/Devices/NetworkAdapters", id.String()),
Settings: hcsschema.NetworkAdapter{
EndpointId: endpoint.Id,
MacAddress: endpoint.MacAddress,
},
}
if uvm.operatingSystem == "windows" {
request.GuestRequest = guestrequest.GuestRequest{
ResourceType: guestrequest.ResourceTypeNetwork,
RequestType: requesttype.Add,
Settings: getNetworkModifyRequest(
id.String(),
requesttype.Add,
nil),
}
} else {
// Verify this version of LCOW supports Network HotAdd
if uvm.isNetworkNamespaceSupported() {
request.GuestRequest = guestrequest.GuestRequest{
ResourceType: guestrequest.ResourceTypeNetwork,
RequestType: requesttype.Add,
Settings: &guestrequest.LCOWNetworkAdapter{
NamespaceID: endpoint.Namespace.ID,
ID: id.String(),
MacAddress: endpoint.MacAddress,
IPAddress: endpoint.IPAddress.String(),
PrefixLength: endpoint.PrefixLength,
GatewayAddress: endpoint.GatewayAddress,
DNSSuffix: endpoint.DNSSuffix,
DNSServerList: endpoint.DNSServerList,
EnableLowMetric: endpoint.EnableLowMetric,
EncapOverhead: endpoint.EncapOverhead,
},
}
}
}
if err := uvm.Modify(&request); err != nil {
return err
}
return nil
}
func (uvm *UtilityVM) removeNIC(id guid.GUID, endpoint *hns.HNSEndpoint) error {
request := hcsschema.ModifySettingRequest{
RequestType: requesttype.Remove,
ResourcePath: path.Join("VirtualMachine/Devices/NetworkAdapters", id.String()),
Settings: hcsschema.NetworkAdapter{
EndpointId: endpoint.Id,
MacAddress: endpoint.MacAddress,
},
}
if uvm.operatingSystem == "windows" {
request.GuestRequest = hcsschema.ModifySettingRequest{
RequestType: requesttype.Remove,
Settings: getNetworkModifyRequest(
id.String(),
requesttype.Remove,
nil),
}
} else {
// Verify this version of LCOW supports Network HotRemove
if uvm.isNetworkNamespaceSupported() {
request.GuestRequest = guestrequest.GuestRequest{
ResourceType: guestrequest.ResourceTypeNetwork,
RequestType: requesttype.Remove,
Settings: &guestrequest.LCOWNetworkAdapter{
NamespaceID: endpoint.Namespace.ID,
ID: endpoint.Id,
},
}
}
}
if err := uvm.Modify(&request); err != nil {
return err
}
return nil
}
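
A hypothetical wrapper (not in this diff) showing the ref-counted pairing of AddNetNS and RemoveNetNS; the endpoints must already exist in HNS and nsID is the HCN namespace GUID string:

func withNetNS(vm *UtilityVM, nsID string, eps []*hns.HNSEndpoint, run func() error) error {
    if err := vm.AddNetNS(nsID, eps); err != nil {
        return err
    }
    // RemoveNetNS decrements the ref-count; the NICs are only removed once it
    // reaches zero.
    defer vm.RemoveNetNS(nsID)
    return run()
}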

View File

@ -0,0 +1,133 @@
package uvm
import (
"fmt"
"github.com/Microsoft/hcsshim/internal/guestrequest"
"github.com/Microsoft/hcsshim/internal/logfields"
"github.com/Microsoft/hcsshim/internal/requesttype"
"github.com/Microsoft/hcsshim/internal/schema2"
"github.com/sirupsen/logrus"
)
// AddPlan9 adds a Plan9 share to a utility VM. Each Plan9 share is ref-counted and
// only added if it isn't already.
func (uvm *UtilityVM) AddPlan9(hostPath string, uvmPath string, readOnly bool) error {
logrus.WithFields(logrus.Fields{
logfields.UVMID: uvm.id,
"host-path": hostPath,
"uvm-path": uvmPath,
"readOnly": readOnly,
}).Debug("uvm::AddPlan9")
if uvm.operatingSystem != "linux" {
return errNotSupported
}
if uvmPath == "" {
return fmt.Errorf("uvmPath must be passed to AddPlan9")
}
// TODO: JTERRY75 - These are marked private in the schema. For now use them
// but when there are public variants we need to switch to them.
const (
shareFlagsReadOnly int32 = 0x00000001
shareFlagsLinuxMetadata int32 = 0x00000004
shareFlagsCaseSensitive int32 = 0x00000008
)
flags := shareFlagsLinuxMetadata | shareFlagsCaseSensitive
if readOnly {
flags |= shareFlagsReadOnly
}
uvm.m.Lock()
defer uvm.m.Unlock()
if uvm.plan9Shares == nil {
uvm.plan9Shares = make(map[string]*plan9Info)
}
if _, ok := uvm.plan9Shares[hostPath]; !ok {
uvm.plan9Counter++
modification := &hcsschema.ModifySettingRequest{
RequestType: requesttype.Add,
Settings: hcsschema.Plan9Share{
Name: fmt.Sprintf("%d", uvm.plan9Counter),
Path: hostPath,
Port: int32(uvm.plan9Counter), // TODO: Temporary. Will all use a single port (9999)
Flags: flags,
},
ResourcePath: "VirtualMachine/Devices/Plan9/Shares",
GuestRequest: guestrequest.GuestRequest{
ResourceType: guestrequest.ResourceTypeMappedDirectory,
RequestType: requesttype.Add,
Settings: guestrequest.LCOWMappedDirectory{
MountPath: uvmPath,
Port: int32(uvm.plan9Counter), // TODO: Temporary. Will all use a single port (9999)
ReadOnly: readOnly,
},
},
}
if err := uvm.Modify(modification); err != nil {
return err
}
uvm.plan9Shares[hostPath] = &plan9Info{
refCount: 1,
uvmPath: uvmPath,
idCounter: uvm.plan9Counter,
port: int32(uvm.plan9Counter), // TODO: Temporary. Will all use a single port (9999)
}
} else {
uvm.plan9Shares[hostPath].refCount++
}
logrus.Debugf("hcsshim::AddPlan9 Success %s: refcount=%d %+v", hostPath, uvm.plan9Shares[hostPath].refCount, uvm.plan9Shares[hostPath])
return nil
}
// RemovePlan9 removes a Plan9 share from a utility VM. Each Plan9 share is ref-counted
// and only actually removed when the ref-count drops to zero.
func (uvm *UtilityVM) RemovePlan9(hostPath string) error {
if uvm.operatingSystem != "linux" {
return errNotSupported
}
logrus.Debugf("uvm::RemovePlan9 %s id:%s", hostPath, uvm.id)
uvm.m.Lock()
defer uvm.m.Unlock()
if _, ok := uvm.plan9Shares[hostPath]; !ok {
return fmt.Errorf("%s is not present as a Plan9 share in %s, cannot remove", hostPath, uvm.id)
}
return uvm.removePlan9(hostPath, uvm.plan9Shares[hostPath].uvmPath)
}
// removePlan9 is the internally callable "unsafe" version of RemovePlan9. The mutex
// MUST be held when calling this function.
func (uvm *UtilityVM) removePlan9(hostPath, uvmPath string) error {
uvm.plan9Shares[hostPath].refCount--
if uvm.plan9Shares[hostPath].refCount > 0 {
logrus.Debugf("uvm::RemovePlan9 Success %s id:%s Ref-count now %d. It is still present in the utility VM", hostPath, uvm.id, uvm.plan9Shares[hostPath].refCount)
return nil
}
logrus.Debugf("uvm::RemovePlan9 Zero ref-count, removing. %s id:%s", hostPath, uvm.id)
modification := &hcsschema.ModifySettingRequest{
RequestType: requesttype.Remove,
Settings: hcsschema.Plan9Share{
Name: fmt.Sprintf("%d", uvm.plan9Shares[hostPath].idCounter),
Port: uvm.plan9Shares[hostPath].port,
},
ResourcePath: "VirtualMachine/Devices/Plan9/Shares",
GuestRequest: guestrequest.GuestRequest{
ResourceType: guestrequest.ResourceTypeMappedDirectory,
RequestType: requesttype.Remove,
Settings: guestrequest.LCOWMappedDirectory{
MountPath: uvm.plan9Shares[hostPath].uvmPath,
Port: uvm.plan9Shares[hostPath].port,
},
},
}
if err := uvm.Modify(modification); err != nil {
return fmt.Errorf("failed to remove plan9 share %s from %s: %+v: %s", hostPath, uvm.id, modification, err)
}
delete(uvm.plan9Shares, hostPath)
logrus.Debugf("uvm::RemovePlan9 Success %s id:%s successfully removed from utility VM", hostPath, uvm.id)
return nil
}
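
A sketch of the share lifecycle (hypothetical function and paths; LCOW only, per the errNotSupported guard above):

func withPlan9(vm *UtilityVM, run func() error) error {
    // readOnly=true sets shareFlagsReadOnly on the host side and ReadOnly in
    // the guest mount request.
    if err := vm.AddPlan9(`C:\host\data`, "/mnt/data", true); err != nil {
        return err
    }
    // Removed from the UVM only when the ref-count drops to zero.
    defer vm.RemovePlan9(`C:\host\data`)
    return run()
}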

View File

@ -0,0 +1,318 @@
package uvm
import (
"fmt"
"github.com/Microsoft/hcsshim/internal/guestrequest"
"github.com/Microsoft/hcsshim/internal/logfields"
"github.com/Microsoft/hcsshim/internal/requesttype"
"github.com/Microsoft/hcsshim/internal/schema2"
"github.com/Microsoft/hcsshim/internal/wclayer"
"github.com/sirupsen/logrus"
)
var (
ErrNoAvailableLocation = fmt.Errorf("no available location")
ErrNotAttached = fmt.Errorf("not attached")
ErrAlreadyAttached = fmt.Errorf("already attached")
ErrNoSCSIControllers = fmt.Errorf("no SCSI controllers configured for this utility VM")
ErrTooManyAttachments = fmt.Errorf("too many SCSI attachments")
ErrSCSILayerWCOWUnsupported = fmt.Errorf("SCSI attached layers are not supported for WCOW")
)
// allocateSCSI finds the next available slot on the
// SCSI controllers associated with a utility VM to use.
// Lock must be held when calling this function
func (uvm *UtilityVM) allocateSCSI(hostPath string, uvmPath string, isLayer bool) (int, int32, error) {
for controller, luns := range uvm.scsiLocations {
for lun, si := range luns {
if si.hostPath == "" {
uvm.scsiLocations[controller][lun].hostPath = hostPath
uvm.scsiLocations[controller][lun].uvmPath = uvmPath
uvm.scsiLocations[controller][lun].isLayer = isLayer
if isLayer {
uvm.scsiLocations[controller][lun].refCount = 1
}
logrus.Debugf("uvm::allocateSCSI %d:%d %q %q", controller, lun, hostPath, uvmPath)
return controller, int32(lun), nil
}
}
}
return -1, -1, ErrNoAvailableLocation
}
func (uvm *UtilityVM) deallocateSCSI(controller int, lun int32) error {
uvm.m.Lock()
defer uvm.m.Unlock()
logrus.Debugf("uvm::deallocateSCSI %d:%d %+v", controller, lun, uvm.scsiLocations[controller][lun])
uvm.scsiLocations[controller][lun] = scsiInfo{}
return nil
}
// Lock must be held when calling this function.
func (uvm *UtilityVM) findSCSIAttachment(findThisHostPath string) (int, int32, string, error) {
for controller, luns := range uvm.scsiLocations {
for lun, si := range luns {
if si.hostPath == findThisHostPath {
logrus.Debugf("uvm::findSCSIAttachment %d:%d %+v", controller, lun, si)
return controller, int32(lun), si.uvmPath, nil
}
}
}
return -1, -1, "", ErrNotAttached
}
// AddSCSI adds a SCSI disk to a utility VM at the next available location. This
// function should be called for a RW/scratch layer or a passthrough vhd/vhdx.
// For read-only layers on LCOW as an alternate to PMEM for large layers, use
// AddSCSILayer instead.
//
// `hostPath` is required and must point to a vhd/vhdx path.
//
// `uvmPath` is optional.
//
// `readOnly` set to `true` if the vhd/vhdx should be attached read only.
func (uvm *UtilityVM) AddSCSI(hostPath string, uvmPath string, readOnly bool) (int, int32, error) {
logrus.WithFields(logrus.Fields{
logfields.UVMID: uvm.id,
"host-path": hostPath,
"uvm-path": uvmPath,
"readOnly": readOnly,
}).Debug("uvm::AddSCSI")
return uvm.addSCSIActual(hostPath, uvmPath, "VirtualDisk", false, readOnly)
}
// AddSCSIPhysicalDisk attaches a physical disk from the host directly to the
// Utility VM at the next available location.
//
// `hostPath` is required and likely starts with `\\.\PHYSICALDRIVE`.
//
// `uvmPath` is optional if a guest mount is not requested.
//
// `readOnly` set to `true` if the physical disk should be attached read only.
func (uvm *UtilityVM) AddSCSIPhysicalDisk(hostPath, uvmPath string, readOnly bool) (int, int32, error) {
logrus.WithFields(logrus.Fields{
logfields.UVMID: uvm.id,
"host-path": hostPath,
"uvm-path": uvmPath,
"readOnly": readOnly,
}).Debug("uvm::AddSCSIPhysicalDisk")
return uvm.addSCSIActual(hostPath, uvmPath, "PassThru", false, readOnly)
}
// AddSCSILayer adds a read-only layer disk to a utility VM at the next available
// location. This function is used by LCOW as an alternate to PMEM for large layers.
// The UVMPath will always be /tmp/S<controller>/<lun>.
func (uvm *UtilityVM) AddSCSILayer(hostPath string) (int, int32, error) {
logrus.WithFields(logrus.Fields{
logfields.UVMID: uvm.id,
"host-path": hostPath,
}).Debug("uvm::AddSCSILayer")
if uvm.operatingSystem == "windows" {
return -1, -1, ErrSCSILayerWCOWUnsupported
}
return uvm.addSCSIActual(hostPath, "", "VirtualDisk", true, true)
}
// addSCSIActual is the implementation behind the external functions AddSCSI and
// AddSCSILayer.
//
// We are in control of everything ourselves. Hence we have ref-counting and
// so on, tracking which SCSI locations are available or used.
//
// `hostPath` is required and may be a vhd/vhdx or physical disk path.
//
// `uvmPath` is optional, and `must` be empty for layers. If `!isLayer` and
// `uvmPath` is empty no guest modify will take place.
//
// `attachmentType` is required and `must` be `VirtualDisk` for vhd/vhdx
// attachments and `PassThru` for physical disk.
//
// `isLayer` indicates that this is a read-only (LCOW) layer VHD. This parameter
// `must not` be used for Windows.
//
// `readOnly` indicates the attachment should be added read only.
//
// Returns the controller ID (0..3) and LUN (0..63) where the disk is attached.
func (uvm *UtilityVM) addSCSIActual(hostPath, uvmPath, attachmentType string, isLayer, readOnly bool) (int, int32, error) {
if uvm.scsiControllerCount == 0 {
return -1, -1, ErrNoSCSIControllers
}
// Ensure the utility VM has access
if err := wclayer.GrantVmAccess(uvm.ID(), hostPath); err != nil {
return -1, -1, err
}
// We must hold the lock throughout the lookup (findSCSIAttachment) until
// after the possible allocation (allocateSCSI) has been completed to ensure
// there isn't a race condition for it being attached by another thread between
// these two operations. All failure paths between these two must release
// the lock.
uvm.m.Lock()
if controller, lun, _, err := uvm.findSCSIAttachment(hostPath); err == nil {
// So it is already attached
if isLayer {
// Increment the refcount
uvm.scsiLocations[controller][lun].refCount++
logrus.Debugf("uvm::AddSCSI id:%s hostPath:%s refCount now %d", uvm.id, hostPath, uvm.scsiLocations[controller][lun].refCount)
uvm.m.Unlock()
return controller, int32(lun), nil
}
uvm.m.Unlock()
return -1, -1, ErrAlreadyAttached
}
// At this point, we know it's not attached, regardless of whether it's a
// ref-counted layer VHD, or not.
controller, lun, err := uvm.allocateSCSI(hostPath, uvmPath, isLayer)
if err != nil {
uvm.m.Unlock()
return -1, -1, err
}
// Auto-generate the UVM path for LCOW layers
if isLayer {
uvmPath = fmt.Sprintf("/tmp/S%d/%d", controller, lun)
}
// See comment higher up. Now safe to release the lock.
uvm.m.Unlock()
// Note: Can remove this check post-RS5 if multiple controllers are supported
if controller > 0 {
uvm.deallocateSCSI(controller, lun)
return -1, -1, ErrTooManyAttachments
}
SCSIModification := &hcsschema.ModifySettingRequest{
RequestType: requesttype.Add,
Settings: hcsschema.Attachment{
Path: hostPath,
Type_: attachmentType,
ReadOnly: readOnly,
},
ResourcePath: fmt.Sprintf("VirtualMachine/Devices/Scsi/%d/Attachments/%d", controller, lun),
}
if uvmPath != "" {
if uvm.operatingSystem == "windows" {
SCSIModification.GuestRequest = guestrequest.GuestRequest{
ResourceType: guestrequest.ResourceTypeMappedVirtualDisk,
RequestType: requesttype.Add,
Settings: guestrequest.WCOWMappedVirtualDisk{
ContainerPath: uvmPath,
Lun: lun,
},
}
} else {
SCSIModification.GuestRequest = guestrequest.GuestRequest{
ResourceType: guestrequest.ResourceTypeMappedVirtualDisk,
RequestType: requesttype.Add,
Settings: guestrequest.LCOWMappedVirtualDisk{
MountPath: uvmPath,
Lun: uint8(lun),
Controller: uint8(controller),
ReadOnly: readOnly,
},
}
}
}
if err := uvm.Modify(SCSIModification); err != nil {
uvm.deallocateSCSI(controller, lun)
return -1, -1, fmt.Errorf("uvm::AddSCSI: failed to modify utility VM configuration: %s", err)
}
logrus.Debugf("uvm::AddSCSI id:%s hostPath:%s added at %d:%d", uvm.id, hostPath, controller, lun)
return controller, int32(lun), nil
}
// RemoveSCSI removes a SCSI disk from a utility VM. As an external API, it
// is "safe". Internal use can call removeSCSI.
func (uvm *UtilityVM) RemoveSCSI(hostPath string) error {
uvm.m.Lock()
defer uvm.m.Unlock()
if uvm.scsiControllerCount == 0 {
return ErrNoSCSIControllers
}
// Make sure is actually attached
controller, lun, uvmPath, err := uvm.findSCSIAttachment(hostPath)
if err != nil {
return err
}
if uvm.scsiLocations[controller][lun].isLayer {
uvm.scsiLocations[controller][lun].refCount--
if uvm.scsiLocations[controller][lun].refCount > 0 {
logrus.Debugf("uvm::RemoveSCSI: refCount now %d: %s %s %d:%d", uvm.scsiLocations[controller][lun].refCount, hostPath, uvm.id, controller, lun)
return nil
}
}
if err := uvm.removeSCSI(hostPath, uvmPath, controller, lun); err != nil {
return fmt.Errorf("failed to remove SCSI disk %s from container %s: %s", hostPath, uvm.id, err)
}
return nil
}
// removeSCSI is the internally callable "unsafe" version of RemoveSCSI. The mutex
// MUST be held when calling this function.
func (uvm *UtilityVM) removeSCSI(hostPath string, uvmPath string, controller int, lun int32) error {
logrus.Debugf("uvm::RemoveSCSI id:%s hostPath:%s", uvm.id, hostPath)
scsiModification := &hcsschema.ModifySettingRequest{
RequestType: requesttype.Remove,
ResourcePath: fmt.Sprintf("VirtualMachine/Devices/Scsi/%d/Attachments/%d", controller, lun),
}
// Include the GuestRequest so that the GCS ejects the disk cleanly if the disk was attached/mounted
if uvmPath != "" {
if uvm.operatingSystem == "windows" {
scsiModification.GuestRequest = guestrequest.GuestRequest{
ResourceType: guestrequest.ResourceTypeMappedVirtualDisk,
RequestType: requesttype.Remove,
Settings: guestrequest.WCOWMappedVirtualDisk{
ContainerPath: uvmPath,
Lun: lun,
},
}
} else {
scsiModification.GuestRequest = guestrequest.GuestRequest{
ResourceType: guestrequest.ResourceTypeMappedVirtualDisk,
RequestType: requesttype.Remove,
Settings: guestrequest.LCOWMappedVirtualDisk{
MountPath: uvmPath, // May be blank in attach-only
Lun: uint8(lun),
Controller: uint8(controller),
},
}
}
}
if err := uvm.Modify(scsiModification); err != nil {
return err
}
uvm.scsiLocations[controller][lun] = scsiInfo{}
logrus.Debugf("uvm::RemoveSCSI: Success %s removed from %s %d:%d", hostPath, uvm.id, controller, lun)
return nil
}
// GetScsiUvmPath returns the guest mounted path of a SCSI drive.
//
// If `hostPath` is not mounted returns `ErrNotAttached`.
func (uvm *UtilityVM) GetScsiUvmPath(hostPath string) (string, error) {
uvm.m.Lock()
defer uvm.m.Unlock()
_, _, uvmPath, err := uvm.findSCSIAttachment(hostPath)
return uvmPath, err
}
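
A sketch of the SCSI hot-add/resolve/remove flow (hypothetical function and host path):

func attachScratch(vm *UtilityVM) error {
    const hostPath = `C:\scratch\sandbox.vhdx` // illustrative
    controller, lun, err := vm.AddSCSI(hostPath, "/tmp/scratch", false)
    if err != nil {
        return err
    }
    logrus.Debugf("attached %s at %d:%d", hostPath, controller, lun)
    uvmPath, err := vm.GetScsiUvmPath(hostPath) // "/tmp/scratch"
    if err != nil {
        return err
    }
    logrus.Debugf("guest mount: %s", uvmPath)
    return vm.RemoveSCSI(hostPath)
}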

View File

@ -0,0 +1,98 @@
package uvm
import (
"context"
"encoding/json"
"io"
"io/ioutil"
"net"
"syscall"
"github.com/sirupsen/logrus"
)
const _ERROR_CONNECTION_ABORTED syscall.Errno = 1236
// Compile-time assertion that parseLogrus satisfies OutputHandler.
var _ = (OutputHandler)(parseLogrus)
func parseLogrus(r io.Reader) {
j := json.NewDecoder(r)
logger := logrus.StandardLogger()
for {
e := logrus.Entry{Logger: logger}
err := j.Decode(&e.Data)
if err == io.EOF || err == _ERROR_CONNECTION_ABORTED {
break
}
if err != nil {
// Something went wrong. Read the rest of the data as a single
// string and log it at once -- it's probably a GCS panic stack.
logrus.Error("gcs log read: ", err)
rest, _ := ioutil.ReadAll(io.MultiReader(j.Buffered(), r))
if len(rest) != 0 {
logrus.Error("gcs stderr: ", string(rest))
}
break
}
msg := e.Data["msg"]
delete(e.Data, "msg")
lvl := e.Data["level"]
delete(e.Data, "level")
e.Data["vm.time"] = e.Data["time"]
delete(e.Data, "time")
switch lvl {
case "debug":
e.Debug(msg)
case "info":
e.Info(msg)
case "warning":
e.Warning(msg)
case "error", "fatal":
e.Error(msg)
default:
e.Info(msg)
}
}
}
type acceptResult struct {
c net.Conn
err error
}
func processOutput(ctx context.Context, l net.Listener, doneChan chan struct{}, handler OutputHandler) {
defer close(doneChan)
ch := make(chan acceptResult)
go func() {
c, err := l.Accept()
ch <- acceptResult{c, err}
}()
select {
case <-ctx.Done():
l.Close()
return
case ar := <-ch:
c, err := ar.c, ar.err
l.Close()
if err != nil {
logrus.Error("accepting log socket: ", err)
return
}
defer c.Close()
handler(c)
}
}
// Start synchronously starts the utility VM.
func (uvm *UtilityVM) Start() error {
if uvm.outputListener != nil {
ctx, cancel := context.WithCancel(context.Background())
go processOutput(ctx, uvm.outputListener, uvm.outputProcessingDone, uvm.outputHandler)
uvm.outputProcessingCancel = cancel
uvm.outputListener = nil
}
return uvm.hcsSystem.Start()
}
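
Because OutputHandler is just func(io.Reader), a caller can swap out parseLogrus to capture raw guest output instead. A hedged sketch (hypothetical log path; assumes "io" and "os" are imported):

func rawLogOptions() *OptionsLCOW {
    opts := NewDefaultOptionsLCOW("example", "")
    opts.OutputHandler = func(r io.Reader) {
        // Dump raw GCS output rather than decoding it as logrus JSON.
        f, err := os.Create(`C:\logs\gcs-raw.log`)
        if err != nil {
            return
        }
        defer f.Close()
        io.Copy(f, r)
    }
    return opts
}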

View File

@ -0,0 +1,7 @@
package uvm
import "github.com/Microsoft/hcsshim/internal/hcs"
func (uvm *UtilityVM) ComputeSystem() *hcs.System {
return uvm.hcsSystem
}

View File

@ -0,0 +1,7 @@
package uvm
// Terminate requests a utility VM terminate. If IsPending() on the error returned is true,
// it may not actually be shut down until Wait() succeeds.
func (uvm *UtilityVM) Terminate() error {
return uvm.hcsSystem.Terminate()
}
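
A sketch of the terminate-then-wait pattern the comment describes (assumes hcs.IsPending from internal/hcs reports an in-flight operation):

func stop(vm *UtilityVM) error {
    err := vm.Terminate()
    if hcs.IsPending(err) {
        err = vm.Wait()
    }
    return err
}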

View File

@ -0,0 +1,105 @@
package uvm
// This package describes the external interface for utility VMs.
import (
"context"
"net"
"sync"
"github.com/Microsoft/hcsshim/internal/guid"
"github.com/Microsoft/hcsshim/internal/hcs"
"github.com/Microsoft/hcsshim/internal/hns"
)
// | WCOW | LCOW
// Container scratch | SCSI | SCSI
// Scratch space | ---- | SCSI // For file system utilities. /tmp/scratch
// Read-Only Layer | VSMB | VPMEM
// Mapped Directory | VSMB | PLAN9
// vsmbShare is an internal structure used for ref-counting VSMB shares mapped to a Windows utility VM.
type vsmbShare struct {
refCount uint32
name string
guestRequest interface{}
}
// scsiInfo is an internal structure used for determining what is mapped to a utility VM.
// hostPath is required. uvmPath may be blank.
type scsiInfo struct {
hostPath string
uvmPath string
// While most VHDs attached to SCSI are scratch spaces, in the case of LCOW,
// when a layer is too large to attach to PMEM, we use SCSI for read-only
// layers. As RO layers are shared, we perform ref-counting.
isLayer bool
refCount uint32
}
// vpmemInfo is an internal structure used for determining VPMem devices mapped to
// a Linux utility VM.
type vpmemInfo struct {
hostPath string
uvmPath string
refCount uint32
}
// plan9Info is an internal structure used for ref-counting Plan9 shares mapped to a Linux utility VM.
type plan9Info struct {
refCount uint32
idCounter uint64
uvmPath string
port int32 // Temporary. TODO Remove
}
type nicInfo struct {
ID guid.GUID
Endpoint *hns.HNSEndpoint
}
type namespaceInfo struct {
nics []nicInfo
refCount int
}
// UtilityVM is the object used by clients representing a utility VM
type UtilityVM struct {
id string // Identifier for the utility VM (user supplied or generated)
owner string // Owner for the utility VM (user supplied or generated)
operatingSystem string // "windows" or "linux"
hcsSystem *hcs.System // The handle to the compute system
m sync.Mutex // Lock for adding/removing devices
// containerCounter is the current number of containers that have been
// created. This is never decremented in the life of the UVM.
//
// NOTE: All accesses to this MUST be done atomically.
containerCounter uint64
// VSMB shares that are mapped into a Windows UVM. These are used for read-only
// layers and mapped directories
vsmbShares map[string]*vsmbShare
vsmbCounter uint64 // Counter to generate a unique share name for each VSMB share.
// VPMEM devices that are mapped into a Linux UVM. These are used for read-only layers, or for
// booting from VHD.
vpmemDevices [MaxVPMEMCount]vpmemInfo // Limited by ACPI size.
vpmemMaxCount uint32 // Actual number of VPMem devices
vpmemMaxSizeBytes uint64 // Actual size of VPMem devices
// SCSI devices that are mapped into a Windows or Linux utility VM
scsiLocations [4][64]scsiInfo // Hyper-V supports 4 controllers, 64 slots per controller. Limited to 1 controller for now though.
scsiControllerCount uint32 // Number of SCSI controllers in the utility VM
// Plan9 are directories mapped into a Linux utility VM
plan9Shares map[string]*plan9Info
plan9Counter uint64 // Each newly-added plan9 share has a counter used as its ID in the ResourceURI and for the name
namespaces map[string]*namespaceInfo
outputListener net.Listener
outputProcessingDone chan struct{}
outputHandler OutputHandler
outputProcessingCancel context.CancelFunc
}

View File

@ -0,0 +1,170 @@
package uvm
import (
"fmt"
"github.com/Microsoft/hcsshim/internal/guestrequest"
"github.com/Microsoft/hcsshim/internal/requesttype"
"github.com/Microsoft/hcsshim/internal/schema2"
"github.com/Microsoft/hcsshim/internal/wclayer"
"github.com/sirupsen/logrus"
)
// allocateVPMEM finds the next available VPMem slot. The lock MUST be held
// when calling this function.
func (uvm *UtilityVM) allocateVPMEM(hostPath string) (uint32, error) {
for index, vi := range uvm.vpmemDevices {
if vi.hostPath == "" {
uvm.vpmemDevices[index].hostPath = hostPath // write to the array slot, not the loop copy
logrus.Debugf("uvm::allocateVPMEM %d %q", index, hostPath)
return uint32(index), nil
}
}
return 0, fmt.Errorf("no free VPMEM locations")
}
func (uvm *UtilityVM) deallocateVPMEM(deviceNumber uint32) error {
uvm.m.Lock()
defer uvm.m.Unlock()
uvm.vpmemDevices[deviceNumber] = vpmemInfo{}
return nil
}
// Lock must be held when calling this function
func (uvm *UtilityVM) findVPMEMDevice(findThisHostPath string) (uint32, string, error) {
for deviceNumber, vi := range uvm.vpmemDevices {
if vi.hostPath == findThisHostPath {
logrus.Debugf("uvm::findVPMEMDeviceNumber %d %s", deviceNumber, findThisHostPath)
return uint32(deviceNumber), vi.uvmPath, nil
}
}
return 0, "", fmt.Errorf("%s is not attached to VPMEM", findThisHostPath)
}
// AddVPMEM adds a VPMEM disk to a utility VM at the next available location.
//
// Returns the location (0..MaxVPMEMCount-1) where the device is attached, and
// if exposed, the utility VM path, which will be /tmp/p<location>.
func (uvm *UtilityVM) AddVPMEM(hostPath string, expose bool) (uint32, string, error) {
if uvm.operatingSystem != "linux" {
return 0, "", errNotSupported
}
logrus.Debugf("uvm::AddVPMEM id:%s hostPath:%s expose:%t", uvm.id, hostPath, expose)
uvm.m.Lock()
defer uvm.m.Unlock()
var deviceNumber uint32
var err error
uvmPath := ""
deviceNumber, uvmPath, err = uvm.findVPMEMDevice(hostPath)
if err != nil {
// Ensure the utility VM has access
if err := wclayer.GrantVmAccess(uvm.ID(), hostPath); err != nil {
return 0, "", err
}
// It doesn't exist, so we're going to allocate and hot-add it
deviceNumber, err = uvm.allocateVPMEM(hostPath)
if err != nil {
return 0, "", err
}
modification := &hcsschema.ModifySettingRequest{
RequestType: requesttype.Add,
Settings: hcsschema.VirtualPMemDevice{
HostPath: hostPath,
ReadOnly: true,
ImageFormat: "Vhd1",
},
ResourcePath: fmt.Sprintf("VirtualMachine/Devices/VirtualPMem/Devices/%d", deviceNumber),
}
if expose {
uvmPath = fmt.Sprintf("/tmp/p%d", deviceNumber)
modification.GuestRequest = guestrequest.GuestRequest{
ResourceType: guestrequest.ResourceTypeVPMemDevice,
RequestType: requesttype.Add,
Settings: guestrequest.LCOWMappedVPMemDevice{
DeviceNumber: deviceNumber,
MountPath: uvmPath,
},
}
}
if err := uvm.Modify(modification); err != nil {
uvm.vpmemDevices[deviceNumber] = vpmemInfo{}
return 0, "", fmt.Errorf("uvm::AddVPMEM: failed to modify utility VM configuration: %s", err)
}
uvm.vpmemDevices[deviceNumber] = vpmemInfo{
hostPath: hostPath,
refCount: 1,
uvmPath: uvmPath}
} else {
pmemi := vpmemInfo{
hostPath: hostPath,
refCount: uvm.vpmemDevices[deviceNumber].refCount + 1,
uvmPath: uvmPath}
uvm.vpmemDevices[deviceNumber] = pmemi
}
logrus.Debugf("hcsshim::AddVPMEM id:%s Success %+v", uvm.id, uvm.vpmemDevices[deviceNumber])
return deviceNumber, uvmPath, nil
}
// RemoveVPMEM removes a VPMEM disk from a utility VM. As an external API, it
// is "safe". Internal use can call removeVPMEM.
func (uvm *UtilityVM) RemoveVPMEM(hostPath string) error {
if uvm.operatingSystem != "linux" {
return errNotSupported
}
uvm.m.Lock()
defer uvm.m.Unlock()
// Make sure is actually attached
deviceNumber, uvmPath, err := uvm.findVPMEMDevice(hostPath)
if err != nil {
return fmt.Errorf("cannot remove VPMEM %s as it is not attached to utility VM %s: %s", hostPath, uvm.id, err)
}
if err := uvm.removeVPMEM(hostPath, uvmPath, deviceNumber); err != nil {
return fmt.Errorf("failed to remove VPMEM %s from utility VM %s: %s", hostPath, uvm.id, err)
}
return nil
}
// removeVPMEM is the internally callable "unsafe" version of RemoveVPMEM. The mutex
// MUST be held when calling this function.
func (uvm *UtilityVM) removeVPMEM(hostPath string, uvmPath string, deviceNumber uint32) error {
logrus.Debugf("uvm::RemoveVPMEM id:%s hostPath:%s device:%d", uvm.id, hostPath, deviceNumber)
if uvm.vpmemDevices[deviceNumber].refCount == 1 {
modification := &hcsschema.ModifySettingRequest{
RequestType: requesttype.Remove,
ResourcePath: fmt.Sprintf("VirtualMachine/Devices/VirtualPMem/Devices/%d", deviceNumber),
GuestRequest: guestrequest.GuestRequest{
ResourceType: guestrequest.ResourceTypeVPMemDevice,
RequestType: requesttype.Remove,
Settings: guestrequest.LCOWMappedVPMemDevice{
DeviceNumber: deviceNumber,
MountPath: uvmPath,
},
},
}
if err := uvm.Modify(modification); err != nil {
return err
}
uvm.vpmemDevices[deviceNumber] = vpmemInfo{}
logrus.Debugf("uvm::RemoveVPMEM: Success id:%s hostPath:%s device:%d", uvm.id, hostPath, deviceNumber)
return nil
}
uvm.vpmemDevices[deviceNumber].refCount--
logrus.Debugf("uvm::RemoveVPMEM: Success id:%s hostPath:%s device:%d refCount:%d", uvm.id, hostPath, deviceNumber, uvm.vpmemDevices[deviceNumber].refCount)
return nil
}
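
A sketch of exposing a read-only layer over VPMem (hypothetical function and path; LCOW only):

func attachLayer(vm *UtilityVM) error {
    const layerVHD = `C:\layers\base\layer.vhd` // illustrative
    deviceNumber, uvmPath, err := vm.AddVPMEM(layerVHD, true)
    if err != nil {
        return err
    }
    logrus.Debugf("layer visible in guest at %s (device %d)", uvmPath, deviceNumber)
    return vm.RemoveVPMEM(layerVHD) // ref-counted; detaches at zero
}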

View File

@ -0,0 +1,112 @@
package uvm
import (
"fmt"
"strconv"
"github.com/Microsoft/hcsshim/internal/requesttype"
"github.com/Microsoft/hcsshim/internal/schema2"
"github.com/sirupsen/logrus"
)
// findVSMBShare finds a share by `hostPath`. If not found returns `ErrNotAttached`.
func (uvm *UtilityVM) findVSMBShare(hostPath string) (*vsmbShare, error) {
share, ok := uvm.vsmbShares[hostPath]
if !ok {
return nil, ErrNotAttached
}
return share, nil
}
func (share *vsmbShare) GuestPath() string {
return `\\?\VMSMB\VSMB-{dcc079ae-60ba-4d07-847c-3493609c0870}\` + share.name
}
// AddVSMB adds a VSMB share to a Windows utility VM. Each VSMB share is ref-counted and
// only added if it isn't already. This is used for read-only layers, mapped directories
// to a container, and for mapped pipes.
func (uvm *UtilityVM) AddVSMB(hostPath string, guestRequest interface{}, options *hcsschema.VirtualSmbShareOptions) error {
if uvm.operatingSystem != "windows" {
return errNotSupported
}
logrus.Debugf("uvm::AddVSMB %s %+v %+v id:%s", hostPath, guestRequest, options, uvm.id)
uvm.m.Lock()
defer uvm.m.Unlock()
share, err := uvm.findVSMBShare(hostPath)
if err == ErrNotAttached {
uvm.vsmbCounter++
shareName := "s" + strconv.FormatUint(uvm.vsmbCounter, 16)
modification := &hcsschema.ModifySettingRequest{
RequestType: requesttype.Add,
Settings: hcsschema.VirtualSmbShare{
Name: shareName,
Options: options,
Path: hostPath,
},
ResourcePath: "VirtualMachine/Devices/VirtualSmb/Shares",
}
if err := uvm.Modify(modification); err != nil {
return err
}
share = &vsmbShare{
name: shareName,
guestRequest: guestRequest,
}
uvm.vsmbShares[hostPath] = share
}
share.refCount++
logrus.Debugf("hcsshim::AddVSMB Success %s: refcount=%d %+v", hostPath, share.refCount, share)
return nil
}
// RemoveVSMB removes a VSMB share from a utility VM. Each VSMB share is ref-counted
// and only actually removed when the ref-count drops to zero.
func (uvm *UtilityVM) RemoveVSMB(hostPath string) error {
if uvm.operatingSystem != "windows" {
return errNotSupported
}
logrus.Debugf("uvm::RemoveVSMB %s id:%s", hostPath, uvm.id)
uvm.m.Lock()
defer uvm.m.Unlock()
share, err := uvm.findVSMBShare(hostPath)
if err != nil {
return fmt.Errorf("%s is not present as a VSMB share in %s, cannot remove", hostPath, uvm.id)
}
share.refCount--
if share.refCount > 0 {
logrus.Debugf("uvm::RemoveVSMB Success %s id:%s Ref-count now %d. It is still present in the utility VM", hostPath, uvm.id, share.refCount)
return nil
}
logrus.Debugf("uvm::RemoveVSMB Zero ref-count, removing. %s id:%s", hostPath, uvm.id)
modification := &hcsschema.ModifySettingRequest{
RequestType: requesttype.Remove,
Settings: hcsschema.VirtualSmbShare{Name: share.name},
ResourcePath: "VirtualMachine/Devices/VirtualSmb/Shares",
}
if err := uvm.Modify(modification); err != nil {
return fmt.Errorf("failed to remove vsmb share %s from %s: %+v: %s", hostPath, uvm.id, modification, err)
}
logrus.Debugf("uvm::RemoveVSMB Success %s id:%s successfully removed from utility VM", hostPath, uvm.id)
delete(uvm.vsmbShares, hostPath)
return nil
}
// GetVSMBUvmPath returns the guest path of a VSMB mount.
func (uvm *UtilityVM) GetVSMBUvmPath(hostPath string) (string, error) {
if hostPath == "" {
return "", fmt.Errorf("no hostPath passed to GetVSMBUvmPath")
}
uvm.m.Lock()
defer uvm.m.Unlock()
share, err := uvm.findVSMBShare(hostPath)
if err != nil {
return "", err
}
path := share.GuestPath()
logrus.Debugf("uvm::GetVSMBUvmPath Success %s id:%s path:%s", hostPath, uvm.id, path)
return path, nil
}
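
A sketch of sharing a read-only layer folder into a WCOW UVM (hypothetical function and path), mirroring the options used for the "os" share in CreateWCOW:

func shareLayer(vm *UtilityVM) error {
    const layerPath = `C:\layers\base` // illustrative
    options := &hcsschema.VirtualSmbShareOptions{
        ReadOnly:            true,
        PseudoOplocks:       true,
        TakeBackupPrivilege: true,
        CacheIo:             true,
        ShareRead:           true,
    }
    if err := vm.AddVSMB(layerPath, nil, options); err != nil {
        return err
    }
    guestPath, err := vm.GetVSMBUvmPath(layerPath)
    if err != nil {
        return err
    }
    logrus.Debugf("share visible in guest at %s", guestPath)
    return vm.RemoveVSMB(layerPath) // ref-counted; removed at zero
}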

View File

@ -0,0 +1,46 @@
package uvm
import (
"github.com/Microsoft/hcsshim/internal/logfields"
"github.com/sirupsen/logrus"
)
func (uvm *UtilityVM) waitForOutput() {
logrus.WithField(logfields.UVMID, uvm.ID()).
Debug("UVM exited, waiting for output processing to complete")
if uvm.outputProcessingDone != nil {
<-uvm.outputProcessingDone
}
}
// Wait synchronously waits for a utility VM to terminate.
func (uvm *UtilityVM) Wait() error {
err := uvm.hcsSystem.Wait()
// outputProcessingCancel will only cancel waiting for the vsockexec
// connection, it won't stop output processing once the connection is
// established.
if uvm.outputProcessingCancel != nil {
uvm.outputProcessingCancel()
}
uvm.waitForOutput()
return err
}
// WaitExpectedError synchronously waits for a utility VM to terminate. If the
// UVM terminates successfully, or if the given error is encountered internally
// during the wait, this function returns nil.
func (uvm *UtilityVM) WaitExpectedError(expected error) error {
err := uvm.hcsSystem.WaitExpectedError(expected)
// outputProcessingCancel will only cancel waiting for the vsockexec
// connection, it won't stop output processing once the connection is
// established.
if uvm.outputProcessingCancel != nil {
uvm.outputProcessingCancel()
}
uvm.waitForOutput()
return err
}