Merge pull request #2268 from jhowardmsft/boltdb

boltdb/bolt==>bbolt, revendor Microsoft/*, Windows compilation.
Christopher Adam Telfer 2018-09-13 16:00:09 -04:00 committed by GitHub
commit e1b464e78f
118 changed files with 5877 additions and 4113 deletions

View file

@@ -0,0 +1,3 @@
package ns
// File is present so that go build ./... is closer to working on Windows from repo root.

View file

@@ -1,3 +1,5 @@
// +build !windows
package portallocator
import (

View file

@@ -0,0 +1 @@
package portallocator

View file

@@ -1,21 +1,21 @@
github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895
github.com/Microsoft/go-winio v0.4.7
github.com/Microsoft/hcsshim v0.6.11
github.com/Microsoft/go-winio v0.4.11
github.com/Microsoft/hcsshim v0.7.3
github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904
github.com/codegangsta/cli a65b733b303f0055f8d324d805f393cd3e7a7904
github.com/containerd/continuity d3c23511c1bf5851696cba83143d9cbcd666869b
github.com/coreos/etcd v3.2.1
github.com/coreos/go-semver v0.2.0
github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d
go.etcd.io/bbolt v1.3.1-etcd.8
github.com/docker/docker 162ba6016def672690ee4a1f3978368853a1e149
github.com/docker/go-connections 7beb39f0b969b075d1325fecb092faf27fd357b6
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1
github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef
github.com/docker/libkv 458977154600b9f23984d9f4b82e79570b5ae12b
github.com/godbus/dbus v4.0.0
github.com/gogo/protobuf v1.0.0
@@ -36,7 +36,6 @@ github.com/opencontainers/runc 69663f0bd4b60df09991c08812a60108003fa340
github.com/opencontainers/runtime-spec v1.0.1
github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
github.com/sirupsen/logrus v1.0.3
github.com/syndtr/gocapability 33e07d32887e1e06b7c025f27ce52f62c7990bc0
github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065
github.com/vishvananda/netlink b2de5d10e38ecce8607e6b438b6d174f389a004e
github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25
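As an aside (not part of this diff): the switch from github.com/boltdb/bolt to go.etcd.io/bbolt above only changes the vendored import path, since bbolt keeps the boltdb package name and API surface. A minimal, illustrative sketch of consumer code after the move, with a hypothetical database path and bucket name:

package main

import (
	"log"

	bolt "go.etcd.io/bbolt" // previously: "github.com/boltdb/bolt"
)

func main() {
	// Open (or create) a store at a hypothetical path.
	db, err := bolt.Open("/tmp/example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Write a key in a bucket, exactly as with boltdb/bolt.
	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("endpoints"))
		if err != nil {
			return err
		}
		return b.Put([]byte("ep1"), []byte("value"))
	})
	if err != nil {
		log.Fatal(err)
	}
}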

View file

@@ -1,137 +1,137 @@
package winio
import (
"bytes"
"encoding/binary"
"errors"
)
type fileFullEaInformation struct {
NextEntryOffset uint32
Flags uint8
NameLength uint8
ValueLength uint16
}
var (
fileFullEaInformationSize = binary.Size(&fileFullEaInformation{})
errInvalidEaBuffer = errors.New("invalid extended attribute buffer")
errEaNameTooLarge = errors.New("extended attribute name too large")
errEaValueTooLarge = errors.New("extended attribute value too large")
)
// ExtendedAttribute represents a single Windows EA.
type ExtendedAttribute struct {
Name string
Value []byte
Flags uint8
}
func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
var info fileFullEaInformation
err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info)
if err != nil {
err = errInvalidEaBuffer
return
}
nameOffset := fileFullEaInformationSize
nameLen := int(info.NameLength)
valueOffset := nameOffset + int(info.NameLength) + 1
valueLen := int(info.ValueLength)
nextOffset := int(info.NextEntryOffset)
if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) {
err = errInvalidEaBuffer
return
}
ea.Name = string(b[nameOffset : nameOffset+nameLen])
ea.Value = b[valueOffset : valueOffset+valueLen]
ea.Flags = info.Flags
if info.NextEntryOffset != 0 {
nb = b[info.NextEntryOffset:]
}
return
}
// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION
// buffer retrieved from BackupRead, ZwQueryEaFile, etc.
func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) {
for len(b) != 0 {
ea, nb, err := parseEa(b)
if err != nil {
return nil, err
}
eas = append(eas, ea)
b = nb
}
return
}
func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error {
if int(uint8(len(ea.Name))) != len(ea.Name) {
return errEaNameTooLarge
}
if int(uint16(len(ea.Value))) != len(ea.Value) {
return errEaValueTooLarge
}
entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value))
withPadding := (entrySize + 3) &^ 3
nextOffset := uint32(0)
if !last {
nextOffset = withPadding
}
info := fileFullEaInformation{
NextEntryOffset: nextOffset,
Flags: ea.Flags,
NameLength: uint8(len(ea.Name)),
ValueLength: uint16(len(ea.Value)),
}
err := binary.Write(buf, binary.LittleEndian, &info)
if err != nil {
return err
}
_, err = buf.Write([]byte(ea.Name))
if err != nil {
return err
}
err = buf.WriteByte(0)
if err != nil {
return err
}
_, err = buf.Write(ea.Value)
if err != nil {
return err
}
_, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize])
if err != nil {
return err
}
return nil
}
// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION
// buffer for use with BackupWrite, ZwSetEaFile, etc.
func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) {
var buf bytes.Buffer
for i := range eas {
last := false
if i == len(eas)-1 {
last = true
}
err := writeEa(&buf, &eas[i], last)
if err != nil {
return nil, err
}
}
return buf.Bytes(), nil
}
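As an aside (not part of the diff): the two helpers above round-trip Windows extended attributes through the FILE_FULL_EA_INFORMATION wire format. A minimal, Windows-only sketch using a made-up attribute name and value:

package main

import (
	"fmt"
	"log"

	"github.com/Microsoft/go-winio"
)

func main() {
	eas := []winio.ExtendedAttribute{
		{Name: "EXAMPLE.EA", Value: []byte{1, 2, 3, 4}},
	}

	// Encode into a FILE_FULL_EA_INFORMATION buffer...
	buf, err := winio.EncodeExtendedAttributes(eas)
	if err != nil {
		log.Fatal(err)
	}

	// ...and decode it back into the same list of attributes.
	decoded, err := winio.DecodeExtendedAttributes(buf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded[0].Name, decoded[0].Value)
}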

View file

@@ -20,7 +20,8 @@ const (
// FileBasicInfo contains file access time and file attributes information.
type FileBasicInfo struct {
CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime
FileAttributes uintptr // includes padding
FileAttributes uint32
pad uint32 // padding
}
// GetFileBasicInfo retrieves times and attributes for a file.
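The change above replaces the padded uintptr with an explicit uint32 attributes field plus padding, so callers can treat the value as a plain attribute bitmask. An illustrative, Windows-only sketch (not part of the diff; the path is hypothetical):

package main

import (
	"fmt"
	"log"
	"os"
	"syscall"

	"github.com/Microsoft/go-winio"
)

func main() {
	f, err := os.Open(`C:\Windows\System32`)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	bi, err := winio.GetFileBasicInfo(f)
	if err != nil {
		log.Fatal(err)
	}
	// FileAttributes is now a uint32, so bitmask tests need no conversion from uintptr.
	if bi.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
		fmt.Println("directory")
	}
}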

View file

@@ -15,7 +15,6 @@ import (
//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe
//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW
//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW
//sys waitNamedPipe(name string, timeout uint32) (err error) = WaitNamedPipeW
//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
@@ -121,6 +120,11 @@ func (f *win32MessageBytePipe) Read(b []byte) (int, error) {
// zero-byte message, ensure that all future Read() calls
// also return EOF.
f.readEOF = true
} else if err == syscall.ERROR_MORE_DATA {
// ERROR_MORE_DATA indicates that the pipe's read mode is message mode
// and the message still has more bytes. Treat this as a success, since
// this package presents all named pipes as byte streams.
err = nil
}
return n, err
}
@@ -134,12 +138,14 @@ func (s pipeAddress) String() string {
}
// DialPipe connects to a named pipe by path, timing out if the connection
// takes longer than the specified duration. If timeout is nil, then the timeout
// is the default timeout established by the pipe server.
// takes longer than the specified duration. If timeout is nil, then we use
// a default timeout of 5 seconds. (We do not use WaitNamedPipe.)
func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
var absTimeout time.Time
if timeout != nil {
absTimeout = time.Now().Add(*timeout)
} else {
absTimeout = time.Now().Add(time.Second * 2)
}
var err error
var h syscall.Handle
@@ -148,22 +154,13 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
if err != cERROR_PIPE_BUSY {
break
}
now := time.Now()
var ms uint32
if absTimeout.IsZero() {
ms = cNMPWAIT_USE_DEFAULT_WAIT
} else if now.After(absTimeout) {
ms = cNMPWAIT_NOWAIT
} else {
ms = uint32(absTimeout.Sub(now).Nanoseconds() / 1000 / 1000)
}
err = waitNamedPipe(path, ms)
if err != nil {
if err == cERROR_SEM_TIMEOUT {
return nil, ErrTimeout
}
break
if time.Now().After(absTimeout) {
return nil, ErrTimeout
}
// Wait 10 msec and try again. This is a rather simplistic
// view, as we always try each 10 milliseconds.
time.Sleep(time.Millisecond * 10)
}
if err != nil {
return nil, &os.PathError{Op: "open", Path: path, Err: err}
@@ -175,16 +172,6 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
return nil, err
}
var state uint32
err = getNamedPipeHandleState(h, &state, nil, nil, nil, nil, 0)
if err != nil {
return nil, err
}
if state&cPIPE_READMODE_MESSAGE != 0 {
return nil, &os.PathError{Op: "open", Path: path, Err: errors.New("message readmode pipes not supported")}
}
f, err := makeWin32File(h)
if err != nil {
syscall.Close(h)
@@ -354,13 +341,23 @@ func ListenPipe(path string, c *PipeConfig) (net.Listener, error) {
if err != nil {
return nil, err
}
// Immediately open and then close a client handle so that the named pipe is
// created but not currently accepting connections.
// Create a client handle and connect it. This results in the pipe
// instance always existing, so that clients see ERROR_PIPE_BUSY
// rather than ERROR_FILE_NOT_FOUND. This ties the first instance
// up so that no other instances can be used. This would have been
// cleaner if the Win32 API matched CreateFile with ConnectNamedPipe
// instead of CreateNamedPipe. (Apparently created named pipes are
// considered to be in listening state regardless of whether any
// active calls to ConnectNamedPipe are outstanding.)
h2, err := createFile(path, 0, 0, nil, syscall.OPEN_EXISTING, cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
if err != nil {
syscall.Close(h)
return nil, err
}
// Close the client handle. The server side of the instance will
// still be busy, leading to ERROR_PIPE_BUSY instead of
// ERROR_NOT_FOUND, as long as we don't close the server handle,
// or disconnect the client with DisconnectNamedPipe.
syscall.Close(h2)
l := &win32PipeListener{
firstHandle: h,

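To make the DialPipe and ListenPipe changes above concrete, here is a minimal, illustrative client/server sketch (not part of the diff; the pipe name is hypothetical, and it is Windows-only). With this change DialPipe polls roughly every 10ms while the pipe is busy instead of calling WaitNamedPipe, and falls back to a package default when no timeout is given.

package main

import (
	"fmt"
	"time"

	"github.com/Microsoft/go-winio"
)

func main() {
	const pipePath = `\\.\pipe\winio-demo` // hypothetical pipe name

	// Server: listen and answer one connection.
	l, err := winio.ListenPipe(pipePath, nil)
	if err != nil {
		panic(err)
	}
	defer l.Close()
	go func() {
		c, err := l.Accept()
		if err != nil {
			return
		}
		defer c.Close()
		c.Write([]byte("hello"))
	}()

	// Client: dial with an explicit timeout; pass nil to use the default.
	timeout := 2 * time.Second
	conn, err := winio.DialPipe(pipePath, &timeout)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	buf := make([]byte, 5)
	n, err := conn.Read(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf[:n]))
}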
View file

@@ -1,12 +1,13 @@
# hcsshim
This package supports launching Windows Server containers from Go. It is
primarily used in the [Docker Engine](https://github.com/docker/docker) project,
but it can be freely used by other projects as well.
[![Build status](https://ci.appveyor.com/api/projects/status/nbcw28mnkqml0loa/branch/master?svg=true)](https://ci.appveyor.com/project/WindowsVirtualization/hcsshim/branch/master)
This package contains the Golang interface for using the Windows [Host Compute Service](https://blogs.technet.microsoft.com/virtualization/2017/01/27/introducing-the-host-compute-service-hcs/) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS).
It is primarily used in the [Moby Project](https://github.com/moby/moby), but it can be freely used by other projects as well.
## Contributing
---------------
This project welcomes contributions and suggestions. Most contributions require you to agree to a
Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
the rights to use your contribution. For details, visit https://cla.microsoft.com.
@@ -19,6 +20,11 @@ This project has adopted the [Microsoft Open Source Code of Conduct](https://ope
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
## Dependencies
This project requires Golang 1.9 or newer to build.
For system requirements to run this project, see the Microsoft docs on [Windows Container requirements](https://docs.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/system-requirements).
## Reporting Security Issues
@@ -29,5 +35,7 @@ email to ensure we received your original message. Further information, includin
[MSRC PGP](https://technet.microsoft.com/en-us/security/dn606155) key, can be found in
the [Security TechCenter](https://technet.microsoft.com/en-us/security/default).
-------------------------------------------
For additional details, see [Report a Computer Security Vulnerability](https://technet.microsoft.com/en-us/security/ff852094.aspx) on Technet
---------------
Copyright (c) 2018 Microsoft Corp. All rights reserved.

View file

@@ -1,28 +0,0 @@
package hcsshim
import "github.com/sirupsen/logrus"
// ActivateLayer will find the layer with the given id and mount its filesystem.
// For a read/write layer, the mounted filesystem will appear as a volume on the
// host, while a read-only layer is generally expected to be a no-op.
// An activated layer must later be deactivated via DeactivateLayer.
func ActivateLayer(info DriverInfo, id string) error {
title := "hcsshim::ActivateLayer "
logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = activateLayer(&infop, id)
if err != nil {
err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour)
logrus.Error(err)
return err
}
logrus.Debugf(title+" - succeeded id=%s flavour=%d", id, info.Flavour)
return nil
}

View file

@@ -1,800 +1,192 @@
package hcsshim
import (
"encoding/json"
"fmt"
"os"
"sync"
"syscall"
"time"
"github.com/sirupsen/logrus"
"github.com/Microsoft/hcsshim/internal/hcs"
"github.com/Microsoft/hcsshim/internal/mergemaps"
"github.com/Microsoft/hcsshim/internal/schema1"
)
var (
defaultTimeout = time.Minute * 4
)
const (
pendingUpdatesQuery = `{ "PropertyTypes" : ["PendingUpdates"]}`
statisticsQuery = `{ "PropertyTypes" : ["Statistics"]}`
processListQuery = `{ "PropertyTypes" : ["ProcessList"]}`
mappedVirtualDiskQuery = `{ "PropertyTypes" : ["MappedVirtualDisk"]}`
)
type container struct {
handleLock sync.RWMutex
handle hcsSystem
id string
callbackNumber uintptr
}
// ContainerProperties holds the properties for a container and the processes running in that container
type ContainerProperties struct {
ID string `json:"Id"`
Name string
SystemType string
Owner string
SiloGUID string `json:"SiloGuid,omitempty"`
RuntimeID string `json:"RuntimeId,omitempty"`
IsRuntimeTemplate bool `json:",omitempty"`
RuntimeImagePath string `json:",omitempty"`
Stopped bool `json:",omitempty"`
ExitType string `json:",omitempty"`
AreUpdatesPending bool `json:",omitempty"`
ObRoot string `json:",omitempty"`
Statistics Statistics `json:",omitempty"`
ProcessList []ProcessListItem `json:",omitempty"`
MappedVirtualDiskControllers map[int]MappedVirtualDiskController `json:",omitempty"`
}
type ContainerProperties = schema1.ContainerProperties
// MemoryStats holds the memory statistics for a container
type MemoryStats struct {
UsageCommitBytes uint64 `json:"MemoryUsageCommitBytes,omitempty"`
UsageCommitPeakBytes uint64 `json:"MemoryUsageCommitPeakBytes,omitempty"`
UsagePrivateWorkingSetBytes uint64 `json:"MemoryUsagePrivateWorkingSetBytes,omitempty"`
}
type MemoryStats = schema1.MemoryStats
// ProcessorStats holds the processor statistics for a container
type ProcessorStats struct {
TotalRuntime100ns uint64 `json:",omitempty"`
RuntimeUser100ns uint64 `json:",omitempty"`
RuntimeKernel100ns uint64 `json:",omitempty"`
}
type ProcessorStats = schema1.ProcessorStats
// StorageStats holds the storage statistics for a container
type StorageStats struct {
ReadCountNormalized uint64 `json:",omitempty"`
ReadSizeBytes uint64 `json:",omitempty"`
WriteCountNormalized uint64 `json:",omitempty"`
WriteSizeBytes uint64 `json:",omitempty"`
}
type StorageStats = schema1.StorageStats
// NetworkStats holds the network statistics for a container
type NetworkStats struct {
BytesReceived uint64 `json:",omitempty"`
BytesSent uint64 `json:",omitempty"`
PacketsReceived uint64 `json:",omitempty"`
PacketsSent uint64 `json:",omitempty"`
DroppedPacketsIncoming uint64 `json:",omitempty"`
DroppedPacketsOutgoing uint64 `json:",omitempty"`
EndpointId string `json:",omitempty"`
InstanceId string `json:",omitempty"`
}
type NetworkStats = schema1.NetworkStats
// Statistics is the structure returned by a statistics call on a container
type Statistics struct {
Timestamp time.Time `json:",omitempty"`
ContainerStartTime time.Time `json:",omitempty"`
Uptime100ns uint64 `json:",omitempty"`
Memory MemoryStats `json:",omitempty"`
Processor ProcessorStats `json:",omitempty"`
Storage StorageStats `json:",omitempty"`
Network []NetworkStats `json:",omitempty"`
}
type Statistics = schema1.Statistics
// ProcessList is the structure of an item returned by a ProcessList call on a container
type ProcessListItem struct {
CreateTimestamp time.Time `json:",omitempty"`
ImageName string `json:",omitempty"`
KernelTime100ns uint64 `json:",omitempty"`
MemoryCommitBytes uint64 `json:",omitempty"`
MemoryWorkingSetPrivateBytes uint64 `json:",omitempty"`
MemoryWorkingSetSharedBytes uint64 `json:",omitempty"`
ProcessId uint32 `json:",omitempty"`
UserTime100ns uint64 `json:",omitempty"`
}
type ProcessListItem = schema1.ProcessListItem
// MappedVirtualDiskController is the structure of an item returned by a MappedVirtualDiskList call on a container
type MappedVirtualDiskController struct {
MappedVirtualDisks map[int]MappedVirtualDisk `json:",omitempty"`
}
type MappedVirtualDiskController = schema1.MappedVirtualDiskController
// Type of Request Support in ModifySystem
type RequestType string
type RequestType = schema1.RequestType
// Type of Resource Support in ModifySystem
type ResourceType string
type ResourceType = schema1.ResourceType
// RequestType const
const (
Add RequestType = "Add"
Remove RequestType = "Remove"
Network ResourceType = "Network"
Add = schema1.Add
Remove = schema1.Remove
Network = schema1.Network
)
// ResourceModificationRequestResponse is the structure used to send request to the container to modify the system
// Supported resource types are Network and Request Types are Add/Remove
type ResourceModificationRequestResponse struct {
Resource ResourceType `json:"ResourceType"`
Data interface{} `json:"Settings"`
Request RequestType `json:"RequestType,omitempty"`
type ResourceModificationRequestResponse = schema1.ResourceModificationRequestResponse
type container struct {
system *hcs.System
}
// createContainerAdditionalJSON is read from the environment at initialisation
// createComputeSystemAdditionalJSON is read from the environment at initialisation
// time. It allows an environment variable to define additional JSON which
// is merged in the CreateContainer call to HCS.
var createContainerAdditionalJSON string
// is merged in the CreateComputeSystem call to HCS.
var createContainerAdditionalJSON []byte
func init() {
createContainerAdditionalJSON = os.Getenv("HCSSHIM_CREATECONTAINER_ADDITIONALJSON")
createContainerAdditionalJSON = ([]byte)(os.Getenv("HCSSHIM_CREATECONTAINER_ADDITIONALJSON"))
}
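For context (not part of the diff): additional JSON can also be supplied per call through CreateContainerWithJSON rather than the environment variable, since the variable is only read once at package initialisation. A rough, illustrative sketch with hypothetical config values:

package main

import (
	"log"

	"github.com/Microsoft/hcsshim"
)

func main() {
	cfg := &hcsshim.ContainerConfig{
		SystemType:      "Container",
		Name:            "demo",
		LayerFolderPath: `C:\layers\demo`, // hypothetical layer folder
	}
	// Extra JSON merged into the compute-system document before it is sent to HCS.
	additional := `{"Servicing": false}`

	c, err := hcsshim.CreateContainerWithJSON("demo-id", cfg, additional)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	if err := c.Start(); err != nil {
		log.Fatal(err)
	}
	if err := c.Wait(); err != nil {
		log.Fatal(err)
	}
}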
// CreateContainer creates a new container with the given configuration but does not start it.
func CreateContainer(id string, c *ContainerConfig) (Container, error) {
return createContainerWithJSON(id, c, "")
}
// CreateContainerWithJSON creates a new container with the given configuration but does not start it.
// It is identical to CreateContainer except that optional additional JSON can be merged before passing to HCS.
func CreateContainerWithJSON(id string, c *ContainerConfig, additionalJSON string) (Container, error) {
return createContainerWithJSON(id, c, additionalJSON)
}
func createContainerWithJSON(id string, c *ContainerConfig, additionalJSON string) (Container, error) {
operation := "CreateContainer"
title := "HCSShim::" + operation
container := &container{
id: id,
fullConfig, err := mergemaps.MergeJSON(c, createContainerAdditionalJSON)
if err != nil {
return nil, fmt.Errorf("failed to merge additional JSON '%s': %s", createContainerAdditionalJSON, err)
}
configurationb, err := json.Marshal(c)
system, err := hcs.CreateComputeSystem(id, fullConfig)
if err != nil {
return nil, err
}
configuration := string(configurationb)
logrus.Debugf(title+" id=%s config=%s", id, configuration)
// Merge any additional JSON. Priority is given to what is passed in explicitly,
// falling back to what's set in the environment.
if additionalJSON == "" && createContainerAdditionalJSON != "" {
additionalJSON = createContainerAdditionalJSON
}
if additionalJSON != "" {
configurationMap := map[string]interface{}{}
if err := json.Unmarshal([]byte(configuration), &configurationMap); err != nil {
return nil, fmt.Errorf("failed to unmarshal %s: %s", configuration, err)
}
additionalMap := map[string]interface{}{}
if err := json.Unmarshal([]byte(additionalJSON), &additionalMap); err != nil {
return nil, fmt.Errorf("failed to unmarshal %s: %s", additionalJSON, err)
}
mergedMap := mergeMaps(additionalMap, configurationMap)
mergedJSON, err := json.Marshal(mergedMap)
if err != nil {
return nil, fmt.Errorf("failed to marshal merged configuration map %+v: %s", mergedMap, err)
}
configuration = string(mergedJSON)
logrus.Debugf(title+" id=%s merged config=%s", id, configuration)
}
var (
resultp *uint16
identity syscall.Handle
)
createError := hcsCreateComputeSystem(id, configuration, identity, &container.handle, &resultp)
if createError == nil || IsPending(createError) {
if err := container.registerCallback(); err != nil {
// Terminate the container if it still exists. We're okay to ignore a failure here.
container.Terminate()
return nil, makeContainerError(container, operation, "", err)
}
}
err = processAsyncHcsResult(createError, resultp, container.callbackNumber, hcsNotificationSystemCreateCompleted, &defaultTimeout)
if err != nil {
if err == ErrTimeout {
// Terminate the container if it still exists. We're okay to ignore a failure here.
container.Terminate()
}
return nil, makeContainerError(container, operation, configuration, err)
}
logrus.Debugf(title+" succeeded id=%s handle=%d", id, container.handle)
return container, nil
}
// mergeMaps recursively merges map `fromMap` into map `ToMap`. Any pre-existing values
// in ToMap are overwritten. Values in fromMap are added to ToMap.
// From http://stackoverflow.com/questions/40491438/merging-two-json-strings-in-golang
func mergeMaps(fromMap, ToMap interface{}) interface{} {
switch fromMap := fromMap.(type) {
case map[string]interface{}:
ToMap, ok := ToMap.(map[string]interface{})
if !ok {
return fromMap
}
for keyToMap, valueToMap := range ToMap {
if valueFromMap, ok := fromMap[keyToMap]; ok {
fromMap[keyToMap] = mergeMaps(valueFromMap, valueToMap)
} else {
fromMap[keyToMap] = valueToMap
}
}
case nil:
// merge(nil, map[string]interface{...}) -> map[string]interface{...}
ToMap, ok := ToMap.(map[string]interface{})
if ok {
return ToMap
}
}
return fromMap
return &container{system}, err
}
// OpenContainer opens an existing container by ID.
func OpenContainer(id string) (Container, error) {
operation := "OpenContainer"
title := "HCSShim::" + operation
logrus.Debugf(title+" id=%s", id)
container := &container{
id: id,
}
var (
handle hcsSystem
resultp *uint16
)
err := hcsOpenComputeSystem(id, &handle, &resultp)
err = processHcsResult(err, resultp)
system, err := hcs.OpenComputeSystem(id)
if err != nil {
return nil, makeContainerError(container, operation, "", err)
return nil, err
}
container.handle = handle
if err := container.registerCallback(); err != nil {
return nil, makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s handle=%d", id, handle)
return container, nil
return &container{system}, err
}
// GetContainers gets a list of the containers on the system that match the query
func GetContainers(q ComputeSystemQuery) ([]ContainerProperties, error) {
operation := "GetContainers"
title := "HCSShim::" + operation
queryb, err := json.Marshal(q)
if err != nil {
return nil, err
}
query := string(queryb)
logrus.Debugf(title+" query=%s", query)
var (
resultp *uint16
computeSystemsp *uint16
)
err = hcsEnumerateComputeSystems(query, &computeSystemsp, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return nil, err
}
if computeSystemsp == nil {
return nil, ErrUnexpectedValue
}
computeSystemsRaw := convertAndFreeCoTaskMemBytes(computeSystemsp)
computeSystems := []ContainerProperties{}
if err := json.Unmarshal(computeSystemsRaw, &computeSystems); err != nil {
return nil, err
}
logrus.Debugf(title + " succeeded")
return computeSystems, nil
return hcs.GetComputeSystems(q)
}
// Start synchronously starts the container.
func (container *container) Start() error {
container.handleLock.RLock()
defer container.handleLock.RUnlock()
operation := "Start"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
if container.handle == 0 {
return makeContainerError(container, operation, "", ErrAlreadyClosed)
}
var resultp *uint16
err := hcsStartComputeSystem(container.handle, "", &resultp)
err = processAsyncHcsResult(err, resultp, container.callbackNumber, hcsNotificationSystemStartCompleted, &defaultTimeout)
if err != nil {
return makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
return convertSystemError(container.system.Start(), container)
}
// Shutdown requests a container shutdown, if IsPending() on the error returned is true,
// it may not actually be shut down until Wait() succeeds.
// Shutdown requests a container shutdown, but it may not actually be shutdown until Wait() succeeds.
func (container *container) Shutdown() error {
container.handleLock.RLock()
defer container.handleLock.RUnlock()
operation := "Shutdown"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
if container.handle == 0 {
return makeContainerError(container, operation, "", ErrAlreadyClosed)
}
var resultp *uint16
err := hcsShutdownComputeSystem(container.handle, "", &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
return convertSystemError(container.system.Shutdown(), container)
}
// Terminate requests a container terminate, if IsPending() on the error returned is true,
// it may not actually be shut down until Wait() succeeds.
// Terminate requests a container terminate, but it may not actually be terminated until Wait() succeeds.
func (container *container) Terminate() error {
container.handleLock.RLock()
defer container.handleLock.RUnlock()
operation := "Terminate"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
if container.handle == 0 {
return makeContainerError(container, operation, "", ErrAlreadyClosed)
}
var resultp *uint16
err := hcsTerminateComputeSystem(container.handle, "", &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
return convertSystemError(container.system.Terminate(), container)
}
// Wait synchronously waits for the container to shutdown or terminate.
// Waits synchronously waits for the container to shutdown or terminate.
func (container *container) Wait() error {
operation := "Wait"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
err := waitForNotification(container.callbackNumber, hcsNotificationSystemExited, nil)
if err != nil {
return makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
return convertSystemError(container.system.Wait(), container)
}
// WaitTimeout synchronously waits for the container to terminate or the duration to elapse.
// If the timeout expires, IsTimeout(err) == true
func (container *container) WaitTimeout(timeout time.Duration) error {
operation := "WaitTimeout"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
err := waitForNotification(container.callbackNumber, hcsNotificationSystemExited, &timeout)
if err != nil {
return makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
// WaitTimeout synchronously waits for the container to terminate or the duration to elapse.
// If the timeout expires, IsTimeout(err) == true.
func (container *container) WaitTimeout(t time.Duration) error {
return convertSystemError(container.system.WaitTimeout(t), container)
}
func (container *container) properties(query string) (*ContainerProperties, error) {
var (
resultp *uint16
propertiesp *uint16
)
err := hcsGetComputeSystemProperties(container.handle, query, &propertiesp, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return nil, err
}
// Pause pauses the execution of a container.
func (container *container) Pause() error {
return convertSystemError(container.system.Pause(), container)
}
if propertiesp == nil {
return nil, ErrUnexpectedValue
}
propertiesRaw := convertAndFreeCoTaskMemBytes(propertiesp)
properties := &ContainerProperties{}
if err := json.Unmarshal(propertiesRaw, properties); err != nil {
return nil, err
}
return properties, nil
// Resume resumes the execution of a container.
func (container *container) Resume() error {
return convertSystemError(container.system.Resume(), container)
}
// HasPendingUpdates returns true if the container has updates pending to install
func (container *container) HasPendingUpdates() (bool, error) {
container.handleLock.RLock()
defer container.handleLock.RUnlock()
operation := "HasPendingUpdates"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
if container.handle == 0 {
return false, makeContainerError(container, operation, "", ErrAlreadyClosed)
}
properties, err := container.properties(pendingUpdatesQuery)
if err != nil {
return false, makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return properties.AreUpdatesPending, nil
return false, nil
}
// Statistics returns statistics for the container
// Statistics returns statistics for the container. This is a legacy v1 call
func (container *container) Statistics() (Statistics, error) {
container.handleLock.RLock()
defer container.handleLock.RUnlock()
operation := "Statistics"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
if container.handle == 0 {
return Statistics{}, makeContainerError(container, operation, "", ErrAlreadyClosed)
}
properties, err := container.properties(statisticsQuery)
properties, err := container.system.Properties(schema1.PropertyTypeStatistics)
if err != nil {
return Statistics{}, makeContainerError(container, operation, "", err)
return Statistics{}, convertSystemError(err, container)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return properties.Statistics, nil
}
// ProcessList returns an array of ProcessListItems for the container
// ProcessList returns an array of ProcessListItems for the container. This is a legacy v1 call
func (container *container) ProcessList() ([]ProcessListItem, error) {
container.handleLock.RLock()
defer container.handleLock.RUnlock()
operation := "ProcessList"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
if container.handle == 0 {
return nil, makeContainerError(container, operation, "", ErrAlreadyClosed)
}
properties, err := container.properties(processListQuery)
properties, err := container.system.Properties(schema1.PropertyTypeProcessList)
if err != nil {
return nil, makeContainerError(container, operation, "", err)
return nil, convertSystemError(err, container)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return properties.ProcessList, nil
}
// MappedVirtualDisks returns a map of the controllers and the disks mapped
// to a container.
//
// Example of JSON returned by the query.
//{
// "Id":"1126e8d7d279c707a666972a15976371d365eaf622c02cea2c442b84f6f550a3_svm",
// "SystemType":"Container",
// "RuntimeOsType":"Linux",
// "RuntimeId":"00000000-0000-0000-0000-000000000000",
// "State":"Running",
// "MappedVirtualDiskControllers":{
// "0":{
// "MappedVirtualDisks":{
// "2":{
// "HostPath":"C:\\lcow\\lcow\\scratch\\1126e8d7d279c707a666972a15976371d365eaf622c02cea2c442b84f6f550a3.vhdx",
// "ContainerPath":"/mnt/gcs/LinuxServiceVM/scratch",
// "Lun":2,
// "CreateInUtilityVM":true
// },
// "3":{
// "HostPath":"C:\\lcow\\lcow\\1126e8d7d279c707a666972a15976371d365eaf622c02cea2c442b84f6f550a3\\sandbox.vhdx",
// "Lun":3,
// "CreateInUtilityVM":true,
// "AttachOnly":true
// }
// }
// }
// }
//}
// This is a legacy v1 call
func (container *container) MappedVirtualDisks() (map[int]MappedVirtualDiskController, error) {
container.handleLock.RLock()
defer container.handleLock.RUnlock()
operation := "MappedVirtualDiskList"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
if container.handle == 0 {
return nil, makeContainerError(container, operation, "", ErrAlreadyClosed)
}
properties, err := container.properties(mappedVirtualDiskQuery)
properties, err := container.system.Properties(schema1.PropertyTypeMappedVirtualDisk)
if err != nil {
return nil, makeContainerError(container, operation, "", err)
return nil, convertSystemError(err, container)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return properties.MappedVirtualDiskControllers, nil
}
// Pause pauses the execution of the container. This feature is not enabled in TP5.
func (container *container) Pause() error {
container.handleLock.RLock()
defer container.handleLock.RUnlock()
operation := "Pause"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
if container.handle == 0 {
return makeContainerError(container, operation, "", ErrAlreadyClosed)
}
var resultp *uint16
err := hcsPauseComputeSystem(container.handle, "", &resultp)
err = processAsyncHcsResult(err, resultp, container.callbackNumber, hcsNotificationSystemPauseCompleted, &defaultTimeout)
if err != nil {
return makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
}
// Resume resumes the execution of the container. This feature is not enabled in TP5.
func (container *container) Resume() error {
container.handleLock.RLock()
defer container.handleLock.RUnlock()
operation := "Resume"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
if container.handle == 0 {
return makeContainerError(container, operation, "", ErrAlreadyClosed)
}
var resultp *uint16
err := hcsResumeComputeSystem(container.handle, "", &resultp)
err = processAsyncHcsResult(err, resultp, container.callbackNumber, hcsNotificationSystemResumeCompleted, &defaultTimeout)
if err != nil {
return makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
}
// CreateProcess launches a new process within the container.
func (container *container) CreateProcess(c *ProcessConfig) (Process, error) {
container.handleLock.RLock()
defer container.handleLock.RUnlock()
operation := "CreateProcess"
title := "HCSShim::Container::" + operation
var (
processInfo hcsProcessInformation
processHandle hcsProcess
resultp *uint16
)
if container.handle == 0 {
return nil, makeContainerError(container, operation, "", ErrAlreadyClosed)
}
// If we are not emulating a console, ignore any console size passed to us
if !c.EmulateConsole {
c.ConsoleSize[0] = 0
c.ConsoleSize[1] = 0
}
configurationb, err := json.Marshal(c)
p, err := container.system.CreateProcess(c)
if err != nil {
return nil, makeContainerError(container, operation, "", err)
return nil, convertSystemError(err, container)
}
configuration := string(configurationb)
logrus.Debugf(title+" id=%s config=%s", container.id, configuration)
err = hcsCreateProcess(container.handle, configuration, &processInfo, &processHandle, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return nil, makeContainerError(container, operation, configuration, err)
}
process := &process{
handle: processHandle,
processID: int(processInfo.ProcessId),
container: container,
cachedPipes: &cachedPipes{
stdIn: processInfo.StdInput,
stdOut: processInfo.StdOutput,
stdErr: processInfo.StdError,
},
}
if err := process.registerCallback(); err != nil {
return nil, makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s processid=%d", container.id, process.processID)
return process, nil
return &process{p}, nil
}
// OpenProcess gets an interface to an existing process within the container.
func (container *container) OpenProcess(pid int) (Process, error) {
container.handleLock.RLock()
defer container.handleLock.RUnlock()
operation := "OpenProcess"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s, processid=%d", container.id, pid)
var (
processHandle hcsProcess
resultp *uint16
)
if container.handle == 0 {
return nil, makeContainerError(container, operation, "", ErrAlreadyClosed)
}
err := hcsOpenProcess(container.handle, uint32(pid), &processHandle, &resultp)
err = processHcsResult(err, resultp)
p, err := container.system.OpenProcess(pid)
if err != nil {
return nil, makeContainerError(container, operation, "", err)
return nil, convertSystemError(err, container)
}
process := &process{
handle: processHandle,
processID: pid,
container: container,
}
if err := process.registerCallback(); err != nil {
return nil, makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s processid=%s", container.id, process.processID)
return process, nil
return &process{p}, nil
}
// Close cleans up any state associated with the container but does not terminate or wait for it.
func (container *container) Close() error {
container.handleLock.Lock()
defer container.handleLock.Unlock()
operation := "Close"
title := "HCSShim::Container::" + operation
logrus.Debugf(title+" id=%s", container.id)
// Don't double free this
if container.handle == 0 {
return nil
}
if err := container.unregisterCallback(); err != nil {
return makeContainerError(container, operation, "", err)
}
if err := hcsCloseComputeSystem(container.handle); err != nil {
return makeContainerError(container, operation, "", err)
}
container.handle = 0
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
return convertSystemError(container.system.Close(), container)
}
func (container *container) registerCallback() error {
context := &notifcationWatcherContext{
channels: newChannels(),
}
callbackMapLock.Lock()
callbackNumber := nextCallback
nextCallback++
callbackMap[callbackNumber] = context
callbackMapLock.Unlock()
var callbackHandle hcsCallback
err := hcsRegisterComputeSystemCallback(container.handle, notificationWatcherCallback, callbackNumber, &callbackHandle)
if err != nil {
return err
}
context.handle = callbackHandle
container.callbackNumber = callbackNumber
return nil
}
func (container *container) unregisterCallback() error {
callbackNumber := container.callbackNumber
callbackMapLock.RLock()
context := callbackMap[callbackNumber]
callbackMapLock.RUnlock()
if context == nil {
return nil
}
handle := context.handle
if handle == 0 {
return nil
}
// hcsUnregisterComputeSystemCallback has its own synchronization
// to wait for all callbacks to complete. We must NOT hold the callbackMapLock.
err := hcsUnregisterComputeSystemCallback(handle)
if err != nil {
return err
}
closeChannels(context.channels)
callbackMapLock.Lock()
callbackMap[callbackNumber] = nil
callbackMapLock.Unlock()
handle = 0
return nil
}
// Modifies the System by sending a request to HCS
// Modify the System
func (container *container) Modify(config *ResourceModificationRequestResponse) error {
container.handleLock.RLock()
defer container.handleLock.RUnlock()
operation := "Modify"
title := "HCSShim::Container::" + operation
if container.handle == 0 {
return makeContainerError(container, operation, "", ErrAlreadyClosed)
}
requestJSON, err := json.Marshal(config)
if err != nil {
return err
}
requestString := string(requestJSON)
logrus.Debugf(title+" id=%s request=%s", container.id, requestString)
var resultp *uint16
err = hcsModifyComputeSystem(container.handle, requestString, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return makeContainerError(container, operation, "", err)
}
logrus.Debugf(title+" succeeded id=%s", container.id)
return nil
return convertSystemError(container.system.Modify(config), container)
}

View file

@@ -1,27 +0,0 @@
package hcsshim
import "github.com/sirupsen/logrus"
// CreateLayer creates a new, empty, read-only layer on the filesystem based on
// the parent layer provided.
func CreateLayer(info DriverInfo, id, parent string) error {
title := "hcsshim::CreateLayer "
logrus.Debugf(title+"Flavour %d ID %s parent %s", info.Flavour, id, parent)
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = createLayer(&infop, id, parent)
if err != nil {
err = makeErrorf(err, title, "id=%s parent=%s flavour=%d", id, parent, info.Flavour)
logrus.Error(err)
return err
}
logrus.Debugf(title+" - succeeded id=%s parent=%s flavour=%d", id, parent, info.Flavour)
return nil
}

View file

@@ -1,35 +0,0 @@
package hcsshim
import "github.com/sirupsen/logrus"
// CreateSandboxLayer creates and populates new read-write layer for use by a container.
// This requires both the id of the direct parent layer, as well as the full list
// of paths to all parent layers up to the base (and including the direct parent
// whose id was provided).
func CreateSandboxLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error {
title := "hcsshim::CreateSandboxLayer "
logrus.Debugf(title+"layerId %s parentId %s", layerId, parentId)
// Generate layer descriptors
layers, err := layerPathsToDescriptors(parentLayerPaths)
if err != nil {
return err
}
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = createSandboxLayer(&infop, layerId, parentId, layers)
if err != nil {
err = makeErrorf(err, title, "layerId=%s parentId=%s", layerId, parentId)
logrus.Error(err)
return err
}
logrus.Debugf(title+"- succeeded layerId=%s parentId=%s", layerId, parentId)
return nil
}

View file

@@ -1,26 +0,0 @@
package hcsshim
import "github.com/sirupsen/logrus"
// DeactivateLayer will dismount a layer that was mounted via ActivateLayer.
func DeactivateLayer(info DriverInfo, id string) error {
title := "hcsshim::DeactivateLayer "
logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = deactivateLayer(&infop, id)
if err != nil {
err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour)
logrus.Error(err)
return err
}
logrus.Debugf(title+"succeeded flavour=%d id=%s", info.Flavour, id)
return nil
}

View file

@@ -1,27 +0,0 @@
package hcsshim
import "github.com/sirupsen/logrus"
// DestroyLayer will remove the on-disk files representing the layer with the given
// id, including that layer's containing folder, if any.
func DestroyLayer(info DriverInfo, id string) error {
title := "hcsshim::DestroyLayer "
logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = destroyLayer(&infop, id)
if err != nil {
err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour)
logrus.Error(err)
return err
}
logrus.Debugf(title+"succeeded flavour=%d id=%s", info.Flavour, id)
return nil
}

View file

@@ -1,92 +1,83 @@
package hcsshim
import (
"errors"
"fmt"
"syscall"
"github.com/Microsoft/hcsshim/internal/hns"
"github.com/Microsoft/hcsshim/internal/hcs"
"github.com/Microsoft/hcsshim/internal/hcserror"
)
var (
// ErrComputeSystemDoesNotExist is an error encountered when the container being operated on no longer exists
ErrComputeSystemDoesNotExist = syscall.Errno(0xc037010e)
// ErrComputeSystemDoesNotExist is an error encountered when the container being operated on no longer exists
ErrComputeSystemDoesNotExist = hcs.ErrComputeSystemDoesNotExist
// ErrElementNotFound is an error encountered when the object being referenced does not exist
ErrElementNotFound = syscall.Errno(0x490)
ErrElementNotFound = hcs.ErrElementNotFound
// ErrNotSupported is an error encountered when hcs doesn't support the request
ErrNotSupported = syscall.Errno(0x32)
ErrNotSupported = hcs.ErrNotSupported
// ErrInvalidData is an error encountered when the request being sent to hcs is invalid/unsupported
// decimal -2147024883 / hex 0x8007000d
ErrInvalidData = syscall.Errno(0xd)
ErrInvalidData = hcs.ErrInvalidData
// ErrHandleClose is an error encountered when the handle generating the notification being waited on has been closed
ErrHandleClose = errors.New("hcsshim: the handle generating this notification has been closed")
ErrHandleClose = hcs.ErrHandleClose
// ErrAlreadyClosed is an error encountered when using a handle that has been closed by the Close method
ErrAlreadyClosed = errors.New("hcsshim: the handle has already been closed")
ErrAlreadyClosed = hcs.ErrAlreadyClosed
// ErrInvalidNotificationType is an error encountered when an invalid notification type is used
ErrInvalidNotificationType = errors.New("hcsshim: invalid notification type")
ErrInvalidNotificationType = hcs.ErrInvalidNotificationType
// ErrInvalidProcessState is an error encountered when the process is not in a valid state for the requested operation
ErrInvalidProcessState = errors.New("the process is in an invalid state for the attempted operation")
ErrInvalidProcessState = hcs.ErrInvalidProcessState
// ErrTimeout is an error encountered when waiting on a notification times out
ErrTimeout = errors.New("hcsshim: timeout waiting for notification")
ErrTimeout = hcs.ErrTimeout
// ErrUnexpectedContainerExit is the error encountered when a container exits while waiting for
// a different expected notification
ErrUnexpectedContainerExit = errors.New("unexpected container exit")
ErrUnexpectedContainerExit = hcs.ErrUnexpectedContainerExit
// ErrUnexpectedProcessAbort is the error encountered when communication with the compute service
// is lost while waiting for a notification
ErrUnexpectedProcessAbort = errors.New("lost communication with compute service")
ErrUnexpectedProcessAbort = hcs.ErrUnexpectedProcessAbort
// ErrUnexpectedValue is an error encountered when hcs returns an invalid value
ErrUnexpectedValue = errors.New("unexpected value returned from hcs")
ErrUnexpectedValue = hcs.ErrUnexpectedValue
// ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container
ErrVmcomputeAlreadyStopped = syscall.Errno(0xc0370110)
ErrVmcomputeAlreadyStopped = hcs.ErrVmcomputeAlreadyStopped
// ErrVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously
ErrVmcomputeOperationPending = syscall.Errno(0xC0370103)
ErrVmcomputeOperationPending = hcs.ErrVmcomputeOperationPending
// ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation
ErrVmcomputeOperationInvalidState = syscall.Errno(0xc0370105)
ErrVmcomputeOperationInvalidState = hcs.ErrVmcomputeOperationInvalidState
// ErrProcNotFound is an error encountered when the process cannot be found
ErrProcNotFound = syscall.Errno(0x7f)
ErrProcNotFound = hcs.ErrProcNotFound
// ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2
// builds when the underlying silo might be in the process of terminating. HCS was fixed in RS3.
ErrVmcomputeOperationAccessIsDenied = syscall.Errno(0x5)
ErrVmcomputeOperationAccessIsDenied = hcs.ErrVmcomputeOperationAccessIsDenied
// ErrVmcomputeInvalidJSON is an error encountered when the compute system does not support/understand the messages sent by management
ErrVmcomputeInvalidJSON = syscall.Errno(0xc037010d)
ErrVmcomputeInvalidJSON = hcs.ErrVmcomputeInvalidJSON
// ErrVmcomputeUnknownMessage is an error encountered when the guest compute system doesn't support the message
ErrVmcomputeUnknownMessage = syscall.Errno(0xc037010b)
ErrVmcomputeUnknownMessage = hcs.ErrVmcomputeUnknownMessage
// ErrPlatformNotSupported is an error encountered when the request is not supported on the current platform
ErrPlatformNotSupported = errors.New("unsupported platform request")
ErrPlatformNotSupported = hcs.ErrPlatformNotSupported
)
type EndpointNotFoundError struct {
EndpointName string
}
func (e EndpointNotFoundError) Error() string {
return fmt.Sprintf("Endpoint %s not found", e.EndpointName)
}
type NetworkNotFoundError struct {
NetworkName string
}
func (e NetworkNotFoundError) Error() string {
return fmt.Sprintf("Network %s not found", e.NetworkName)
}
type EndpointNotFoundError = hns.EndpointNotFoundError
type NetworkNotFoundError = hns.NetworkNotFoundError
// ProcessError is an error encountered in HCS during an operation on a Process object
type ProcessError struct {
@@ -94,6 +85,7 @@ type ProcessError struct {
Operation string
ExtraInfo string
Err error
Events []hcs.ErrorEvent
}
// ContainerError is an error encountered in HCS during an operation on a Container object
@@ -102,6 +94,7 @@ type ContainerError struct {
Operation string
ExtraInfo string
Err error
Events []hcs.ErrorEvent
}
func (e *ContainerError) Error() string {
@@ -113,7 +106,7 @@ func (e *ContainerError) Error() string {
return "unexpected nil container for error: " + e.Err.Error()
}
s := "container " + e.Container.id
s := "container " + e.Container.system.ID()
if e.Operation != "" {
s += " encountered an error during " + e.Operation
@@ -123,11 +116,15 @@ func (e *ContainerError) Error() string {
case nil:
break
case syscall.Errno:
s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, win32FromError(e.Err))
s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, hcserror.Win32FromError(e.Err))
default:
s += fmt.Sprintf(": %s", e.Err.Error())
}
for _, ev := range e.Events {
s += "\n" + ev.String()
}
if e.ExtraInfo != "" {
s += " extra info: " + e.ExtraInfo
}
@@ -153,12 +150,7 @@ func (e *ProcessError) Error() string {
return "Unexpected nil process for error: " + e.Err.Error()
}
s := fmt.Sprintf("process %d", e.Process.processID)
if e.Process.container != nil {
s += " in container " + e.Process.container.id
}
s := fmt.Sprintf("process %d in container %s", e.Process.p.Pid(), e.Process.p.SystemID())
if e.Operation != "" {
s += " encountered an error during " + e.Operation
}
@@ -167,11 +159,15 @@ func (e *ProcessError) Error() string {
case nil:
break
case syscall.Errno:
s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, win32FromError(e.Err))
s += fmt.Sprintf(": failure in a Windows system call: %s (0x%x)", e.Err, hcserror.Win32FromError(e.Err))
default:
s += fmt.Sprintf(": %s", e.Err.Error())
}
for _, ev := range e.Events {
s += "\n" + ev.String()
}
return s
}
@@ -189,37 +185,31 @@ func makeProcessError(process *process, operation string, extraInfo string, err
// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
// will currently return true when the error is ErrElementNotFound or ErrProcNotFound.
func IsNotExist(err error) bool {
err = getInnerError(err)
if _, ok := err.(EndpointNotFoundError); ok {
return true
}
if _, ok := err.(NetworkNotFoundError); ok {
return true
}
return err == ErrComputeSystemDoesNotExist ||
err == ErrElementNotFound ||
err == ErrProcNotFound
return hcs.IsNotExist(getInnerError(err))
}
// IsAlreadyClosed checks if an error is caused by the Container or Process having been
// already closed by a call to the Close() method.
func IsAlreadyClosed(err error) bool {
err = getInnerError(err)
return err == ErrAlreadyClosed
return hcs.IsAlreadyClosed(getInnerError(err))
}
// IsPending returns a boolean indicating whether the error is that
// the requested operation is being completed in the background.
func IsPending(err error) bool {
err = getInnerError(err)
return err == ErrVmcomputeOperationPending
return hcs.IsPending(getInnerError(err))
}
// IsTimeout returns a boolean indicating whether the error is caused by
// a timeout waiting for the operation to complete.
func IsTimeout(err error) bool {
err = getInnerError(err)
return err == ErrTimeout
return hcs.IsTimeout(getInnerError(err))
}
// IsAlreadyStopped returns a boolean indicating whether the error is caused by
@@ -228,10 +218,7 @@ func IsTimeout(err error) bool {
// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
// will currently return true when the error is ErrElementNotFound or ErrProcNotFound.
func IsAlreadyStopped(err error) bool {
err = getInnerError(err)
return err == ErrVmcomputeAlreadyStopped ||
err == ErrElementNotFound ||
err == ErrProcNotFound
return hcs.IsAlreadyStopped(getInnerError(err))
}
// IsNotSupported returns a boolean indicating whether the error is caused by
@@ -240,12 +227,7 @@ func IsAlreadyStopped(err error) bool {
// ErrVmcomputeInvalidJSON, ErrInvalidData, ErrNotSupported or ErrVmcomputeUnknownMessage
// is thrown from the Platform
func IsNotSupported(err error) bool {
err = getInnerError(err)
// If Platform doesn't recognize or support the request sent, below errors are seen
return err == ErrVmcomputeInvalidJSON ||
err == ErrInvalidData ||
err == ErrNotSupported ||
err == ErrVmcomputeUnknownMessage
return hcs.IsNotSupported(getInnerError(err))
}
func getInnerError(err error) error {
@ -259,3 +241,17 @@ func getInnerError(err error) error {
}
return err
}
func convertSystemError(err error, c *container) error {
if serr, ok := err.(*hcs.SystemError); ok {
return &ContainerError{Container: c, Operation: serr.Op, ExtraInfo: serr.Extra, Err: serr.Err, Events: serr.Events}
}
return err
}
func convertProcessError(err error, p *process) error {
if perr, ok := err.(*hcs.ProcessError); ok {
return &ProcessError{Process: p, Operation: perr.Op, Err: perr.Err, Events: perr.Events}
}
return err
}

View file

@ -1,26 +0,0 @@
package hcsshim
import "github.com/sirupsen/logrus"
// ExpandSandboxSize expands the size of a layer to at least size bytes.
func ExpandSandboxSize(info DriverInfo, layerId string, size uint64) error {
title := "hcsshim::ExpandSandboxSize "
logrus.Debugf(title+"layerId=%s size=%d", layerId, size)
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = expandSandboxSize(&infop, layerId, size)
if err != nil {
err = makeErrorf(err, title, "layerId=%s size=%d", layerId, size)
logrus.Error(err)
return err
}
logrus.Debugf(title+"- succeeded layerId=%s size=%d", layerId, size)
return nil
}

View file

@ -1,19 +0,0 @@
package hcsshim
import (
"crypto/sha1"
"fmt"
)
type GUID [16]byte
func NewGUID(source string) *GUID {
h := sha1.Sum([]byte(source))
var g GUID
copy(g[0:], h[0:16])
return &g
}
func (g *GUID) ToString() string {
return fmt.Sprintf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x-%02x", g[3], g[2], g[1], g[0], g[5], g[4], g[7], g[6], g[8:10], g[10:])
}

View file

@ -4,80 +4,20 @@
package hcsshim
import (
"fmt"
"syscall"
"unsafe"
"github.com/sirupsen/logrus"
"github.com/Microsoft/hcsshim/internal/hcserror"
)
//go:generate go run mksyscall_windows.go -output zhcsshim.go hcsshim.go safeopen.go
//go:generate go run mksyscall_windows.go -output zsyscall_windows.go hcsshim.go
//sys coTaskMemFree(buffer unsafe.Pointer) = ole32.CoTaskMemFree
//sys SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) = iphlpapi.SetCurrentThreadCompartmentId
//sys activateLayer(info *driverInfo, id string) (hr error) = vmcompute.ActivateLayer?
//sys copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CopyLayer?
//sys createLayer(info *driverInfo, id string, parent string) (hr error) = vmcompute.CreateLayer?
//sys createSandboxLayer(info *driverInfo, id string, parent string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CreateSandboxLayer?
//sys expandSandboxSize(info *driverInfo, id string, size uint64) (hr error) = vmcompute.ExpandSandboxSize?
//sys deactivateLayer(info *driverInfo, id string) (hr error) = vmcompute.DeactivateLayer?
//sys destroyLayer(info *driverInfo, id string) (hr error) = vmcompute.DestroyLayer?
//sys exportLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ExportLayer?
//sys getLayerMountPath(info *driverInfo, id string, length *uintptr, buffer *uint16) (hr error) = vmcompute.GetLayerMountPath?
//sys getBaseImages(buffer **uint16) (hr error) = vmcompute.GetBaseImages?
//sys importLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ImportLayer?
//sys layerExists(info *driverInfo, id string, exists *uint32) (hr error) = vmcompute.LayerExists?
//sys nameToGuid(name string, guid *GUID) (hr error) = vmcompute.NameToGuid?
//sys prepareLayer(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.PrepareLayer?
//sys unprepareLayer(info *driverInfo, id string) (hr error) = vmcompute.UnprepareLayer?
//sys processBaseImage(path string) (hr error) = vmcompute.ProcessBaseImage?
//sys processUtilityImage(path string) (hr error) = vmcompute.ProcessUtilityImage?
//sys importLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) = vmcompute.ImportLayerBegin?
//sys importLayerNext(context uintptr, fileName string, fileInfo *winio.FileBasicInfo) (hr error) = vmcompute.ImportLayerNext?
//sys importLayerWrite(context uintptr, buffer []byte) (hr error) = vmcompute.ImportLayerWrite?
//sys importLayerEnd(context uintptr) (hr error) = vmcompute.ImportLayerEnd?
//sys exportLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) = vmcompute.ExportLayerBegin?
//sys exportLayerNext(context uintptr, fileName **uint16, fileInfo *winio.FileBasicInfo, fileSize *int64, deleted *uint32) (hr error) = vmcompute.ExportLayerNext?
//sys exportLayerRead(context uintptr, buffer []byte, bytesRead *uint32) (hr error) = vmcompute.ExportLayerRead?
//sys exportLayerEnd(context uintptr) (hr error) = vmcompute.ExportLayerEnd?
//sys hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) = vmcompute.HcsEnumerateComputeSystems?
//sys hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *hcsSystem, result **uint16) (hr error) = vmcompute.HcsCreateComputeSystem?
//sys hcsOpenComputeSystem(id string, computeSystem *hcsSystem, result **uint16) (hr error) = vmcompute.HcsOpenComputeSystem?
//sys hcsCloseComputeSystem(computeSystem hcsSystem) (hr error) = vmcompute.HcsCloseComputeSystem?
//sys hcsStartComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsStartComputeSystem?
//sys hcsShutdownComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsShutdownComputeSystem?
//sys hcsTerminateComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsTerminateComputeSystem?
//sys hcsPauseComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsPauseComputeSystem?
//sys hcsResumeComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsResumeComputeSystem?
//sys hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetComputeSystemProperties?
//sys hcsModifyComputeSystem(computeSystem hcsSystem, configuration string, result **uint16) (hr error) = vmcompute.HcsModifyComputeSystem?
//sys hcsRegisterComputeSystemCallback(computeSystem hcsSystem, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) = vmcompute.HcsRegisterComputeSystemCallback?
//sys hcsUnregisterComputeSystemCallback(callbackHandle hcsCallback) (hr error) = vmcompute.HcsUnregisterComputeSystemCallback?
//sys hcsCreateProcess(computeSystem hcsSystem, processParameters string, processInformation *hcsProcessInformation, process *hcsProcess, result **uint16) (hr error) = vmcompute.HcsCreateProcess?
//sys hcsOpenProcess(computeSystem hcsSystem, pid uint32, process *hcsProcess, result **uint16) (hr error) = vmcompute.HcsOpenProcess?
//sys hcsCloseProcess(process hcsProcess) (hr error) = vmcompute.HcsCloseProcess?
//sys hcsTerminateProcess(process hcsProcess, result **uint16) (hr error) = vmcompute.HcsTerminateProcess?
//sys hcsGetProcessInfo(process hcsProcess, processInformation *hcsProcessInformation, result **uint16) (hr error) = vmcompute.HcsGetProcessInfo?
//sys hcsGetProcessProperties(process hcsProcess, processProperties **uint16, result **uint16) (hr error) = vmcompute.HcsGetProcessProperties?
//sys hcsModifyProcess(process hcsProcess, settings string, result **uint16) (hr error) = vmcompute.HcsModifyProcess?
//sys hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetServiceProperties?
//sys hcsRegisterProcessCallback(process hcsProcess, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) = vmcompute.HcsRegisterProcessCallback?
//sys hcsUnregisterProcessCallback(callbackHandle hcsCallback) (hr error) = vmcompute.HcsUnregisterProcessCallback?
//sys hcsModifyServiceSettings(settings string, result **uint16) (hr error) = vmcompute.HcsModifyServiceSettings?
//sys _hnsCall(method string, path string, object string, response **uint16) (hr error) = vmcompute.HNSCall?
const (
// Specific user-visible exit codes
WaitErrExecFailed = 32767
ERROR_GEN_FAILURE = syscall.Errno(31)
ERROR_GEN_FAILURE = hcserror.ERROR_GEN_FAILURE
ERROR_SHUTDOWN_IN_PROGRESS = syscall.Errno(1115)
WSAEINVAL = syscall.Errno(10022)
@ -85,82 +25,4 @@ const (
TimeoutInfinite = 0xFFFFFFFF
)
type HcsError struct {
title string
rest string
Err error
}
type hcsSystem syscall.Handle
type hcsProcess syscall.Handle
type hcsCallback syscall.Handle
type hcsProcessInformation struct {
ProcessId uint32
Reserved uint32
StdInput syscall.Handle
StdOutput syscall.Handle
StdError syscall.Handle
}
func makeError(err error, title, rest string) error {
// Pass through DLL errors directly since they do not originate from HCS.
if _, ok := err.(*syscall.DLLError); ok {
return err
}
return &HcsError{title, rest, err}
}
func makeErrorf(err error, title, format string, a ...interface{}) error {
return makeError(err, title, fmt.Sprintf(format, a...))
}
func win32FromError(err error) uint32 {
if herr, ok := err.(*HcsError); ok {
return win32FromError(herr.Err)
}
if code, ok := err.(syscall.Errno); ok {
return uint32(code)
}
return uint32(ERROR_GEN_FAILURE)
}
func win32FromHresult(hr uintptr) uintptr {
if hr&0x1fff0000 == 0x00070000 {
return hr & 0xffff
}
return hr
}
func (e *HcsError) Error() string {
s := e.title
if len(s) > 0 && s[len(s)-1] != ' ' {
s += " "
}
s += fmt.Sprintf("failed in Win32: %s (0x%x)", e.Err, win32FromError(e.Err))
if e.rest != "" {
if e.rest[0] != ' ' {
s += " "
}
s += e.rest
}
return s
}
func convertAndFreeCoTaskMemString(buffer *uint16) string {
str := syscall.UTF16ToString((*[1 << 30]uint16)(unsafe.Pointer(buffer))[:])
coTaskMemFree(unsafe.Pointer(buffer))
return str
}
func convertAndFreeCoTaskMemBytes(buffer *uint16) []byte {
return []byte(convertAndFreeCoTaskMemString(buffer))
}
func processHcsResult(err error, resultp *uint16) error {
if resultp != nil {
result := convertAndFreeCoTaskMemString(resultp)
logrus.Debugf("Result: %s", result)
}
return err
}
type HcsError = hcserror.HcsError

View file

@ -1,29 +1,11 @@
package hcsshim
import (
"encoding/json"
"net"
"github.com/sirupsen/logrus"
"github.com/Microsoft/hcsshim/internal/hns"
)
// HNSEndpoint represents a network endpoint in HNS
type HNSEndpoint struct {
Id string `json:"ID,omitempty"`
Name string `json:",omitempty"`
VirtualNetwork string `json:",omitempty"`
VirtualNetworkName string `json:",omitempty"`
Policies []json.RawMessage `json:",omitempty"`
MacAddress string `json:",omitempty"`
IPAddress net.IP `json:",omitempty"`
DNSSuffix string `json:",omitempty"`
DNSServerList string `json:",omitempty"`
GatewayAddress string `json:",omitempty"`
EnableInternalDNS bool `json:",omitempty"`
DisableICC bool `json:",omitempty"`
PrefixLength uint8 `json:",omitempty"`
IsRemoteEndpoint bool `json:",omitempty"`
}
type HNSEndpoint = hns.HNSEndpoint
//SystemType represents the type of the system on which actions are done
type SystemType string
@ -37,39 +19,19 @@ const (
// EndpointAttachDetachRequest is the structure used to send request to the container to modify the system
// Supported resource types are Network and Request Types are Add/Remove
type EndpointAttachDetachRequest struct {
ContainerID string `json:"ContainerId,omitempty"`
SystemType SystemType `json:"SystemType"`
CompartmentID uint16 `json:"CompartmentId,omitempty"`
VirtualNICName string `json:"VirtualNicName,omitempty"`
}
type EndpointAttachDetachRequest = hns.EndpointAttachDetachRequest
// EndpointResquestResponse is object to get the endpoint request response
type EndpointResquestResponse struct {
Success bool
Error string
}
type EndpointResquestResponse = hns.EndpointResquestResponse
// HNSEndpointRequest makes a HNS call to modify/query a network endpoint
func HNSEndpointRequest(method, path, request string) (*HNSEndpoint, error) {
endpoint := &HNSEndpoint{}
err := hnsCall(method, "/endpoints/"+path, request, &endpoint)
if err != nil {
return nil, err
}
return endpoint, nil
return hns.HNSEndpointRequest(method, path, request)
}
// HNSListEndpointRequest makes a HNS call to query the list of available endpoints
func HNSListEndpointRequest() ([]HNSEndpoint, error) {
var endpoint []HNSEndpoint
err := hnsCall("GET", "/endpoints/", "", &endpoint)
if err != nil {
return nil, err
}
return endpoint, nil
return hns.HNSListEndpointRequest()
}
// HotAttachEndpoint makes a HCS Call to attach the endpoint to the container
@ -120,204 +82,10 @@ func modifyNetworkEndpoint(containerID string, endpointID string, request Reques
// GetHNSEndpointByID get the Endpoint by ID
func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) {
return HNSEndpointRequest("GET", endpointID, "")
return hns.GetHNSEndpointByID(endpointID)
}
// GetHNSEndpointByName gets the endpoint filtered by Name
func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) {
hnsResponse, err := HNSListEndpointRequest()
if err != nil {
return nil, err
}
for _, hnsEndpoint := range hnsResponse {
if hnsEndpoint.Name == endpointName {
return &hnsEndpoint, nil
}
}
return nil, EndpointNotFoundError{EndpointName: endpointName}
}
// Create Endpoint by sending EndpointRequest to HNS. TODO: Create a separate HNS interface to place all these methods
func (endpoint *HNSEndpoint) Create() (*HNSEndpoint, error) {
operation := "Create"
title := "HCSShim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s", endpoint.Id)
jsonString, err := json.Marshal(endpoint)
if err != nil {
return nil, err
}
return HNSEndpointRequest("POST", "", string(jsonString))
}
// Delete Endpoint by sending EndpointRequest to HNS
func (endpoint *HNSEndpoint) Delete() (*HNSEndpoint, error) {
operation := "Delete"
title := "HCSShim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s", endpoint.Id)
return HNSEndpointRequest("DELETE", endpoint.Id, "")
}
// Update Endpoint
func (endpoint *HNSEndpoint) Update() (*HNSEndpoint, error) {
operation := "Update"
title := "HCSShim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s", endpoint.Id)
jsonString, err := json.Marshal(endpoint)
if err != nil {
return nil, err
}
err = hnsCall("POST", "/endpoints/"+endpoint.Id, string(jsonString), &endpoint)
return endpoint, err
}
// ContainerHotAttach attaches an endpoint to a running container
func (endpoint *HNSEndpoint) ContainerHotAttach(containerID string) error {
operation := "ContainerHotAttach"
title := "HCSShim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s, containerId=%s", endpoint.Id, containerID)
return modifyNetworkEndpoint(containerID, endpoint.Id, Add)
}
// ContainerHotDetach detaches an endpoint from a running container
func (endpoint *HNSEndpoint) ContainerHotDetach(containerID string) error {
operation := "ContainerHotDetach"
title := "HCSShim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s, containerId=%s", endpoint.Id, containerID)
return modifyNetworkEndpoint(containerID, endpoint.Id, Remove)
}
// ApplyACLPolicy applies a set of ACL Policies on the Endpoint
func (endpoint *HNSEndpoint) ApplyACLPolicy(policies ...*ACLPolicy) error {
operation := "ApplyACLPolicy"
title := "HCSShim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s", endpoint.Id)
for _, policy := range policies {
if policy == nil {
continue
}
jsonString, err := json.Marshal(policy)
if err != nil {
return err
}
endpoint.Policies = append(endpoint.Policies, jsonString)
}
_, err := endpoint.Update()
return err
}
// ContainerAttach attaches an endpoint to container
func (endpoint *HNSEndpoint) ContainerAttach(containerID string, compartmentID uint16) error {
operation := "ContainerAttach"
title := "HCSShim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s", endpoint.Id)
requestMessage := &EndpointAttachDetachRequest{
ContainerID: containerID,
CompartmentID: compartmentID,
SystemType: ContainerType,
}
response := &EndpointResquestResponse{}
jsonString, err := json.Marshal(requestMessage)
if err != nil {
return err
}
return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response)
}
// ContainerDetach detaches an endpoint from container
func (endpoint *HNSEndpoint) ContainerDetach(containerID string) error {
operation := "ContainerDetach"
title := "HCSShim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s", endpoint.Id)
requestMessage := &EndpointAttachDetachRequest{
ContainerID: containerID,
SystemType: ContainerType,
}
response := &EndpointResquestResponse{}
jsonString, err := json.Marshal(requestMessage)
if err != nil {
return err
}
return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response)
}
// HostAttach attaches a nic on the host
func (endpoint *HNSEndpoint) HostAttach(compartmentID uint16) error {
operation := "HostAttach"
title := "HCSShim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s", endpoint.Id)
requestMessage := &EndpointAttachDetachRequest{
CompartmentID: compartmentID,
SystemType: HostType,
}
response := &EndpointResquestResponse{}
jsonString, err := json.Marshal(requestMessage)
if err != nil {
return err
}
return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response)
}
// HostDetach detaches a nic on the host
func (endpoint *HNSEndpoint) HostDetach() error {
operation := "HostDetach"
title := "HCSShim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s", endpoint.Id)
requestMessage := &EndpointAttachDetachRequest{
SystemType: HostType,
}
response := &EndpointResquestResponse{}
jsonString, err := json.Marshal(requestMessage)
if err != nil {
return err
}
return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response)
}
// VirtualMachineNICAttach attaches an endpoint to a virtual machine

func (endpoint *HNSEndpoint) VirtualMachineNICAttach(virtualMachineNICName string) error {
operation := "VirtualMachineNicAttach"
title := "HCSShim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s", endpoint.Id)
requestMessage := &EndpointAttachDetachRequest{
VirtualNICName: virtualMachineNICName,
SystemType: VirtualMachineType,
}
response := &EndpointResquestResponse{}
jsonString, err := json.Marshal(requestMessage)
if err != nil {
return err
}
return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response)
}
// VirtualMachineNICDetach detaches an endpoint from a virtual machine
func (endpoint *HNSEndpoint) VirtualMachineNICDetach() error {
operation := "VirtualMachineNicDetach"
title := "HCSShim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s", endpoint.Id)
requestMessage := &EndpointAttachDetachRequest{
SystemType: VirtualMachineType,
}
response := &EndpointResquestResponse{}
jsonString, err := json.Marshal(requestMessage)
if err != nil {
return err
}
return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response)
return hns.GetHNSEndpointByName(endpointName)
}
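
A short, hypothetical sketch (the findEndpoint helper and endpoint name are assumptions) showing that the wrapper entry points kept in package hcsshim still work for callers even though they now forward to internal/hns.

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim"
)

// findEndpoint looks up an HNS endpoint by name and prints its identity.
func findEndpoint(name string) error {
	ep, err := hcsshim.GetHNSEndpointByName(name)
	if err != nil {
		return err
	}
	fmt.Printf("endpoint %s (%s) has IP %s\n", ep.Name, ep.Id, ep.IPAddress)
	return nil
}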

View file

@ -0,0 +1,16 @@
package hcsshim
import (
"github.com/Microsoft/hcsshim/internal/hns"
)
type HNSGlobals = hns.HNSGlobals
type HNSVersion = hns.HNSVersion
var (
HNSVersion1803 = hns.HNSVersion1803
)
func GetHNSGlobals() (*HNSGlobals, error) {
return hns.GetHNSGlobals()
}
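
A hedged sketch of using the re-exported globals as a feature gate. The atLeast1803 helper is hypothetical, and it assumes HNSGlobals carries a Version with Major/Minor fields, as in this vendored copy of internal/hns.

package main

import "github.com/Microsoft/hcsshim"

// atLeast1803 reports whether the platform HNS is at or above the 1803 level.
func atLeast1803() (bool, error) {
	g, err := hcsshim.GetHNSGlobals()
	if err != nil {
		return false, err
	}
	v, floor := g.Version, hcsshim.HNSVersion1803
	return v.Major > floor.Major ||
		(v.Major == floor.Major && v.Minor >= floor.Minor), nil
}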

View file

@ -1,141 +1,36 @@
package hcsshim
import (
"encoding/json"
"net"
"github.com/sirupsen/logrus"
"github.com/Microsoft/hcsshim/internal/hns"
)
// Subnet is associated with a network and represents a list
// of subnets available to the network
type Subnet struct {
AddressPrefix string `json:",omitempty"`
GatewayAddress string `json:",omitempty"`
Policies []json.RawMessage `json:",omitempty"`
}
type Subnet = hns.Subnet
// MacPool is associated with a network and represents a list
// of MAC addresses available to the network
type MacPool struct {
StartMacAddress string `json:",omitempty"`
EndMacAddress string `json:",omitempty"`
}
type MacPool = hns.MacPool
// HNSNetwork represents a network in HNS
type HNSNetwork struct {
Id string `json:"ID,omitempty"`
Name string `json:",omitempty"`
Type string `json:",omitempty"`
NetworkAdapterName string `json:",omitempty"`
SourceMac string `json:",omitempty"`
Policies []json.RawMessage `json:",omitempty"`
MacPools []MacPool `json:",omitempty"`
Subnets []Subnet `json:",omitempty"`
DNSSuffix string `json:",omitempty"`
DNSServerList string `json:",omitempty"`
DNSServerCompartment uint32 `json:",omitempty"`
ManagementIP string `json:",omitempty"`
AutomaticDNS bool `json:",omitempty"`
}
type hnsNetworkResponse struct {
Success bool
Error string
Output HNSNetwork
}
type hnsResponse struct {
Success bool
Error string
Output json.RawMessage
}
type HNSNetwork = hns.HNSNetwork
// HNSNetworkRequest makes a call into HNS to update/query a single network
func HNSNetworkRequest(method, path, request string) (*HNSNetwork, error) {
var network HNSNetwork
err := hnsCall(method, "/networks/"+path, request, &network)
if err != nil {
return nil, err
}
return &network, nil
return hns.HNSNetworkRequest(method, path, request)
}
// HNSListNetworkRequest makes a HNS call to query the list of available networks
func HNSListNetworkRequest(method, path, request string) ([]HNSNetwork, error) {
var network []HNSNetwork
err := hnsCall(method, "/networks/"+path, request, &network)
if err != nil {
return nil, err
}
return network, nil
return hns.HNSListNetworkRequest(method, path, request)
}
// GetHNSNetworkByID gets the HNS network by its ID
func GetHNSNetworkByID(networkID string) (*HNSNetwork, error) {
return HNSNetworkRequest("GET", networkID, "")
return hns.GetHNSNetworkByID(networkID)
}
// GetHNSNetworkByName gets the HNS network filtered by Name
func GetHNSNetworkByName(networkName string) (*HNSNetwork, error) {
hsnnetworks, err := HNSListNetworkRequest("GET", "", "")
if err != nil {
return nil, err
}
for _, hnsnetwork := range hsnnetworks {
if hnsnetwork.Name == networkName {
return &hnsnetwork, nil
}
}
return nil, NetworkNotFoundError{NetworkName: networkName}
}
// Create Network by sending NetworkRequest to HNS.
func (network *HNSNetwork) Create() (*HNSNetwork, error) {
operation := "Create"
title := "HCSShim::HNSNetwork::" + operation
logrus.Debugf(title+" id=%s", network.Id)
jsonString, err := json.Marshal(network)
if err != nil {
return nil, err
}
return HNSNetworkRequest("POST", "", string(jsonString))
}
// Delete Network by sending NetworkRequest to HNS
func (network *HNSNetwork) Delete() (*HNSNetwork, error) {
operation := "Delete"
title := "HCSShim::HNSNetwork::" + operation
logrus.Debugf(title+" id=%s", network.Id)
return HNSNetworkRequest("DELETE", network.Id, "")
}
// Creates an endpoint on the Network.
func (network *HNSNetwork) NewEndpoint(ipAddress net.IP, macAddress net.HardwareAddr) *HNSEndpoint {
return &HNSEndpoint{
VirtualNetwork: network.Id,
IPAddress: ipAddress,
MacAddress: string(macAddress),
}
}
func (network *HNSNetwork) CreateEndpoint(endpoint *HNSEndpoint) (*HNSEndpoint, error) {
operation := "CreateEndpoint"
title := "HCSShim::HNSNetwork::" + operation
logrus.Debugf(title+" id=%s, endpointId=%s", network.Id, endpoint.Id)
endpoint.VirtualNetwork = network.Id
return endpoint.Create()
}
func (network *HNSNetwork) CreateRemoteEndpoint(endpoint *HNSEndpoint) (*HNSEndpoint, error) {
operation := "CreateRemoteEndpoint"
title := "HCSShim::HNSNetwork::" + operation
logrus.Debugf(title+" id=%s", network.Id)
endpoint.IsRemoteEndpoint = true
return network.CreateEndpoint(endpoint)
return hns.GetHNSNetworkByName(networkName)
}

View file

@ -1,94 +1,57 @@
package hcsshim
import (
"github.com/Microsoft/hcsshim/internal/hns"
)
// Type of Request Support in ModifySystem
type PolicyType string
type PolicyType = hns.PolicyType
// RequestType const
const (
Nat PolicyType = "NAT"
ACL PolicyType = "ACL"
PA PolicyType = "PA"
VLAN PolicyType = "VLAN"
VSID PolicyType = "VSID"
VNet PolicyType = "VNET"
L2Driver PolicyType = "L2Driver"
Isolation PolicyType = "Isolation"
QOS PolicyType = "QOS"
OutboundNat PolicyType = "OutBoundNAT"
ExternalLoadBalancer PolicyType = "ELB"
Route PolicyType = "ROUTE"
Nat = hns.Nat
ACL = hns.ACL
PA = hns.PA
VLAN = hns.VLAN
VSID = hns.VSID
VNet = hns.VNet
L2Driver = hns.L2Driver
Isolation = hns.Isolation
QOS = hns.QOS
OutboundNat = hns.OutboundNat
ExternalLoadBalancer = hns.ExternalLoadBalancer
Route = hns.Route
)
type NatPolicy struct {
Type PolicyType `json:"Type"`
Protocol string
InternalPort uint16
ExternalPort uint16
}
type NatPolicy = hns.NatPolicy
type QosPolicy struct {
Type PolicyType `json:"Type"`
MaximumOutgoingBandwidthInBytes uint64
}
type QosPolicy = hns.QosPolicy
type IsolationPolicy struct {
Type PolicyType `json:"Type"`
VLAN uint
VSID uint
InDefaultIsolation bool
}
type IsolationPolicy = hns.IsolationPolicy
type VlanPolicy struct {
Type PolicyType `json:"Type"`
VLAN uint
}
type VlanPolicy = hns.VlanPolicy
type VsidPolicy struct {
Type PolicyType `json:"Type"`
VSID uint
}
type VsidPolicy = hns.VsidPolicy
type PaPolicy struct {
Type PolicyType `json:"Type"`
PA string `json:"PA"`
}
type PaPolicy = hns.PaPolicy
type OutboundNatPolicy struct {
Policy
VIP string `json:"VIP,omitempty"`
Exceptions []string `json:"ExceptionList,omitempty"`
}
type OutboundNatPolicy = hns.OutboundNatPolicy
type ActionType string
type DirectionType string
type RuleType string
type ActionType = hns.ActionType
type DirectionType = hns.DirectionType
type RuleType = hns.RuleType
const (
Allow ActionType = "Allow"
Block ActionType = "Block"
Allow = hns.Allow
Block = hns.Block
In DirectionType = "In"
Out DirectionType = "Out"
In = hns.In
Out = hns.Out
Host RuleType = "Host"
Switch RuleType = "Switch"
Host = hns.Host
Switch = hns.Switch
)
type ACLPolicy struct {
Type PolicyType `json:"Type"`
Protocol uint16
InternalPort uint16
Action ActionType
Direction DirectionType
LocalAddresses string
RemoteAddresses string
LocalPort uint16
RemotePort uint16
RuleType RuleType `json:"RuleType,omitempty"`
Priority uint16
ServiceName string
}
type ACLPolicy = hns.ACLPolicy
type Policy struct {
Type PolicyType `json:"Type"`
}
type Policy = hns.Policy
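
A hypothetical sketch (the program below is not part of this change) of why the alias form matters: because these are Go type aliases rather than new named types, existing callers keep constructing policies through the hcsshim identifiers and marshalling them into the raw-JSON policy fragments that HNS consumes.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Microsoft/hcsshim"
)

func main() {
	acl := hcsshim.ACLPolicy{
		Type:      hcsshim.ACL,
		Action:    hcsshim.Allow,
		Direction: hcsshim.In,
		Protocol:  6, // TCP
		LocalPort: 80,
	}
	b, err := json.Marshal(acl)
	if err != nil {
		panic(err)
	}
	// This JSON fragment is what ends up appended to an endpoint's Policies list.
	fmt.Println(string(b))
}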

View file

@ -1,200 +1,47 @@
package hcsshim
import (
"encoding/json"
"github.com/sirupsen/logrus"
"github.com/Microsoft/hcsshim/internal/hns"
)
// RoutePolicy is a structure defining schema for Route based Policy
type RoutePolicy struct {
Policy
DestinationPrefix string `json:"DestinationPrefix,omitempty"`
NextHop string `json:"NextHop,omitempty"`
EncapEnabled bool `json:"NeedEncap,omitempty"`
}
type RoutePolicy = hns.RoutePolicy
// ELBPolicy is a structure defining schema for ELB LoadBalancing based Policy
type ELBPolicy struct {
LBPolicy
SourceVIP string `json:"SourceVIP,omitempty"`
VIPs []string `json:"VIPs,omitempty"`
ILB bool `json:"ILB,omitempty"`
}
type ELBPolicy = hns.ELBPolicy
// LBPolicy is a structure defining schema for LoadBalancing based Policy
type LBPolicy struct {
Policy
Protocol uint16 `json:"Protocol,omitempty"`
InternalPort uint16
ExternalPort uint16
}
type LBPolicy = hns.LBPolicy
// PolicyList is a structure defining schema for Policy list request
type PolicyList struct {
ID string `json:"ID,omitempty"`
EndpointReferences []string `json:"References,omitempty"`
Policies []json.RawMessage `json:"Policies,omitempty"`
}
type PolicyList = hns.PolicyList
// HNSPolicyListRequest makes a call into HNS to update/query a single network
func HNSPolicyListRequest(method, path, request string) (*PolicyList, error) {
var policy PolicyList
err := hnsCall(method, "/policylists/"+path, request, &policy)
if err != nil {
return nil, err
}
return &policy, nil
return hns.HNSPolicyListRequest(method, path, request)
}
// HNSListPolicyListRequest gets all the policy list
func HNSListPolicyListRequest() ([]PolicyList, error) {
var plist []PolicyList
err := hnsCall("GET", "/policylists/", "", &plist)
if err != nil {
return nil, err
}
return plist, nil
return hns.HNSListPolicyListRequest()
}
// PolicyListRequest makes a HNS call to modify/query a network policy list
func PolicyListRequest(method, path, request string) (*PolicyList, error) {
policylist := &PolicyList{}
err := hnsCall(method, "/policylists/"+path, request, &policylist)
if err != nil {
return nil, err
}
return policylist, nil
return hns.PolicyListRequest(method, path, request)
}
// GetPolicyListByID get the policy list by ID
func GetPolicyListByID(policyListID string) (*PolicyList, error) {
return PolicyListRequest("GET", policyListID, "")
}
// Create PolicyList by sending PolicyListRequest to HNS.
func (policylist *PolicyList) Create() (*PolicyList, error) {
operation := "Create"
title := "HCSShim::PolicyList::" + operation
logrus.Debugf(title+" id=%s", policylist.ID)
jsonString, err := json.Marshal(policylist)
if err != nil {
return nil, err
}
return PolicyListRequest("POST", "", string(jsonString))
}
// Delete deletes PolicyList
func (policylist *PolicyList) Delete() (*PolicyList, error) {
operation := "Delete"
title := "HCSShim::PolicyList::" + operation
logrus.Debugf(title+" id=%s", policylist.ID)
return PolicyListRequest("DELETE", policylist.ID, "")
}
// AddEndpoint add an endpoint to a Policy List
func (policylist *PolicyList) AddEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) {
operation := "AddEndpoint"
title := "HCSShim::PolicyList::" + operation
logrus.Debugf(title+" id=%s, endpointId:%s", policylist.ID, endpoint.Id)
_, err := policylist.Delete()
if err != nil {
return nil, err
}
// Add Endpoint to the Existing List
policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id)
return policylist.Create()
}
// RemoveEndpoint removes an endpoint from the Policy List
func (policylist *PolicyList) RemoveEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) {
operation := "RemoveEndpoint"
title := "HCSShim::PolicyList::" + operation
logrus.Debugf(title+" id=%s, endpointId:%s", policylist.ID, endpoint.Id)
_, err := policylist.Delete()
if err != nil {
return nil, err
}
elementToRemove := "/endpoints/" + endpoint.Id
var references []string
for _, endpointReference := range policylist.EndpointReferences {
if endpointReference == elementToRemove {
continue
}
references = append(references, endpointReference)
}
policylist.EndpointReferences = references
return policylist.Create()
return hns.GetPolicyListByID(policyListID)
}
// AddLoadBalancer policy list for the specified endpoints
func AddLoadBalancer(endpoints []HNSEndpoint, isILB bool, sourceVIP, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*PolicyList, error) {
operation := "AddLoadBalancer"
title := "HCSShim::PolicyList::" + operation
logrus.Debugf(title+" endpointId=%v, isILB=%v, sourceVIP=%s, vip=%s, protocol=%v, internalPort=%v, externalPort=%v", endpoints, isILB, sourceVIP, vip, protocol, internalPort, externalPort)
policylist := &PolicyList{}
elbPolicy := &ELBPolicy{
SourceVIP: sourceVIP,
ILB: isILB,
}
if len(vip) > 0 {
elbPolicy.VIPs = []string{vip}
}
elbPolicy.Type = ExternalLoadBalancer
elbPolicy.Protocol = protocol
elbPolicy.InternalPort = internalPort
elbPolicy.ExternalPort = externalPort
for _, endpoint := range endpoints {
policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id)
}
jsonString, err := json.Marshal(elbPolicy)
if err != nil {
return nil, err
}
policylist.Policies = append(policylist.Policies, jsonString)
return policylist.Create()
return hns.AddLoadBalancer(endpoints, isILB, sourceVIP, vip, protocol, internalPort, externalPort)
}
// AddRoute adds route policy list for the specified endpoints
func AddRoute(endpoints []HNSEndpoint, destinationPrefix string, nextHop string, encapEnabled bool) (*PolicyList, error) {
operation := "AddRoute"
title := "HCSShim::PolicyList::" + operation
logrus.Debugf(title+" destinationPrefix:%s", destinationPrefix)
policylist := &PolicyList{}
rPolicy := &RoutePolicy{
DestinationPrefix: destinationPrefix,
NextHop: nextHop,
EncapEnabled: encapEnabled,
}
rPolicy.Type = Route
for _, endpoint := range endpoints {
policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id)
}
jsonString, err := json.Marshal(rPolicy)
if err != nil {
return nil, err
}
policylist.Policies = append(policylist.Policies, jsonString)
return policylist.Create()
return hns.AddRoute(endpoints, destinationPrefix, nextHop, encapEnabled)
}
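
A hypothetical usage sketch of the forwarding wrapper above; the exposeService helper, VIP value, and port numbers are assumptions chosen for illustration.

package main

import "github.com/Microsoft/hcsshim"

// exposeService publishes internal TCP port 8080 on the given endpoints
// behind an external VIP on port 80.
func exposeService(endpoints []hcsshim.HNSEndpoint, vip string) error {
	// protocol 6 = TCP; an empty source VIP lets HNS pick one.
	pl, err := hcsshim.AddLoadBalancer(endpoints, false, "", vip, 6, 8080, 80)
	if err != nil {
		return err
	}
	_ = pl.ID // ID of the policy list created in HNS
	return nil
}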

View file

@ -0,0 +1,13 @@
package hcsshim
import (
"github.com/Microsoft/hcsshim/internal/hns"
)
type HNSSupportedFeatures = hns.HNSSupportedFeatures
type HNSAclFeatures = hns.HNSAclFeatures
func GetHNSSupportedFeatures() HNSSupportedFeatures {
return hns.GetHNSSupportedFeatures()
}

View file

@ -1,106 +1,27 @@
package hcsshim
import (
"encoding/json"
"io"
"time"
"github.com/Microsoft/hcsshim/internal/schema1"
)
// ProcessConfig is used as both the input of Container.CreateProcess
// and to convert the parameters to JSON for passing onto the HCS
type ProcessConfig struct {
ApplicationName string `json:",omitempty"`
CommandLine string `json:",omitempty"`
CommandArgs []string `json:",omitempty"` // Used by Linux Containers on Windows
User string `json:",omitempty"`
WorkingDirectory string `json:",omitempty"`
Environment map[string]string `json:",omitempty"`
EmulateConsole bool `json:",omitempty"`
CreateStdInPipe bool `json:",omitempty"`
CreateStdOutPipe bool `json:",omitempty"`
CreateStdErrPipe bool `json:",omitempty"`
ConsoleSize [2]uint `json:",omitempty"`
CreateInUtilityVm bool `json:",omitempty"` // Used by Linux Containers on Windows
OCISpecification *json.RawMessage `json:",omitempty"` // Used by Linux Containers on Windows
}
type ProcessConfig = schema1.ProcessConfig
type Layer struct {
ID string
Path string
}
type MappedDir struct {
HostPath string
ContainerPath string
ReadOnly bool
BandwidthMaximum uint64
IOPSMaximum uint64
CreateInUtilityVM bool
}
type MappedPipe struct {
HostPath string
ContainerPipeName string
}
type HvRuntime struct {
ImagePath string `json:",omitempty"`
SkipTemplate bool `json:",omitempty"`
LinuxInitrdFile string `json:",omitempty"` // File under ImagePath on host containing an initrd image for starting a Linux utility VM
LinuxKernelFile string `json:",omitempty"` // File under ImagePath on host containing a kernel for starting a Linux utility VM
LinuxBootParameters string `json:",omitempty"` // Additional boot parameters for starting a Linux Utility VM in initrd mode
BootSource string `json:",omitempty"` // "Vhd" for Linux Utility VM booting from VHD
WritableBootSource bool `json:",omitempty"` // Linux Utility VM booting from VHD
}
type MappedVirtualDisk struct {
HostPath string `json:",omitempty"` // Path to VHD on the host
ContainerPath string // Platform-specific mount point path in the container
CreateInUtilityVM bool `json:",omitempty"`
ReadOnly bool `json:",omitempty"`
Cache string `json:",omitempty"` // "" (Unspecified); "Disabled"; "Enabled"; "Private"; "PrivateAllowSharing"
AttachOnly bool `json:",omitempty"`
}
type Layer = schema1.Layer
type MappedDir = schema1.MappedDir
type MappedPipe = schema1.MappedPipe
type HvRuntime = schema1.HvRuntime
type MappedVirtualDisk = schema1.MappedVirtualDisk
// ContainerConfig is used as both the input of CreateContainer
// and to convert the parameters to JSON for passing onto the HCS
type ContainerConfig struct {
SystemType string // HCS requires this to be hard-coded to "Container"
Name string // Name of the container. We use the docker ID.
Owner string `json:",omitempty"` // The management platform that created this container
VolumePath string `json:",omitempty"` // Windows volume path for scratch space. Used by Windows Server Containers only. Format \\?\\Volume{GUID}
IgnoreFlushesDuringBoot bool `json:",omitempty"` // Optimization hint for container startup in Windows
LayerFolderPath string `json:",omitempty"` // Where the layer folders are located. Used by Windows Server Containers only. Format %root%\windowsfilter\containerID
Layers []Layer // List of storage layers. Required for Windows Server and Hyper-V Containers. Format ID=GUID;Path=%root%\windowsfilter\layerID
Credentials string `json:",omitempty"` // Credentials information
ProcessorCount uint32 `json:",omitempty"` // Number of processors to assign to the container.
ProcessorWeight uint64 `json:",omitempty"` // CPU shares (relative weight to other containers with cpu shares). Range is from 1 to 10000. A value of 0 results in default shares.
ProcessorMaximum int64 `json:",omitempty"` // Specifies the portion of processor cycles that this container can use as a percentage times 100. Range is from 1 to 10000. A value of 0 results in no limit.
StorageIOPSMaximum uint64 `json:",omitempty"` // Maximum Storage IOPS
StorageBandwidthMaximum uint64 `json:",omitempty"` // Maximum Storage Bandwidth in bytes per second
StorageSandboxSize uint64 `json:",omitempty"` // Size in bytes that the container system drive should be expanded to if smaller
MemoryMaximumInMB int64 `json:",omitempty"` // Maximum memory available to the container in Megabytes
HostName string `json:",omitempty"` // Hostname
MappedDirectories []MappedDir `json:",omitempty"` // List of mapped directories (volumes/mounts)
MappedPipes []MappedPipe `json:",omitempty"` // List of mapped Windows named pipes
HvPartition bool // True if it is a Hyper-V Container
NetworkSharedContainerName string `json:",omitempty"` // Name (ID) of the container that we will share the network stack with.
EndpointList []string `json:",omitempty"` // List of networking endpoints to be attached to container
HvRuntime *HvRuntime `json:",omitempty"` // Hyper-V container settings. Used by Hyper-V containers only. Format ImagePath=%root%\BaseLayerID\UtilityVM
Servicing bool `json:",omitempty"` // True if this container is for servicing
AllowUnqualifiedDNSQuery bool `json:",omitempty"` // True to allow unqualified DNS name resolution
DNSSearchList string `json:",omitempty"` // Comma-separated list of DNS suffixes to use for name resolution
ContainerType string `json:",omitempty"` // "Linux" for Linux containers on Windows. Omitted otherwise.
TerminateOnLastHandleClosed bool `json:",omitempty"` // Should HCS terminate the container once all handles have been closed
MappedVirtualDisks []MappedVirtualDisk `json:",omitempty"` // Array of virtual disks to mount at start
}
type ContainerConfig = schema1.ContainerConfig
type ComputeSystemQuery struct {
IDs []string `json:"Ids,omitempty"`
Types []string `json:",omitempty"`
Names []string `json:",omitempty"`
Owners []string `json:",omitempty"`
}
type ComputeSystemQuery = schema1.ComputeSystemQuery
// Container represents a created (but not necessarily running) container.
type Container interface {

View file

@ -0,0 +1,69 @@
package guid
import (
"crypto/rand"
"encoding/json"
"fmt"
"io"
"strconv"
"strings"
)
var _ = (json.Marshaler)(&GUID{})
var _ = (json.Unmarshaler)(&GUID{})
type GUID [16]byte
func New() GUID {
g := GUID{}
_, err := io.ReadFull(rand.Reader, g[:])
if err != nil {
panic(err)
}
return g
}
func (g GUID) String() string {
return fmt.Sprintf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x-%02x", g[3], g[2], g[1], g[0], g[5], g[4], g[7], g[6], g[8:10], g[10:])
}
func FromString(s string) GUID {
if len(s) != 36 {
panic(fmt.Sprintf("invalid GUID length: %d", len(s)))
}
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
panic("invalid GUID format")
}
indexOrder := [16]int{
0, 2, 4, 6,
9, 11,
14, 16,
19, 21,
24, 26, 28, 30, 32, 34,
}
byteOrder := [16]int{
3, 2, 1, 0,
5, 4,
7, 6,
8, 9,
10, 11, 12, 13, 14, 15,
}
var g GUID
for i, x := range indexOrder {
b, err := strconv.ParseInt(s[x:x+2], 16, 16)
if err != nil {
panic(err)
}
g[byteOrder[i]] = byte(b)
}
return g
}
func (g GUID) MarshalJSON() ([]byte, error) {
return json.Marshal(g.String())
}
func (g *GUID) UnmarshalJSON(data []byte) error {
*g = FromString(strings.Trim(string(data), "\""))
return nil
}
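
A minimal sketch of exercising the internal guid package above; the test name is an assumption, and because the package lives under internal/ it is only importable from inside the hcsshim module.

package guid_test

import (
	"testing"

	"github.com/Microsoft/hcsshim/internal/guid"
)

// TestRoundTrip checks that String and FromString invert each other for
// well-formed input (FromString panics on malformed strings).
func TestRoundTrip(t *testing.T) {
	g := guid.New()              // 16 random bytes
	s := g.String()              // "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
	if guid.FromString(s) != g { // GUID is a [16]byte array, so != compares values
		t.Fatalf("round trip failed for %s", s)
	}
}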

View file

@ -1,8 +1,10 @@
package hcsshim
package hcs
import (
"sync"
"syscall"
"github.com/Microsoft/hcsshim/internal/interop"
)
var (
@ -62,7 +64,7 @@ func closeChannels(channels notificationChannels) {
func notificationWatcher(notificationType hcsNotification, callbackNumber uintptr, notificationStatus uintptr, notificationData *uint16) uintptr {
var result error
if int32(notificationStatus) < 0 {
result = syscall.Errno(win32FromHresult(notificationStatus))
result = interop.Win32FromHresult(notificationStatus)
}
callbackMapLock.RLock()

View file

@ -1,4 +1,4 @@
package hcsshim
package hcs
import "C"

View file

@ -0,0 +1,279 @@
package hcs
import (
"encoding/json"
"errors"
"fmt"
"syscall"
"github.com/Microsoft/hcsshim/internal/interop"
"github.com/sirupsen/logrus"
)
var (
// ErrComputeSystemDoesNotExist is an error encountered when the container being operated on no longer exists
ErrComputeSystemDoesNotExist = syscall.Errno(0xc037010e)
// ErrElementNotFound is an error encountered when the object being referenced does not exist
ErrElementNotFound = syscall.Errno(0x490)
// ErrNotSupported is an error encountered when the request is not supported
ErrNotSupported = syscall.Errno(0x32)
// ErrInvalidData is an error encountered when the request being sent to hcs is invalid/unsupported
// decimal -2147024883 / hex 0x8007000d
ErrInvalidData = syscall.Errno(0xd)
// ErrHandleClose is an error encountered when the handle generating the notification being waited on has been closed
ErrHandleClose = errors.New("hcsshim: the handle generating this notification has been closed")
// ErrAlreadyClosed is an error encountered when using a handle that has been closed by the Close method
ErrAlreadyClosed = errors.New("hcsshim: the handle has already been closed")
// ErrInvalidNotificationType is an error encountered when an invalid notification type is used
ErrInvalidNotificationType = errors.New("hcsshim: invalid notification type")
// ErrInvalidProcessState is an error encountered when the process is not in a valid state for the requested operation
ErrInvalidProcessState = errors.New("the process is in an invalid state for the attempted operation")
// ErrTimeout is an error encountered when waiting on a notification times out
ErrTimeout = errors.New("hcsshim: timeout waiting for notification")
// ErrUnexpectedContainerExit is the error encountered when a container exits while waiting for
// a different expected notification
ErrUnexpectedContainerExit = errors.New("unexpected container exit")
// ErrUnexpectedProcessAbort is the error encountered when communication with the compute service
// is lost while waiting for a notification
ErrUnexpectedProcessAbort = errors.New("lost communication with compute service")
// ErrUnexpectedValue is an error encountered when hcs returns an invalid value
ErrUnexpectedValue = errors.New("unexpected value returned from hcs")
// ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container
ErrVmcomputeAlreadyStopped = syscall.Errno(0xc0370110)
// ErrVmcomputeOperationPending is an error encountered when the operation is being completed asynchronously
ErrVmcomputeOperationPending = syscall.Errno(0xC0370103)
// ErrVmcomputeOperationInvalidState is an error encountered when the compute system is not in a valid state for the requested operation
ErrVmcomputeOperationInvalidState = syscall.Errno(0xc0370105)
// ErrProcNotFound is an error encountered when the process cannot be found
ErrProcNotFound = syscall.Errno(0x7f)
// ErrVmcomputeOperationAccessIsDenied is an error which can be encountered when enumerating compute systems in RS1/RS2
// builds when the underlying silo might be in the process of terminating. HCS was fixed in RS3.
ErrVmcomputeOperationAccessIsDenied = syscall.Errno(0x5)
// ErrVmcomputeInvalidJSON is an error encountered when the compute system does not support/understand the messages sent by management
ErrVmcomputeInvalidJSON = syscall.Errno(0xc037010d)
// ErrVmcomputeUnknownMessage is an error encountered when the guest compute system doesn't support the message
ErrVmcomputeUnknownMessage = syscall.Errno(0xc037010b)
// ErrPlatformNotSupported is an error encountered when the request is not supported on the current platform
ErrPlatformNotSupported = errors.New("unsupported platform request")
)
type ErrorEvent struct {
Message string `json:"Message,omitempty"` // Fully formatted error message
StackTrace string `json:"StackTrace,omitempty"` // Stack trace in string form
Provider string `json:"Provider,omitempty"`
EventID uint16 `json:"EventId,omitempty"`
Flags uint32 `json:"Flags,omitempty"`
Source string `json:"Source,omitempty"`
//Data []EventData `json:"Data,omitempty"` // Omit this as HCS doesn't encode this well. It's more confusing to include. It is however logged in debug mode (see processHcsResult function)
}
type hcsResult struct {
Error int32
ErrorMessage string
ErrorEvents []ErrorEvent `json:"ErrorEvents,omitempty"`
}
func (ev *ErrorEvent) String() string {
evs := "[Event Detail: " + ev.Message
if ev.StackTrace != "" {
evs += " Stack Trace: " + ev.StackTrace
}
if ev.Provider != "" {
evs += " Provider: " + ev.Provider
}
if ev.EventID != 0 {
evs = fmt.Sprintf("%s EventID: %d", evs, ev.EventID)
}
if ev.Flags != 0 {
evs = fmt.Sprintf("%s flags: %d", evs, ev.Flags)
}
if ev.Source != "" {
evs += " Source: " + ev.Source
}
evs += "]"
return evs
}
func processHcsResult(resultp *uint16) []ErrorEvent {
if resultp != nil {
resultj := interop.ConvertAndFreeCoTaskMemString(resultp)
logrus.Debugf("Result: %s", resultj)
result := &hcsResult{}
if err := json.Unmarshal([]byte(resultj), result); err != nil {
logrus.Warnf("Could not unmarshal HCS result %s: %s", resultj, err)
return nil
}
return result.ErrorEvents
}
return nil
}
type HcsError struct {
Op string
Err error
Events []ErrorEvent
}
func (e *HcsError) Error() string {
s := e.Op + ": " + e.Err.Error()
for _, ev := range e.Events {
s += "\n" + ev.String()
}
return s
}
// ProcessError is an error encountered in HCS during an operation on a Process object
type ProcessError struct {
SystemID string
Pid int
Op string
Err error
Events []ErrorEvent
}
// SystemError is an error encountered in HCS during an operation on a Container object
type SystemError struct {
ID string
Op string
Err error
Extra string
Events []ErrorEvent
}
func (e *SystemError) Error() string {
s := e.Op + " " + e.ID + ": " + e.Err.Error()
for _, ev := range e.Events {
s += "\n" + ev.String()
}
if e.Extra != "" {
s += "\n(extra info: " + e.Extra + ")"
}
return s
}
func makeSystemError(system *System, op string, extra string, err error, events []ErrorEvent) error {
// Don't double wrap errors
if _, ok := err.(*SystemError); ok {
return err
}
return &SystemError{
ID: system.ID(),
Op: op,
Extra: extra,
Err: err,
Events: events,
}
}
func (e *ProcessError) Error() string {
s := fmt.Sprintf("%s %s:%d: %s", e.Op, e.SystemID, e.Pid, e.Err.Error())
for _, ev := range e.Events {
s += "\n" + ev.String()
}
return s
}
func makeProcessError(process *Process, op string, err error, events []ErrorEvent) error {
// Don't double wrap errors
if _, ok := err.(*ProcessError); ok {
return err
}
return &ProcessError{
Pid: process.Pid(),
SystemID: process.SystemID(),
Op: op,
Err: err,
Events: events,
}
}
// IsNotExist checks if an error is caused by the Container or Process not existing.
// Note: Currently, ErrElementNotFound can mean that a Process has either
// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
// will currently return true when the error is ErrElementNotFound or ErrProcNotFound.
func IsNotExist(err error) bool {
err = getInnerError(err)
return err == ErrComputeSystemDoesNotExist ||
err == ErrElementNotFound ||
err == ErrProcNotFound
}
// IsAlreadyClosed checks if an error is caused by the Container or Process having been
// already closed by a call to the Close() method.
func IsAlreadyClosed(err error) bool {
err = getInnerError(err)
return err == ErrAlreadyClosed
}
// IsPending returns a boolean indicating whether the error is that
// the requested operation is being completed in the background.
func IsPending(err error) bool {
err = getInnerError(err)
return err == ErrVmcomputeOperationPending
}
// IsTimeout returns a boolean indicating whether the error is caused by
// a timeout waiting for the operation to complete.
func IsTimeout(err error) bool {
err = getInnerError(err)
return err == ErrTimeout
}
// IsAlreadyStopped returns a boolean indicating whether the error is caused by
// a Container or Process being already stopped.
// Note: Currently, ErrElementNotFound can mean that a Process has either
// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
// will currently return true when the error is ErrElementNotFound or ErrProcNotFound.
func IsAlreadyStopped(err error) bool {
err = getInnerError(err)
return err == ErrVmcomputeAlreadyStopped ||
err == ErrElementNotFound ||
err == ErrProcNotFound
}
// IsNotSupported returns a boolean indicating whether the error is caused by
// unsupported platform requests
// Note: Currently, unsupported platform requests can mean that either
// ErrVmcomputeInvalidJSON, ErrInvalidData, ErrNotSupported or ErrVmcomputeUnknownMessage
// is thrown from the Platform
func IsNotSupported(err error) bool {
err = getInnerError(err)
// If Platform doesn't recognize or support the request sent, below errors are seen
return err == ErrVmcomputeInvalidJSON ||
err == ErrInvalidData ||
err == ErrNotSupported ||
err == ErrVmcomputeUnknownMessage
}
func getInnerError(err error) error {
switch pe := err.(type) {
case nil:
return nil
case *HcsError:
err = pe.Err
case *SystemError:
err = pe.Err
case *ProcessError:
err = pe.Err
}
return err
}
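
A small sketch (the ignorable helper is hypothetical and not part of this file) of why the predicates above exist: operations in this package wrap the raw syscall.Errno values in *SystemError or *ProcessError, so callers test failures with the IsXxx helpers rather than comparing against the Err* values directly.

// If it lived in package hcs alongside the code above:
func ignorable(err error) bool {
	return err == nil ||
		IsAlreadyStopped(err) ||
		IsAlreadyClosed(err) ||
		IsNotExist(err)
}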

View file

@ -0,0 +1,47 @@
// Shim for the Host Compute Service (HCS) to manage Windows Server
// containers and Hyper-V containers.
package hcs
import (
"syscall"
)
//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go hcs.go
//sys hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) = vmcompute.HcsEnumerateComputeSystems?
//sys hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *hcsSystem, result **uint16) (hr error) = vmcompute.HcsCreateComputeSystem?
//sys hcsOpenComputeSystem(id string, computeSystem *hcsSystem, result **uint16) (hr error) = vmcompute.HcsOpenComputeSystem?
//sys hcsCloseComputeSystem(computeSystem hcsSystem) (hr error) = vmcompute.HcsCloseComputeSystem?
//sys hcsStartComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsStartComputeSystem?
//sys hcsShutdownComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsShutdownComputeSystem?
//sys hcsTerminateComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsTerminateComputeSystem?
//sys hcsPauseComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsPauseComputeSystem?
//sys hcsResumeComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsResumeComputeSystem?
//sys hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetComputeSystemProperties?
//sys hcsModifyComputeSystem(computeSystem hcsSystem, configuration string, result **uint16) (hr error) = vmcompute.HcsModifyComputeSystem?
//sys hcsRegisterComputeSystemCallback(computeSystem hcsSystem, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) = vmcompute.HcsRegisterComputeSystemCallback?
//sys hcsUnregisterComputeSystemCallback(callbackHandle hcsCallback) (hr error) = vmcompute.HcsUnregisterComputeSystemCallback?
//sys hcsCreateProcess(computeSystem hcsSystem, processParameters string, processInformation *hcsProcessInformation, process *hcsProcess, result **uint16) (hr error) = vmcompute.HcsCreateProcess?
//sys hcsOpenProcess(computeSystem hcsSystem, pid uint32, process *hcsProcess, result **uint16) (hr error) = vmcompute.HcsOpenProcess?
//sys hcsCloseProcess(process hcsProcess) (hr error) = vmcompute.HcsCloseProcess?
//sys hcsTerminateProcess(process hcsProcess, result **uint16) (hr error) = vmcompute.HcsTerminateProcess?
//sys hcsGetProcessInfo(process hcsProcess, processInformation *hcsProcessInformation, result **uint16) (hr error) = vmcompute.HcsGetProcessInfo?
//sys hcsGetProcessProperties(process hcsProcess, processProperties **uint16, result **uint16) (hr error) = vmcompute.HcsGetProcessProperties?
//sys hcsModifyProcess(process hcsProcess, settings string, result **uint16) (hr error) = vmcompute.HcsModifyProcess?
//sys hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetServiceProperties?
//sys hcsRegisterProcessCallback(process hcsProcess, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) = vmcompute.HcsRegisterProcessCallback?
//sys hcsUnregisterProcessCallback(callbackHandle hcsCallback) (hr error) = vmcompute.HcsUnregisterProcessCallback?
type hcsSystem syscall.Handle
type hcsProcess syscall.Handle
type hcsCallback syscall.Handle
type hcsProcessInformation struct {
ProcessId uint32
Reserved uint32
StdInput syscall.Handle
StdOutput syscall.Handle
StdError syscall.Handle
}
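
An approximate, hand-written illustration of the kind of stub that the go:generate mksyscall_windows step emits into zsyscall_windows.go for one of the //sys lines above; the actual generated file differs in detail. The trailing '?' on a declaration means the stub returns the Find() error when vmcompute.dll lacks the export instead of panicking. This is a sketch, not code to be added to the package.

package hcs

import (
	"syscall"

	"github.com/Microsoft/hcsshim/internal/interop"
	"golang.org/x/sys/windows"
)

var (
	modvmcompute              = windows.NewLazySystemDLL("vmcompute.dll")
	procHcsCloseComputeSystem = modvmcompute.NewProc("HcsCloseComputeSystem")
)

func hcsCloseComputeSystem(computeSystem hcsSystem) (hr error) {
	// Fail gracefully if the export is missing on this Windows build.
	if hr = procHcsCloseComputeSystem.Find(); hr != nil {
		return
	}
	r0, _, _ := syscall.Syscall(procHcsCloseComputeSystem.Addr(), 1, uintptr(computeSystem), 0, 0)
	if int32(r0) < 0 {
		// Negative HRESULTs are converted to Win32 error codes where possible.
		hr = interop.Win32FromHresult(r0)
	}
	return
}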

View file

@ -0,0 +1,393 @@
package hcs
import (
"encoding/json"
"fmt"
"io"
"sync"
"syscall"
"time"
"github.com/Microsoft/hcsshim/internal/interop"
"github.com/sirupsen/logrus"
)
// Process represents a process running in a compute system
type Process struct {
handleLock sync.RWMutex
handle hcsProcess
processID int
system *System
cachedPipes *cachedPipes
callbackNumber uintptr
}
type cachedPipes struct {
stdIn syscall.Handle
stdOut syscall.Handle
stdErr syscall.Handle
}
type processModifyRequest struct {
Operation string
ConsoleSize *consoleSize `json:",omitempty"`
CloseHandle *closeHandle `json:",omitempty"`
}
type consoleSize struct {
Height uint16
Width uint16
}
type closeHandle struct {
Handle string
}
type ProcessStatus struct {
ProcessID uint32
Exited bool
ExitCode uint32
LastWaitResult int32
}
const (
stdIn string = "StdIn"
stdOut string = "StdOut"
stdErr string = "StdErr"
)
const (
modifyConsoleSize string = "ConsoleSize"
modifyCloseHandle string = "CloseHandle"
)
// Pid returns the process ID of the process within the container.
func (process *Process) Pid() int {
return process.processID
}
// SystemID returns the ID of the process's compute system.
func (process *Process) SystemID() string {
return process.system.ID()
}
// Kill signals the process to terminate but does not wait for it to finish terminating.
func (process *Process) Kill() error {
process.handleLock.RLock()
defer process.handleLock.RUnlock()
operation := "Kill"
title := "hcsshim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
if process.handle == 0 {
return makeProcessError(process, operation, ErrAlreadyClosed, nil)
}
var resultp *uint16
completed := false
go syscallWatcher(fmt.Sprintf("TerminateProcess %s: %d", process.SystemID(), process.Pid()), &completed)
err := hcsTerminateProcess(process.handle, &resultp)
completed = true
events := processHcsResult(resultp)
if err != nil {
return makeProcessError(process, operation, err, events)
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return nil
}
// Wait waits for the process to exit.
func (process *Process) Wait() error {
operation := "Wait"
title := "hcsshim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
err := waitForNotification(process.callbackNumber, hcsNotificationProcessExited, nil)
if err != nil {
return makeProcessError(process, operation, err, nil)
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return nil
}
// WaitTimeout waits for the process to exit or the duration to elapse. It returns
// an error if the timeout elapses before the process exits.
func (process *Process) WaitTimeout(timeout time.Duration) error {
operation := "WaitTimeout"
title := "hcsshim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
err := waitForNotification(process.callbackNumber, hcsNotificationProcessExited, &timeout)
if err != nil {
return makeProcessError(process, operation, err, nil)
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return nil
}
// ResizeConsole resizes the console of the process.
func (process *Process) ResizeConsole(width, height uint16) error {
process.handleLock.RLock()
defer process.handleLock.RUnlock()
operation := "ResizeConsole"
title := "hcsshim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
if process.handle == 0 {
return makeProcessError(process, operation, ErrAlreadyClosed, nil)
}
modifyRequest := processModifyRequest{
Operation: modifyConsoleSize,
ConsoleSize: &consoleSize{
Height: height,
Width: width,
},
}
modifyRequestb, err := json.Marshal(modifyRequest)
if err != nil {
return err
}
modifyRequestStr := string(modifyRequestb)
var resultp *uint16
err = hcsModifyProcess(process.handle, modifyRequestStr, &resultp)
events := processHcsResult(resultp)
if err != nil {
return makeProcessError(process, operation, err, events)
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return nil
}
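// Properties returns the current status of the process, as reported by HCS.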
func (process *Process) Properties() (*ProcessStatus, error) {
process.handleLock.RLock()
defer process.handleLock.RUnlock()
operation := "Properties"
title := "hcsshim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
if process.handle == 0 {
return nil, makeProcessError(process, operation, ErrAlreadyClosed, nil)
}
var (
resultp *uint16
propertiesp *uint16
)
completed := false
go syscallWatcher(fmt.Sprintf("GetProcessProperties %s: %d", process.SystemID(), process.Pid()), &completed)
err := hcsGetProcessProperties(process.handle, &propertiesp, &resultp)
completed = true
events := processHcsResult(resultp)
if err != nil {
return nil, makeProcessError(process, operation, err, events)
}
if propertiesp == nil {
return nil, ErrUnexpectedValue
}
propertiesRaw := interop.ConvertAndFreeCoTaskMemBytes(propertiesp)
properties := &ProcessStatus{}
if err := json.Unmarshal(propertiesRaw, properties); err != nil {
return nil, makeProcessError(process, operation, err, nil)
}
logrus.Debugf(title+" succeeded processid=%d, properties=%s", process.processID, propertiesRaw)
return properties, nil
}
// ExitCode returns the exit code of the process. The process must have
// already terminated.
func (process *Process) ExitCode() (int, error) {
operation := "ExitCode"
properties, err := process.Properties()
if err != nil {
return 0, makeProcessError(process, operation, err, nil)
}
if !properties.Exited {
return 0, makeProcessError(process, operation, ErrInvalidProcessState, nil)
}
if properties.LastWaitResult != 0 {
return 0, makeProcessError(process, operation, syscall.Errno(properties.LastWaitResult), nil)
}
return int(properties.ExitCode), nil
}
// Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing
// these pipes does not close the underlying pipes; it should be possible to
// call this multiple times to get multiple interfaces.
func (process *Process) Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error) {
process.handleLock.RLock()
defer process.handleLock.RUnlock()
operation := "Stdio"
title := "hcsshim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
if process.handle == 0 {
return nil, nil, nil, makeProcessError(process, operation, ErrAlreadyClosed, nil)
}
var stdIn, stdOut, stdErr syscall.Handle
if process.cachedPipes == nil {
var (
processInfo hcsProcessInformation
resultp *uint16
)
err := hcsGetProcessInfo(process.handle, &processInfo, &resultp)
events := processHcsResult(resultp)
if err != nil {
return nil, nil, nil, makeProcessError(process, operation, err, events)
}
stdIn, stdOut, stdErr = processInfo.StdInput, processInfo.StdOutput, processInfo.StdError
} else {
// Use cached pipes
stdIn, stdOut, stdErr = process.cachedPipes.stdIn, process.cachedPipes.stdOut, process.cachedPipes.stdErr
// Invalidate the cache
process.cachedPipes = nil
}
pipes, err := makeOpenFiles([]syscall.Handle{stdIn, stdOut, stdErr})
if err != nil {
return nil, nil, nil, makeProcessError(process, operation, err, nil)
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return pipes[0], pipes[1], pipes[2], nil
}
// CloseStdin closes the write side of the stdin pipe so that the process is
// notified on the read side that there is no more data in stdin.
func (process *Process) CloseStdin() error {
process.handleLock.RLock()
defer process.handleLock.RUnlock()
operation := "CloseStdin"
title := "hcsshim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
if process.handle == 0 {
return makeProcessError(process, operation, ErrAlreadyClosed, nil)
}
modifyRequest := processModifyRequest{
Operation: modifyCloseHandle,
CloseHandle: &closeHandle{
Handle: stdIn,
},
}
modifyRequestb, err := json.Marshal(modifyRequest)
if err != nil {
return err
}
modifyRequestStr := string(modifyRequestb)
var resultp *uint16
err = hcsModifyProcess(process.handle, modifyRequestStr, &resultp)
events := processHcsResult(resultp)
if err != nil {
return makeProcessError(process, operation, err, events)
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return nil
}
// Close cleans up any state associated with the process but does not kill
// or wait on it.
func (process *Process) Close() error {
process.handleLock.Lock()
defer process.handleLock.Unlock()
operation := "Close"
title := "hcsshim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
// Don't double free this
if process.handle == 0 {
return nil
}
if err := process.unregisterCallback(); err != nil {
return makeProcessError(process, operation, err, nil)
}
if err := hcsCloseProcess(process.handle); err != nil {
return makeProcessError(process, operation, err, nil)
}
process.handle = 0
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return nil
}
func (process *Process) registerCallback() error {
context := &notifcationWatcherContext{
channels: newChannels(),
}
callbackMapLock.Lock()
callbackNumber := nextCallback
nextCallback++
callbackMap[callbackNumber] = context
callbackMapLock.Unlock()
var callbackHandle hcsCallback
err := hcsRegisterProcessCallback(process.handle, notificationWatcherCallback, callbackNumber, &callbackHandle)
if err != nil {
return err
}
context.handle = callbackHandle
process.callbackNumber = callbackNumber
return nil
}
func (process *Process) unregisterCallback() error {
callbackNumber := process.callbackNumber
callbackMapLock.RLock()
context := callbackMap[callbackNumber]
callbackMapLock.RUnlock()
if context == nil {
return nil
}
handle := context.handle
if handle == 0 {
return nil
}
// hcsUnregisterProcessCallback has its own synchronization
// to wait for all callbacks to complete. We must NOT hold the callbackMapLock.
err := hcsUnregisterProcessCallback(handle)
if err != nil {
return err
}
closeChannels(context.channels)
callbackMapLock.Lock()
callbackMap[callbackNumber] = nil
callbackMapLock.Unlock()
handle = 0
return nil
}
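// Illustrative sketch only (not part of the vendored file): the calling
// sequence the Process API above expects, written as a hypothetical helper in
// the same package. The processConfig argument stands in for whatever
// JSON-serializable process document the caller builds.
func exampleRunAndWait(system *System, processConfig interface{}) (int, error) {
	process, err := system.CreateProcess(processConfig)
	if err != nil {
		return 0, err
	}
	// Close releases the handle and callback registration; it does not kill
	// or wait on the process.
	defer process.Close()
	// Wait blocks until the hcsNotificationProcessExited callback fires.
	if err := process.Wait(); err != nil {
		return 0, err
	}
	// ExitCode is only valid once the process has exited, which Wait
	// guarantees on success.
	return process.ExitCode()
}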

View file

@@ -0,0 +1,585 @@
package hcs
import (
"encoding/json"
"fmt"
"os"
"strconv"
"sync"
"syscall"
"time"
"github.com/Microsoft/hcsshim/internal/interop"
"github.com/Microsoft/hcsshim/internal/schema1"
"github.com/Microsoft/hcsshim/internal/timeout"
"github.com/sirupsen/logrus"
)
// currentContainerStarts is used to limit the number of concurrent container
// starts.
var currentContainerStarts containerStarts
type containerStarts struct {
maxParallel int
inProgress int
sync.Mutex
}
func init() {
mpsS := os.Getenv("HCSSHIM_MAX_PARALLEL_START")
if len(mpsS) > 0 {
mpsI, err := strconv.Atoi(mpsS)
if err != nil || mpsI < 0 {
return
}
currentContainerStarts.maxParallel = mpsI
}
}
type System struct {
handleLock sync.RWMutex
handle hcsSystem
id string
callbackNumber uintptr
}
// CreateComputeSystem creates a new compute system with the given configuration but does not start it.
func CreateComputeSystem(id string, hcsDocumentInterface interface{}) (*System, error) {
operation := "CreateComputeSystem"
title := "hcsshim::" + operation
computeSystem := &System{
id: id,
}
hcsDocumentB, err := json.Marshal(hcsDocumentInterface)
if err != nil {
return nil, err
}
hcsDocument := string(hcsDocumentB)
logrus.Debugf(title+" ID=%s config=%s", id, hcsDocument)
var (
resultp *uint16
identity syscall.Handle
)
completed := false
go syscallWatcher(fmt.Sprintf("CreateCompleteSystem %s: %s", id, hcsDocument), &completed)
createError := hcsCreateComputeSystem(id, hcsDocument, identity, &computeSystem.handle, &resultp)
completed = true
if createError == nil || IsPending(createError) {
if err := computeSystem.registerCallback(); err != nil {
// Terminate the compute system if it still exists. We're okay to
// ignore a failure here.
computeSystem.Terminate()
return nil, makeSystemError(computeSystem, operation, "", err, nil)
}
}
events, err := processAsyncHcsResult(createError, resultp, computeSystem.callbackNumber, hcsNotificationSystemCreateCompleted, &timeout.SystemCreate)
if err != nil {
if err == ErrTimeout {
// Terminate the compute system if it still exists. We're okay to
// ignore a failure here.
computeSystem.Terminate()
}
return nil, makeSystemError(computeSystem, operation, hcsDocument, err, events)
}
logrus.Debugf(title+" succeeded id=%s handle=%d", id, computeSystem.handle)
return computeSystem, nil
}
// OpenComputeSystem opens an existing compute system by ID.
func OpenComputeSystem(id string) (*System, error) {
operation := "OpenComputeSystem"
title := "hcsshim::" + operation
logrus.Debugf(title+" ID=%s", id)
computeSystem := &System{
id: id,
}
var (
handle hcsSystem
resultp *uint16
)
err := hcsOpenComputeSystem(id, &handle, &resultp)
events := processHcsResult(resultp)
if err != nil {
return nil, makeSystemError(computeSystem, operation, "", err, events)
}
computeSystem.handle = handle
if err := computeSystem.registerCallback(); err != nil {
return nil, makeSystemError(computeSystem, operation, "", err, nil)
}
logrus.Debugf(title+" succeeded id=%s handle=%d", id, handle)
return computeSystem, nil
}
// GetComputeSystems gets a list of the compute systems on the system that match the query
func GetComputeSystems(q schema1.ComputeSystemQuery) ([]schema1.ContainerProperties, error) {
operation := "GetComputeSystems"
title := "hcsshim::" + operation
queryb, err := json.Marshal(q)
if err != nil {
return nil, err
}
query := string(queryb)
logrus.Debugf(title+" query=%s", query)
var (
resultp *uint16
computeSystemsp *uint16
)
completed := false
go syscallWatcher(fmt.Sprintf("GetComputeSystems %s:", query), &completed)
err = hcsEnumerateComputeSystems(query, &computeSystemsp, &resultp)
completed = true
events := processHcsResult(resultp)
if err != nil {
return nil, &HcsError{Op: operation, Err: err, Events: events}
}
if computeSystemsp == nil {
return nil, ErrUnexpectedValue
}
computeSystemsRaw := interop.ConvertAndFreeCoTaskMemBytes(computeSystemsp)
computeSystems := []schema1.ContainerProperties{}
if err := json.Unmarshal(computeSystemsRaw, &computeSystems); err != nil {
return nil, err
}
logrus.Debugf(title + " succeeded")
return computeSystems, nil
}
// Start synchronously starts the computeSystem.
func (computeSystem *System) Start() error {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
title := "hcsshim::ComputeSystem::Start ID=" + computeSystem.ID()
logrus.Debugf(title)
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, "Start", "", ErrAlreadyClosed, nil)
}
// This is a very simple backoff-retry loop to limit the number
// of parallel container starts if environment variable
// HCSSHIM_MAX_PARALLEL_START is set to a positive integer.
// It should generally only be used as a workaround to various
// platform issues that exist between RS1 and RS4 as of Aug 2018
if currentContainerStarts.maxParallel > 0 {
for {
currentContainerStarts.Lock()
if currentContainerStarts.inProgress < currentContainerStarts.maxParallel {
currentContainerStarts.inProgress++
currentContainerStarts.Unlock()
break
}
if currentContainerStarts.inProgress == currentContainerStarts.maxParallel {
currentContainerStarts.Unlock()
time.Sleep(100 * time.Millisecond)
}
}
// Make sure we decrement the count when we are done.
defer func() {
currentContainerStarts.Lock()
currentContainerStarts.inProgress--
currentContainerStarts.Unlock()
}()
}
var resultp *uint16
completed := false
go syscallWatcher(fmt.Sprintf("StartComputeSystem %s:", computeSystem.ID()), &completed)
err := hcsStartComputeSystem(computeSystem.handle, "", &resultp)
completed = true
events, err := processAsyncHcsResult(err, resultp, computeSystem.callbackNumber, hcsNotificationSystemStartCompleted, &timeout.SystemStart)
if err != nil {
return makeSystemError(computeSystem, "Start", "", err, events)
}
logrus.Debugf(title + " succeeded")
return nil
}
// ID returns the compute system's identifier.
func (computeSystem *System) ID() string {
return computeSystem.id
}
// Shutdown requests a compute system shutdown. If IsPending() on the returned error is true,
// the compute system may not actually be shut down until Wait() succeeds.
func (computeSystem *System) Shutdown() error {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
title := "hcsshim::ComputeSystem::Shutdown"
logrus.Debugf(title)
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, "Shutdown", "", ErrAlreadyClosed, nil)
}
var resultp *uint16
completed := false
go syscallWatcher(fmt.Sprintf("ShutdownComputeSystem %s:", computeSystem.ID()), &completed)
err := hcsShutdownComputeSystem(computeSystem.handle, "", &resultp)
completed = true
events := processHcsResult(resultp)
if err != nil {
return makeSystemError(computeSystem, "Shutdown", "", err, events)
}
logrus.Debugf(title + " succeeded")
return nil
}
// Terminate requests a compute system terminate. If IsPending() on the returned error is true,
// the compute system may not actually be shut down until Wait() succeeds.
func (computeSystem *System) Terminate() error {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
title := "hcsshim::ComputeSystem::Terminate ID=" + computeSystem.ID()
logrus.Debugf(title)
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, "Terminate", "", ErrAlreadyClosed, nil)
}
var resultp *uint16
completed := false
go syscallWatcher(fmt.Sprintf("TerminateComputeSystem %s:", computeSystem.ID()), &completed)
err := hcsTerminateComputeSystem(computeSystem.handle, "", &resultp)
completed = true
events := processHcsResult(resultp)
if err != nil {
return makeSystemError(computeSystem, "Terminate", "", err, events)
}
logrus.Debugf(title + " succeeded")
return nil
}
// Wait synchronously waits for the compute system to shutdown or terminate.
func (computeSystem *System) Wait() error {
title := "hcsshim::ComputeSystem::Wait ID=" + computeSystem.ID()
logrus.Debugf(title)
err := waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, nil)
if err != nil {
return makeSystemError(computeSystem, "Wait", "", err, nil)
}
logrus.Debugf(title + " succeeded")
return nil
}
// WaitTimeout synchronously waits for the compute system to terminate or the duration to elapse.
// If the timeout expires, IsTimeout(err) == true
func (computeSystem *System) WaitTimeout(timeout time.Duration) error {
title := "hcsshim::ComputeSystem::WaitTimeout ID=" + computeSystem.ID()
logrus.Debugf(title)
err := waitForNotification(computeSystem.callbackNumber, hcsNotificationSystemExited, &timeout)
if err != nil {
return makeSystemError(computeSystem, "WaitTimeout", "", err, nil)
}
logrus.Debugf(title + " succeeded")
return nil
}
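// Properties queries the compute system's properties from HCS, optionally filtered by the given property types.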
func (computeSystem *System) Properties(types ...schema1.PropertyType) (*schema1.ContainerProperties, error) {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
queryj, err := json.Marshal(schema1.PropertyQuery{types})
if err != nil {
return nil, makeSystemError(computeSystem, "Properties", "", err, nil)
}
var resultp, propertiesp *uint16
completed := false
go syscallWatcher(fmt.Sprintf("GetComputeSystemProperties %s:", computeSystem.ID()), &completed)
err = hcsGetComputeSystemProperties(computeSystem.handle, string(queryj), &propertiesp, &resultp)
completed = true
events := processHcsResult(resultp)
if err != nil {
return nil, makeSystemError(computeSystem, "Properties", "", err, events)
}
if propertiesp == nil {
return nil, ErrUnexpectedValue
}
propertiesRaw := interop.ConvertAndFreeCoTaskMemBytes(propertiesp)
properties := &schema1.ContainerProperties{}
if err := json.Unmarshal(propertiesRaw, properties); err != nil {
return nil, makeSystemError(computeSystem, "Properties", "", err, nil)
}
return properties, nil
}
// Pause pauses the execution of the computeSystem. This feature is not enabled in TP5.
func (computeSystem *System) Pause() error {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
title := "hcsshim::ComputeSystem::Pause ID=" + computeSystem.ID()
logrus.Debugf(title)
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, "Pause", "", ErrAlreadyClosed, nil)
}
var resultp *uint16
completed := false
go syscallWatcher(fmt.Sprintf("PauseComputeSystem %s:", computeSystem.ID()), &completed)
err := hcsPauseComputeSystem(computeSystem.handle, "", &resultp)
completed = true
events, err := processAsyncHcsResult(err, resultp, computeSystem.callbackNumber, hcsNotificationSystemPauseCompleted, &timeout.SystemPause)
if err != nil {
return makeSystemError(computeSystem, "Pause", "", err, events)
}
logrus.Debugf(title + " succeeded")
return nil
}
// Resume resumes the execution of the computeSystem. This feature is not enabled in TP5.
func (computeSystem *System) Resume() error {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
title := "hcsshim::ComputeSystem::Resume ID=" + computeSystem.ID()
logrus.Debugf(title)
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, "Resume", "", ErrAlreadyClosed, nil)
}
var resultp *uint16
completed := false
go syscallWatcher(fmt.Sprintf("ResumeComputeSystem %s:", computeSystem.ID()), &completed)
err := hcsResumeComputeSystem(computeSystem.handle, "", &resultp)
completed = true
events, err := processAsyncHcsResult(err, resultp, computeSystem.callbackNumber, hcsNotificationSystemResumeCompleted, &timeout.SystemResume)
if err != nil {
return makeSystemError(computeSystem, "Resume", "", err, events)
}
logrus.Debugf(title + " succeeded")
return nil
}
// CreateProcess launches a new process within the computeSystem.
func (computeSystem *System) CreateProcess(c interface{}) (*Process, error) {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
title := "hcsshim::ComputeSystem::CreateProcess ID=" + computeSystem.ID()
var (
processInfo hcsProcessInformation
processHandle hcsProcess
resultp *uint16
)
if computeSystem.handle == 0 {
return nil, makeSystemError(computeSystem, "CreateProcess", "", ErrAlreadyClosed, nil)
}
configurationb, err := json.Marshal(c)
if err != nil {
return nil, makeSystemError(computeSystem, "CreateProcess", "", err, nil)
}
configuration := string(configurationb)
logrus.Debugf(title+" config=%s", configuration)
completed := false
go syscallWatcher(fmt.Sprintf("CreateProcess %s: %s", computeSystem.ID(), configuration), &completed)
err = hcsCreateProcess(computeSystem.handle, configuration, &processInfo, &processHandle, &resultp)
completed = true
events := processHcsResult(resultp)
if err != nil {
return nil, makeSystemError(computeSystem, "CreateProcess", configuration, err, events)
}
process := &Process{
handle: processHandle,
processID: int(processInfo.ProcessId),
system: computeSystem,
cachedPipes: &cachedPipes{
stdIn: processInfo.StdInput,
stdOut: processInfo.StdOutput,
stdErr: processInfo.StdError,
},
}
if err := process.registerCallback(); err != nil {
return nil, makeSystemError(computeSystem, "CreateProcess", "", err, nil)
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return process, nil
}
// OpenProcess gets an interface to an existing process within the computeSystem.
func (computeSystem *System) OpenProcess(pid int) (*Process, error) {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
title := "hcsshim::ComputeSystem::OpenProcess ID=" + computeSystem.ID()
logrus.Debugf(title+" processid=%d", pid)
var (
processHandle hcsProcess
resultp *uint16
)
if computeSystem.handle == 0 {
return nil, makeSystemError(computeSystem, "OpenProcess", "", ErrAlreadyClosed, nil)
}
completed := false
go syscallWatcher(fmt.Sprintf("OpenProcess %s: %d", computeSystem.ID(), pid), &completed)
err := hcsOpenProcess(computeSystem.handle, uint32(pid), &processHandle, &resultp)
completed = true
events := processHcsResult(resultp)
if err != nil {
return nil, makeSystemError(computeSystem, "OpenProcess", "", err, events)
}
process := &Process{
handle: processHandle,
processID: pid,
system: computeSystem,
}
if err := process.registerCallback(); err != nil {
return nil, makeSystemError(computeSystem, "OpenProcess", "", err, nil)
}
logrus.Debugf(title+" succeeded processid=%s", process.processID)
return process, nil
}
// Close cleans up any state associated with the compute system but does not terminate or wait for it.
func (computeSystem *System) Close() error {
computeSystem.handleLock.Lock()
defer computeSystem.handleLock.Unlock()
title := "hcsshim::ComputeSystem::Close ID=" + computeSystem.ID()
logrus.Debugf(title)
// Don't double free this
if computeSystem.handle == 0 {
return nil
}
if err := computeSystem.unregisterCallback(); err != nil {
return makeSystemError(computeSystem, "Close", "", err, nil)
}
completed := false
go syscallWatcher(fmt.Sprintf("CloseComputeSystem %s:", computeSystem.ID()), &completed)
err := hcsCloseComputeSystem(computeSystem.handle)
completed = true
if err != nil {
return makeSystemError(computeSystem, "Close", "", err, nil)
}
computeSystem.handle = 0
logrus.Debugf(title + " succeeded")
return nil
}
func (computeSystem *System) registerCallback() error {
context := &notifcationWatcherContext{
channels: newChannels(),
}
callbackMapLock.Lock()
callbackNumber := nextCallback
nextCallback++
callbackMap[callbackNumber] = context
callbackMapLock.Unlock()
var callbackHandle hcsCallback
err := hcsRegisterComputeSystemCallback(computeSystem.handle, notificationWatcherCallback, callbackNumber, &callbackHandle)
if err != nil {
return err
}
context.handle = callbackHandle
computeSystem.callbackNumber = callbackNumber
return nil
}
func (computeSystem *System) unregisterCallback() error {
callbackNumber := computeSystem.callbackNumber
callbackMapLock.RLock()
context := callbackMap[callbackNumber]
callbackMapLock.RUnlock()
if context == nil {
return nil
}
handle := context.handle
if handle == 0 {
return nil
}
// hcsUnregisterComputeSystemCallback has its own synchronization
// to wait for all callbacks to complete. We must NOT hold the callbackMapLock.
err := hcsUnregisterComputeSystemCallback(handle)
if err != nil {
return err
}
closeChannels(context.channels)
callbackMapLock.Lock()
callbackMap[callbackNumber] = nil
callbackMapLock.Unlock()
handle = 0
return nil
}
// Modify modifies the System by sending a request to HCS.
func (computeSystem *System) Modify(config interface{}) error {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
title := "hcsshim::Modify ID=" + computeSystem.id
if computeSystem.handle == 0 {
return makeSystemError(computeSystem, "Modify", "", ErrAlreadyClosed, nil)
}
requestJSON, err := json.Marshal(config)
if err != nil {
return err
}
requestString := string(requestJSON)
logrus.Debugf(title + " " + requestString)
var resultp *uint16
completed := false
go syscallWatcher(fmt.Sprintf("ModifyComputeSystem %s: %s", computeSystem.ID(), requestString), &completed)
err = hcsModifyComputeSystem(computeSystem.handle, requestString, &resultp)
completed = true
events := processHcsResult(resultp)
if err != nil {
return makeSystemError(computeSystem, "Modify", requestString, err, events)
}
logrus.Debugf(title + " succeeded ")
return nil
}
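// Illustrative sketch only (not part of the vendored file): the typical
// create/start/wait/close sequence for a compute system using the functions
// above. The hcsDocument argument stands in for a schema1 container document
// built by the caller.
func exampleCreateAndRun(id string, hcsDocument interface{}) error {
	system, err := CreateComputeSystem(id, hcsDocument)
	if err != nil {
		return err
	}
	// Close only releases the handle and callback registration; Terminate is
	// what actually tears the compute system down on error paths.
	defer system.Close()
	if err := system.Start(); err != nil {
		system.Terminate()
		return err
	}
	// Wait blocks until the hcsNotificationSystemExited callback fires.
	return system.Wait()
}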

View file

@@ -1,4 +1,4 @@
package hcsshim
package hcs
import (
"io"

View file

@@ -1,4 +1,4 @@
package hcsshim
package hcs
import (
"time"
@@ -6,13 +6,13 @@ import (
"github.com/sirupsen/logrus"
)
func processAsyncHcsResult(err error, resultp *uint16, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error {
err = processHcsResult(err, resultp)
func processAsyncHcsResult(err error, resultp *uint16, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) ([]ErrorEvent, error) {
events := processHcsResult(resultp)
if IsPending(err) {
return waitForNotification(callbackNumber, expectedNotification, timeout)
return nil, waitForNotification(callbackNumber, expectedNotification, timeout)
}
return err
return events, err
}
func waitForNotification(callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error {

View file

@@ -0,0 +1,30 @@
package hcs
import (
"time"
"github.com/Microsoft/hcsshim/internal/timeout"
"github.com/sirupsen/logrus"
)
// syscallWatcher is used as a very simple goroutine around calls into
// the platform. In some cases, we have seen HCS APIs not returning due to
// various bugs, and the goroutine making the syscall ends up not returning,
// prior to its async callback. By spinning up a syscallWatcher, it allows
// us to at least log a warning if a syscall doesn't complete in a reasonable
// amount of time.
//
// Usage is:
//
// completed := false
// go syscallWatcher("some description", &completed)
// <syscall>
// completed = true
//
func syscallWatcher(description string, syscallCompleted *bool) {
time.Sleep(timeout.SyscallWatcher)
if *syscallCompleted {
return
}
logrus.Warnf("%s: Did not complete within %s. This may indicate a platform issue. If it appears to be making no forward progress, obtain the stacks and see is there is a syscall stuck in the platform API for a significant length of time.", description, timeout.SyscallWatcher)
}

View file

@@ -0,0 +1,441 @@
// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
package hcs
import (
"syscall"
"unsafe"
"github.com/Microsoft/hcsshim/internal/interop"
"golang.org/x/sys/windows"
)
var _ unsafe.Pointer
// Do the interface allocations only once for common
// Errno values.
const (
errnoERROR_IO_PENDING = 997
)
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
)
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
return nil
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values seen on Windows. (perhaps when running
// all.bat?)
return e
}
var (
modvmcompute = windows.NewLazySystemDLL("vmcompute.dll")
procHcsEnumerateComputeSystems = modvmcompute.NewProc("HcsEnumerateComputeSystems")
procHcsCreateComputeSystem = modvmcompute.NewProc("HcsCreateComputeSystem")
procHcsOpenComputeSystem = modvmcompute.NewProc("HcsOpenComputeSystem")
procHcsCloseComputeSystem = modvmcompute.NewProc("HcsCloseComputeSystem")
procHcsStartComputeSystem = modvmcompute.NewProc("HcsStartComputeSystem")
procHcsShutdownComputeSystem = modvmcompute.NewProc("HcsShutdownComputeSystem")
procHcsTerminateComputeSystem = modvmcompute.NewProc("HcsTerminateComputeSystem")
procHcsPauseComputeSystem = modvmcompute.NewProc("HcsPauseComputeSystem")
procHcsResumeComputeSystem = modvmcompute.NewProc("HcsResumeComputeSystem")
procHcsGetComputeSystemProperties = modvmcompute.NewProc("HcsGetComputeSystemProperties")
procHcsModifyComputeSystem = modvmcompute.NewProc("HcsModifyComputeSystem")
procHcsRegisterComputeSystemCallback = modvmcompute.NewProc("HcsRegisterComputeSystemCallback")
procHcsUnregisterComputeSystemCallback = modvmcompute.NewProc("HcsUnregisterComputeSystemCallback")
procHcsCreateProcess = modvmcompute.NewProc("HcsCreateProcess")
procHcsOpenProcess = modvmcompute.NewProc("HcsOpenProcess")
procHcsCloseProcess = modvmcompute.NewProc("HcsCloseProcess")
procHcsTerminateProcess = modvmcompute.NewProc("HcsTerminateProcess")
procHcsGetProcessInfo = modvmcompute.NewProc("HcsGetProcessInfo")
procHcsGetProcessProperties = modvmcompute.NewProc("HcsGetProcessProperties")
procHcsModifyProcess = modvmcompute.NewProc("HcsModifyProcess")
procHcsGetServiceProperties = modvmcompute.NewProc("HcsGetServiceProperties")
procHcsRegisterProcessCallback = modvmcompute.NewProc("HcsRegisterProcessCallback")
procHcsUnregisterProcessCallback = modvmcompute.NewProc("HcsUnregisterProcessCallback")
)
func hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(query)
if hr != nil {
return
}
return _hcsEnumerateComputeSystems(_p0, computeSystems, result)
}
func _hcsEnumerateComputeSystems(query *uint16, computeSystems **uint16, result **uint16) (hr error) {
if hr = procHcsEnumerateComputeSystems.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsEnumerateComputeSystems.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(computeSystems)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *hcsSystem, result **uint16) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(id)
if hr != nil {
return
}
var _p1 *uint16
_p1, hr = syscall.UTF16PtrFromString(configuration)
if hr != nil {
return
}
return _hcsCreateComputeSystem(_p0, _p1, identity, computeSystem, result)
}
func _hcsCreateComputeSystem(id *uint16, configuration *uint16, identity syscall.Handle, computeSystem *hcsSystem, result **uint16) (hr error) {
if hr = procHcsCreateComputeSystem.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall6(procHcsCreateComputeSystem.Addr(), 5, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(configuration)), uintptr(identity), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func hcsOpenComputeSystem(id string, computeSystem *hcsSystem, result **uint16) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(id)
if hr != nil {
return
}
return _hcsOpenComputeSystem(_p0, computeSystem, result)
}
func _hcsOpenComputeSystem(id *uint16, computeSystem *hcsSystem, result **uint16) (hr error) {
if hr = procHcsOpenComputeSystem.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsOpenComputeSystem.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func hcsCloseComputeSystem(computeSystem hcsSystem) (hr error) {
if hr = procHcsCloseComputeSystem.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsCloseComputeSystem.Addr(), 1, uintptr(computeSystem), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func hcsStartComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(options)
if hr != nil {
return
}
return _hcsStartComputeSystem(computeSystem, _p0, result)
}
func _hcsStartComputeSystem(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) {
if hr = procHcsStartComputeSystem.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsStartComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func hcsShutdownComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(options)
if hr != nil {
return
}
return _hcsShutdownComputeSystem(computeSystem, _p0, result)
}
func _hcsShutdownComputeSystem(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) {
if hr = procHcsShutdownComputeSystem.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsShutdownComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func hcsTerminateComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(options)
if hr != nil {
return
}
return _hcsTerminateComputeSystem(computeSystem, _p0, result)
}
func _hcsTerminateComputeSystem(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) {
if hr = procHcsTerminateComputeSystem.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsTerminateComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func hcsPauseComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(options)
if hr != nil {
return
}
return _hcsPauseComputeSystem(computeSystem, _p0, result)
}
func _hcsPauseComputeSystem(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) {
if hr = procHcsPauseComputeSystem.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsPauseComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func hcsResumeComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(options)
if hr != nil {
return
}
return _hcsResumeComputeSystem(computeSystem, _p0, result)
}
func _hcsResumeComputeSystem(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) {
if hr = procHcsResumeComputeSystem.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsResumeComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(propertyQuery)
if hr != nil {
return
}
return _hcsGetComputeSystemProperties(computeSystem, _p0, properties, result)
}
func _hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery *uint16, properties **uint16, result **uint16) (hr error) {
if hr = procHcsGetComputeSystemProperties.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall6(procHcsGetComputeSystemProperties.Addr(), 4, uintptr(computeSystem), uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func hcsModifyComputeSystem(computeSystem hcsSystem, configuration string, result **uint16) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(configuration)
if hr != nil {
return
}
return _hcsModifyComputeSystem(computeSystem, _p0, result)
}
func _hcsModifyComputeSystem(computeSystem hcsSystem, configuration *uint16, result **uint16) (hr error) {
if hr = procHcsModifyComputeSystem.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsModifyComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(configuration)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func hcsRegisterComputeSystemCallback(computeSystem hcsSystem, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) {
if hr = procHcsRegisterComputeSystemCallback.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall6(procHcsRegisterComputeSystemCallback.Addr(), 4, uintptr(computeSystem), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func hcsUnregisterComputeSystemCallback(callbackHandle hcsCallback) (hr error) {
if hr = procHcsUnregisterComputeSystemCallback.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsUnregisterComputeSystemCallback.Addr(), 1, uintptr(callbackHandle), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func hcsCreateProcess(computeSystem hcsSystem, processParameters string, processInformation *hcsProcessInformation, process *hcsProcess, result **uint16) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(processParameters)
if hr != nil {
return
}
return _hcsCreateProcess(computeSystem, _p0, processInformation, process, result)
}
func _hcsCreateProcess(computeSystem hcsSystem, processParameters *uint16, processInformation *hcsProcessInformation, process *hcsProcess, result **uint16) (hr error) {
if hr = procHcsCreateProcess.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall6(procHcsCreateProcess.Addr(), 5, uintptr(computeSystem), uintptr(unsafe.Pointer(processParameters)), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func hcsOpenProcess(computeSystem hcsSystem, pid uint32, process *hcsProcess, result **uint16) (hr error) {
if hr = procHcsOpenProcess.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall6(procHcsOpenProcess.Addr(), 4, uintptr(computeSystem), uintptr(pid), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func hcsCloseProcess(process hcsProcess) (hr error) {
if hr = procHcsCloseProcess.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsCloseProcess.Addr(), 1, uintptr(process), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func hcsTerminateProcess(process hcsProcess, result **uint16) (hr error) {
if hr = procHcsTerminateProcess.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsTerminateProcess.Addr(), 2, uintptr(process), uintptr(unsafe.Pointer(result)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func hcsGetProcessInfo(process hcsProcess, processInformation *hcsProcessInformation, result **uint16) (hr error) {
if hr = procHcsGetProcessInfo.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsGetProcessInfo.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func hcsGetProcessProperties(process hcsProcess, processProperties **uint16, result **uint16) (hr error) {
if hr = procHcsGetProcessProperties.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsGetProcessProperties.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processProperties)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func hcsModifyProcess(process hcsProcess, settings string, result **uint16) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(settings)
if hr != nil {
return
}
return _hcsModifyProcess(process, _p0, result)
}
func _hcsModifyProcess(process hcsProcess, settings *uint16, result **uint16) (hr error) {
if hr = procHcsModifyProcess.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsModifyProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(propertyQuery)
if hr != nil {
return
}
return _hcsGetServiceProperties(_p0, properties, result)
}
func _hcsGetServiceProperties(propertyQuery *uint16, properties **uint16, result **uint16) (hr error) {
if hr = procHcsGetServiceProperties.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsGetServiceProperties.Addr(), 3, uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func hcsRegisterProcessCallback(process hcsProcess, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) {
if hr = procHcsRegisterProcessCallback.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall6(procHcsRegisterProcessCallback.Addr(), 4, uintptr(process), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func hcsUnregisterProcessCallback(callbackHandle hcsCallback) (hr error) {
if hr = procHcsUnregisterProcessCallback.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procHcsUnregisterProcessCallback.Addr(), 1, uintptr(callbackHandle), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}

View file

@@ -0,0 +1,51 @@
package hcserror
import (
"fmt"
"syscall"
)
const ERROR_GEN_FAILURE = syscall.Errno(31)
type HcsError struct {
title string
rest string
Err error
}
func (e *HcsError) Error() string {
s := e.title
if len(s) > 0 && s[len(s)-1] != ' ' {
s += " "
}
s += fmt.Sprintf("failed in Win32: %s (0x%x)", e.Err, Win32FromError(e.Err))
if e.rest != "" {
if e.rest[0] != ' ' {
s += " "
}
s += e.rest
}
return s
}
func New(err error, title, rest string) error {
// Pass through DLL errors directly since they do not originate from HCS.
if _, ok := err.(*syscall.DLLError); ok {
return err
}
return &HcsError{title, rest, err}
}
func Errorf(err error, title, format string, a ...interface{}) error {
return New(err, title, fmt.Sprintf(format, a...))
}
func Win32FromError(err error) uint32 {
if herr, ok := err.(*HcsError); ok {
return Win32FromError(herr.Err)
}
if code, ok := err.(syscall.Errno); ok {
return uint32(code)
}
return uint32(ERROR_GEN_FAILURE)
}
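// Illustrative sketch only (not part of the vendored file): Win32FromError
// unwraps nested *HcsError values and syscall.Errno codes, falling back to
// ERROR_GEN_FAILURE for anything else, so wrapping with New does not change
// the reported Win32 code.
func exampleWin32Code(err error) uint32 {
	wrapped := New(err, "hcsshim::example ", "extra context")
	return Win32FromError(wrapped) // same value as Win32FromError(err)
}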

View file

@@ -0,0 +1,23 @@
package hns
import "fmt"
//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go hns.go
//sys _hnsCall(method string, path string, object string, response **uint16) (hr error) = vmcompute.HNSCall?
type EndpointNotFoundError struct {
EndpointName string
}
func (e EndpointNotFoundError) Error() string {
return fmt.Sprintf("Endpoint %s not found", e.EndpointName)
}
type NetworkNotFoundError struct {
NetworkName string
}
func (e NetworkNotFoundError) Error() string {
return fmt.Sprintf("Network %s not found", e.NetworkName)
}
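// Illustrative sketch only (not part of the vendored file): callers can use a
// type assertion on these error types to tell "not found" apart from other HNS
// failures. GetHNSEndpointByName is defined elsewhere in this package.
func exampleEndpointMissing(name string) (bool, error) {
	_, err := GetHNSEndpointByName(name)
	if err == nil {
		return false, nil // endpoint exists
	}
	if _, ok := err.(EndpointNotFoundError); ok {
		return true, nil // endpoint does not exist
	}
	return false, err // some other HNS failure
}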

View file

@@ -0,0 +1,260 @@
package hns
import (
"encoding/json"
"net"
"github.com/sirupsen/logrus"
)
// HNSEndpoint represents a network endpoint in HNS
type HNSEndpoint struct {
Id string `json:"ID,omitempty"`
Name string `json:",omitempty"`
VirtualNetwork string `json:",omitempty"`
VirtualNetworkName string `json:",omitempty"`
Policies []json.RawMessage `json:",omitempty"`
MacAddress string `json:",omitempty"`
IPAddress net.IP `json:",omitempty"`
DNSSuffix string `json:",omitempty"`
DNSServerList string `json:",omitempty"`
GatewayAddress string `json:",omitempty"`
EnableInternalDNS bool `json:",omitempty"`
DisableICC bool `json:",omitempty"`
PrefixLength uint8 `json:",omitempty"`
IsRemoteEndpoint bool `json:",omitempty"`
Namespace *Namespace `json:",omitempty"`
}
// SystemType represents the type of the system on which actions are done
type SystemType string
// SystemType const
const (
ContainerType SystemType = "Container"
VirtualMachineType SystemType = "VirtualMachine"
HostType SystemType = "Host"
)
// EndpointAttachDetachRequest is the structure used to send a request to the container to modify the system.
// Supported resource types are Network; supported request types are Add/Remove.
type EndpointAttachDetachRequest struct {
ContainerID string `json:"ContainerId,omitempty"`
SystemType SystemType `json:"SystemType"`
CompartmentID uint16 `json:"CompartmentId,omitempty"`
VirtualNICName string `json:"VirtualNicName,omitempty"`
}
// EndpointResquestResponse is the object returned by HNS in response to an endpoint request
type EndpointResquestResponse struct {
Success bool
Error string
}
// HNSEndpointRequest makes a HNS call to modify/query a network endpoint
func HNSEndpointRequest(method, path, request string) (*HNSEndpoint, error) {
endpoint := &HNSEndpoint{}
err := hnsCall(method, "/endpoints/"+path, request, &endpoint)
if err != nil {
return nil, err
}
return endpoint, nil
}
// HNSListEndpointRequest makes a HNS call to query the list of available endpoints
func HNSListEndpointRequest() ([]HNSEndpoint, error) {
var endpoint []HNSEndpoint
err := hnsCall("GET", "/endpoints/", "", &endpoint)
if err != nil {
return nil, err
}
return endpoint, nil
}
// GetHNSEndpointByID gets the Endpoint by ID
func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) {
return HNSEndpointRequest("GET", endpointID, "")
}
// GetHNSEndpointByName gets the endpoint filtered by Name
func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) {
hnsResponse, err := HNSListEndpointRequest()
if err != nil {
return nil, err
}
for _, hnsEndpoint := range hnsResponse {
if hnsEndpoint.Name == endpointName {
return &hnsEndpoint, nil
}
}
return nil, EndpointNotFoundError{EndpointName: endpointName}
}
// Create Endpoint by sending EndpointRequest to HNS. TODO: Create a separate HNS interface to place all these methods
func (endpoint *HNSEndpoint) Create() (*HNSEndpoint, error) {
operation := "Create"
title := "hcsshim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s", endpoint.Id)
jsonString, err := json.Marshal(endpoint)
if err != nil {
return nil, err
}
return HNSEndpointRequest("POST", "", string(jsonString))
}
// Delete Endpoint by sending EndpointRequest to HNS
func (endpoint *HNSEndpoint) Delete() (*HNSEndpoint, error) {
operation := "Delete"
title := "hcsshim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s", endpoint.Id)
return HNSEndpointRequest("DELETE", endpoint.Id, "")
}
// Update Endpoint
func (endpoint *HNSEndpoint) Update() (*HNSEndpoint, error) {
operation := "Update"
title := "hcsshim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s", endpoint.Id)
jsonString, err := json.Marshal(endpoint)
if err != nil {
return nil, err
}
err = hnsCall("POST", "/endpoints/"+endpoint.Id, string(jsonString), &endpoint)
return endpoint, err
}
// ApplyACLPolicy applies a set of ACL Policies on the Endpoint
func (endpoint *HNSEndpoint) ApplyACLPolicy(policies ...*ACLPolicy) error {
operation := "ApplyACLPolicy"
title := "hcsshim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s", endpoint.Id)
for _, policy := range policies {
if policy == nil {
continue
}
jsonString, err := json.Marshal(policy)
if err != nil {
return err
}
endpoint.Policies = append(endpoint.Policies, jsonString)
}
_, err := endpoint.Update()
return err
}
// ContainerAttach attaches an endpoint to a container
func (endpoint *HNSEndpoint) ContainerAttach(containerID string, compartmentID uint16) error {
operation := "ContainerAttach"
title := "hcsshim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s", endpoint.Id)
requestMessage := &EndpointAttachDetachRequest{
ContainerID: containerID,
CompartmentID: compartmentID,
SystemType: ContainerType,
}
response := &EndpointResquestResponse{}
jsonString, err := json.Marshal(requestMessage)
if err != nil {
return err
}
return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response)
}
// ContainerDetach detaches an endpoint from a container
func (endpoint *HNSEndpoint) ContainerDetach(containerID string) error {
operation := "ContainerDetach"
title := "hcsshim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s", endpoint.Id)
requestMessage := &EndpointAttachDetachRequest{
ContainerID: containerID,
SystemType: ContainerType,
}
response := &EndpointResquestResponse{}
jsonString, err := json.Marshal(requestMessage)
if err != nil {
return err
}
return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response)
}
// HostAttach attaches a nic on the host
func (endpoint *HNSEndpoint) HostAttach(compartmentID uint16) error {
operation := "HostAttach"
title := "hcsshim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s", endpoint.Id)
requestMessage := &EndpointAttachDetachRequest{
CompartmentID: compartmentID,
SystemType: HostType,
}
response := &EndpointResquestResponse{}
jsonString, err := json.Marshal(requestMessage)
if err != nil {
return err
}
return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response)
}
// HostDetach detaches a nic on the host
func (endpoint *HNSEndpoint) HostDetach() error {
operation := "HostDetach"
title := "hcsshim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s", endpoint.Id)
requestMessage := &EndpointAttachDetachRequest{
SystemType: HostType,
}
response := &EndpointResquestResponse{}
jsonString, err := json.Marshal(requestMessage)
if err != nil {
return err
}
return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response)
}
// VirtualMachineNICAttach attaches an endpoint to a virtual machine
func (endpoint *HNSEndpoint) VirtualMachineNICAttach(virtualMachineNICName string) error {
operation := "VirtualMachineNicAttach"
title := "hcsshim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s", endpoint.Id)
requestMessage := &EndpointAttachDetachRequest{
VirtualNICName: virtualMachineNICName,
SystemType: VirtualMachineType,
}
response := &EndpointResquestResponse{}
jsonString, err := json.Marshal(requestMessage)
if err != nil {
return err
}
return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response)
}
// VirtualMachineNICDetach detaches an endpoint from a virtual machine
func (endpoint *HNSEndpoint) VirtualMachineNICDetach() error {
operation := "VirtualMachineNicDetach"
title := "hcsshim::HNSEndpoint::" + operation
logrus.Debugf(title+" id=%s", endpoint.Id)
requestMessage := &EndpointAttachDetachRequest{
SystemType: VirtualMachineType,
}
response := &EndpointResquestResponse{}
jsonString, err := json.Marshal(requestMessage)
if err != nil {
return err
}
return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response)
}
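// Illustrative sketch only (not part of the vendored file): creating an
// endpoint on an existing network and attaching it to a container with the
// methods above. networkID and containerID are hypothetical inputs; a
// compartment ID of 0 is simply omitted from the request (the field is
// omitempty).
func exampleAttachEndpoint(networkID, containerID, name string) (*HNSEndpoint, error) {
	endpoint := &HNSEndpoint{
		Name:           name,
		VirtualNetwork: networkID,
	}
	created, err := endpoint.Create()
	if err != nil {
		return nil, err
	}
	if err := created.ContainerAttach(containerID, 0); err != nil {
		// Best-effort cleanup of the endpoint that was just created.
		created.Delete()
		return nil, err
	}
	return created, nil
}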

View file

@@ -1,9 +1,11 @@
package hcsshim
package hns
import (
"encoding/json"
"fmt"
"github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/Microsoft/hcsshim/internal/interop"
"github.com/sirupsen/logrus"
)
@@ -13,9 +15,9 @@ func hnsCall(method, path, request string, returnResponse interface{}) error {
err := _hnsCall(method, path, request, &responseBuffer)
if err != nil {
return makeError(err, "hnsCall ", "")
return hcserror.New(err, "hnsCall ", "")
}
response := convertAndFreeCoTaskMemString(responseBuffer)
response := interop.ConvertAndFreeCoTaskMemString(responseBuffer)
hnsresponse := &hnsResponse{}
if err = json.Unmarshal([]byte(response), &hnsresponse); err != nil {

View file

@@ -0,0 +1,28 @@
package hns
type HNSGlobals struct {
Version HNSVersion `json:"Version"`
}
type HNSVersion struct {
Major int `json:"Major"`
Minor int `json:"Minor"`
}
var (
HNSVersion1803 = HNSVersion{Major: 7, Minor: 2}
)
func GetHNSGlobals() (*HNSGlobals, error) {
var version HNSVersion
err := hnsCall("GET", "/globals/version", "", &version)
if err != nil {
return nil, err
}
globals := &HNSGlobals{
Version: version,
}
return globals, nil
}
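// Illustrative sketch only (not part of the vendored file): comparing the
// reported HNS version against HNSVersion1803 before relying on newer
// behaviour.
func exampleAtLeast1803() (bool, error) {
	globals, err := GetHNSGlobals()
	if err != nil {
		return false, err
	}
	v, min := globals.Version, HNSVersion1803
	return v.Major > min.Major || (v.Major == min.Major && v.Minor >= min.Minor), nil
}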

View file

@@ -0,0 +1,141 @@
package hns
import (
"encoding/json"
"net"
"github.com/sirupsen/logrus"
)
// Subnet is associated with a network and represents a list
// of subnets available to the network
type Subnet struct {
AddressPrefix string `json:",omitempty"`
GatewayAddress string `json:",omitempty"`
Policies []json.RawMessage `json:",omitempty"`
}
// MacPool is associated with a network and represents a list
// of macaddresses available to the network
type MacPool struct {
StartMacAddress string `json:",omitempty"`
EndMacAddress string `json:",omitempty"`
}
// HNSNetwork represents a network in HNS
type HNSNetwork struct {
Id string `json:"ID,omitempty"`
Name string `json:",omitempty"`
Type string `json:",omitempty"`
NetworkAdapterName string `json:",omitempty"`
SourceMac string `json:",omitempty"`
Policies []json.RawMessage `json:",omitempty"`
MacPools []MacPool `json:",omitempty"`
Subnets []Subnet `json:",omitempty"`
DNSSuffix string `json:",omitempty"`
DNSServerList string `json:",omitempty"`
DNSServerCompartment uint32 `json:",omitempty"`
ManagementIP string `json:",omitempty"`
AutomaticDNS bool `json:",omitempty"`
}
type hnsNetworkResponse struct {
Success bool
Error string
Output HNSNetwork
}
type hnsResponse struct {
Success bool
Error string
Output json.RawMessage
}
// HNSNetworkRequest makes a call into HNS to update/query a single network
func HNSNetworkRequest(method, path, request string) (*HNSNetwork, error) {
var network HNSNetwork
err := hnsCall(method, "/networks/"+path, request, &network)
if err != nil {
return nil, err
}
return &network, nil
}
// HNSListNetworkRequest makes a HNS call to query the list of available networks
func HNSListNetworkRequest(method, path, request string) ([]HNSNetwork, error) {
var network []HNSNetwork
err := hnsCall(method, "/networks/"+path, request, &network)
if err != nil {
return nil, err
}
return network, nil
}
// GetHNSNetworkByID gets the network filtered by ID
func GetHNSNetworkByID(networkID string) (*HNSNetwork, error) {
return HNSNetworkRequest("GET", networkID, "")
}
// GetHNSNetworkByName gets the network filtered by Name
func GetHNSNetworkByName(networkName string) (*HNSNetwork, error) {
hsnnetworks, err := HNSListNetworkRequest("GET", "", "")
if err != nil {
return nil, err
}
for _, hnsnetwork := range hsnnetworks {
if hnsnetwork.Name == networkName {
return &hnsnetwork, nil
}
}
return nil, NetworkNotFoundError{NetworkName: networkName}
}
// Create Network by sending NetworkRequest to HNS.
func (network *HNSNetwork) Create() (*HNSNetwork, error) {
operation := "Create"
title := "hcsshim::HNSNetwork::" + operation
logrus.Debugf(title+" id=%s", network.Id)
jsonString, err := json.Marshal(network)
if err != nil {
return nil, err
}
return HNSNetworkRequest("POST", "", string(jsonString))
}
// Delete Network by sending NetworkRequest to HNS
func (network *HNSNetwork) Delete() (*HNSNetwork, error) {
operation := "Delete"
title := "hcsshim::HNSNetwork::" + operation
logrus.Debugf(title+" id=%s", network.Id)
return HNSNetworkRequest("DELETE", network.Id, "")
}
// NewEndpoint creates an endpoint on the Network.
func (network *HNSNetwork) NewEndpoint(ipAddress net.IP, macAddress net.HardwareAddr) *HNSEndpoint {
return &HNSEndpoint{
VirtualNetwork: network.Id,
IPAddress: ipAddress,
MacAddress: string(macAddress),
}
}
func (network *HNSNetwork) CreateEndpoint(endpoint *HNSEndpoint) (*HNSEndpoint, error) {
operation := "CreateEndpoint"
title := "hcsshim::HNSNetwork::" + operation
logrus.Debugf(title+" id=%s, endpointId=%s", network.Id, endpoint.Id)
endpoint.VirtualNetwork = network.Id
return endpoint.Create()
}
func (network *HNSNetwork) CreateRemoteEndpoint(endpoint *HNSEndpoint) (*HNSEndpoint, error) {
operation := "CreateRemoteEndpoint"
title := "hcsshim::HNSNetwork::" + operation
logrus.Debugf(title+" id=%s", network.Id)
endpoint.IsRemoteEndpoint = true
return network.CreateEndpoint(endpoint)
}
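// Illustrative sketch only (not part of the vendored file): creating a network
// with a single subnet and then an endpoint on it via the methods above. The
// network type and address values are hypothetical placeholders.
func exampleCreateNetworkWithEndpoint(name string) (*HNSNetwork, *HNSEndpoint, error) {
	network := &HNSNetwork{
		Name: name,
		Type: "NAT",
		Subnets: []Subnet{{
			AddressPrefix:  "172.16.0.0/24",
			GatewayAddress: "172.16.0.1",
		}},
	}
	created, err := network.Create()
	if err != nil {
		return nil, nil, err
	}
	endpoint, err := created.CreateEndpoint(&HNSEndpoint{Name: name + "-ep"})
	if err != nil {
		return nil, nil, err
	}
	return created, endpoint, nil
}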

View file

@@ -0,0 +1,98 @@
package hns
// PolicyType is the type of policy supported in ModifySystem requests
type PolicyType string
// PolicyType constants
const (
Nat PolicyType = "NAT"
ACL PolicyType = "ACL"
PA PolicyType = "PA"
VLAN PolicyType = "VLAN"
VSID PolicyType = "VSID"
VNet PolicyType = "VNET"
L2Driver PolicyType = "L2Driver"
Isolation PolicyType = "Isolation"
QOS PolicyType = "QOS"
OutboundNat PolicyType = "OutBoundNAT"
ExternalLoadBalancer PolicyType = "ELB"
Route PolicyType = "ROUTE"
)
type NatPolicy struct {
Type PolicyType `json:"Type"`
Protocol string
InternalPort uint16
ExternalPort uint16
}
type QosPolicy struct {
Type PolicyType `json:"Type"`
MaximumOutgoingBandwidthInBytes uint64
}
type IsolationPolicy struct {
Type PolicyType `json:"Type"`
VLAN uint
VSID uint
InDefaultIsolation bool
}
type VlanPolicy struct {
Type PolicyType `json:"Type"`
VLAN uint
}
type VsidPolicy struct {
Type PolicyType `json:"Type"`
VSID uint
}
type PaPolicy struct {
Type PolicyType `json:"Type"`
PA string `json:"PA"`
}
type OutboundNatPolicy struct {
Policy
VIP string `json:"VIP,omitempty"`
Exceptions []string `json:"ExceptionList,omitempty"`
}
type ActionType string
type DirectionType string
type RuleType string
const (
Allow ActionType = "Allow"
Block ActionType = "Block"
In DirectionType = "In"
Out DirectionType = "Out"
Host RuleType = "Host"
Switch RuleType = "Switch"
)
type ACLPolicy struct {
Type PolicyType `json:"Type"`
Id string `json:"Id,omitempty"`
Protocol uint16
Protocols string `json:"Protocols,omitempty"`
InternalPort uint16
Action ActionType
Direction DirectionType
LocalAddresses string
RemoteAddresses string
LocalPorts string `json:"LocalPorts,omitempty"`
LocalPort uint16
RemotePorts string `json:"RemotePorts,omitempty"`
RemotePort uint16
RuleType RuleType `json:"RuleType,omitempty"`
Priority uint16
ServiceName string
}
type Policy struct {
Type PolicyType `json:"Type"`
}
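// Illustrative sketch only (not part of the vendored file): building an ACL
// policy from the constants above and applying it to an endpoint via
// ApplyACLPolicy (defined in hnsendpoint.go). The field values are
// hypothetical placeholders.
func exampleBlockInboundTCP(endpoint *HNSEndpoint) error {
	policy := &ACLPolicy{
		Type:            ACL,
		Action:          Block,
		Direction:       In,
		Protocols:       "6", // IANA protocol number for TCP
		RemoteAddresses: "0.0.0.0/0",
		Priority:        200,
	}
	return endpoint.ApplyACLPolicy(policy)
}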

View file

@@ -0,0 +1,200 @@
package hns
import (
"encoding/json"
"github.com/sirupsen/logrus"
)
// RoutePolicy is a structure defining schema for Route based Policy
type RoutePolicy struct {
Policy
DestinationPrefix string `json:"DestinationPrefix,omitempty"`
NextHop string `json:"NextHop,omitempty"`
EncapEnabled bool `json:"NeedEncap,omitempty"`
}
// ELBPolicy is a structure defining schema for ELB LoadBalancing based Policy
type ELBPolicy struct {
LBPolicy
SourceVIP string `json:"SourceVIP,omitempty"`
VIPs []string `json:"VIPs,omitempty"`
ILB bool `json:"ILB,omitempty"`
}
// LBPolicy is a structure defining schema for LoadBalancing based Policy
type LBPolicy struct {
Policy
Protocol uint16 `json:"Protocol,omitempty"`
InternalPort uint16
ExternalPort uint16
}
// PolicyList is a structure defining schema for Policy list request
type PolicyList struct {
ID string `json:"ID,omitempty"`
EndpointReferences []string `json:"References,omitempty"`
Policies []json.RawMessage `json:"Policies,omitempty"`
}
// HNSPolicyListRequest makes a call into HNS to update/query a single policy list
func HNSPolicyListRequest(method, path, request string) (*PolicyList, error) {
var policy PolicyList
err := hnsCall(method, "/policylists/"+path, request, &policy)
if err != nil {
return nil, err
}
return &policy, nil
}
// HNSListPolicyListRequest gets all the policy lists
func HNSListPolicyListRequest() ([]PolicyList, error) {
var plist []PolicyList
err := hnsCall("GET", "/policylists/", "", &plist)
if err != nil {
return nil, err
}
return plist, nil
}
// PolicyListRequest makes an HNS call to modify/query a network policy list
func PolicyListRequest(method, path, request string) (*PolicyList, error) {
policylist := &PolicyList{}
err := hnsCall(method, "/policylists/"+path, request, &policylist)
if err != nil {
return nil, err
}
return policylist, nil
}
// GetPolicyListByID gets the policy list by ID
func GetPolicyListByID(policyListID string) (*PolicyList, error) {
return PolicyListRequest("GET", policyListID, "")
}
// Create creates a PolicyList by sending a PolicyListRequest to HNS.
func (policylist *PolicyList) Create() (*PolicyList, error) {
operation := "Create"
title := "hcsshim::PolicyList::" + operation
logrus.Debugf(title+" id=%s", policylist.ID)
jsonString, err := json.Marshal(policylist)
if err != nil {
return nil, err
}
return PolicyListRequest("POST", "", string(jsonString))
}
// Delete deletes PolicyList
func (policylist *PolicyList) Delete() (*PolicyList, error) {
operation := "Delete"
title := "hcsshim::PolicyList::" + operation
logrus.Debugf(title+" id=%s", policylist.ID)
return PolicyListRequest("DELETE", policylist.ID, "")
}
// AddEndpoint adds an endpoint to a Policy List
func (policylist *PolicyList) AddEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) {
operation := "AddEndpoint"
title := "hcsshim::PolicyList::" + operation
logrus.Debugf(title+" id=%s, endpointId:%s", policylist.ID, endpoint.Id)
_, err := policylist.Delete()
if err != nil {
return nil, err
}
// Add Endpoint to the Existing List
policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id)
return policylist.Create()
}
// RemoveEndpoint removes an endpoint from the Policy List
func (policylist *PolicyList) RemoveEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) {
operation := "RemoveEndpoint"
title := "hcsshim::PolicyList::" + operation
logrus.Debugf(title+" id=%s, endpointId:%s", policylist.ID, endpoint.Id)
_, err := policylist.Delete()
if err != nil {
return nil, err
}
elementToRemove := "/endpoints/" + endpoint.Id
var references []string
for _, endpointReference := range policylist.EndpointReferences {
if endpointReference == elementToRemove {
continue
}
references = append(references, endpointReference)
}
policylist.EndpointReferences = references
return policylist.Create()
}
// AddLoadBalancer creates a load balancer policy list for the specified endpoints
func AddLoadBalancer(endpoints []HNSEndpoint, isILB bool, sourceVIP, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*PolicyList, error) {
operation := "AddLoadBalancer"
title := "hcsshim::PolicyList::" + operation
logrus.Debugf(title+" endpointId=%v, isILB=%v, sourceVIP=%s, vip=%s, protocol=%v, internalPort=%v, externalPort=%v", endpoints, isILB, sourceVIP, vip, protocol, internalPort, externalPort)
policylist := &PolicyList{}
elbPolicy := &ELBPolicy{
SourceVIP: sourceVIP,
ILB: isILB,
}
if len(vip) > 0 {
elbPolicy.VIPs = []string{vip}
}
elbPolicy.Type = ExternalLoadBalancer
elbPolicy.Protocol = protocol
elbPolicy.InternalPort = internalPort
elbPolicy.ExternalPort = externalPort
for _, endpoint := range endpoints {
policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id)
}
jsonString, err := json.Marshal(elbPolicy)
if err != nil {
return nil, err
}
policylist.Policies = append(policylist.Policies, jsonString)
return policylist.Create()
}
// AddRoute adds route policy list for the specified endpoints
func AddRoute(endpoints []HNSEndpoint, destinationPrefix string, nextHop string, encapEnabled bool) (*PolicyList, error) {
operation := "AddRoute"
title := "hcsshim::PolicyList::" + operation
logrus.Debugf(title+" destinationPrefix:%s", destinationPrefix)
policylist := &PolicyList{}
rPolicy := &RoutePolicy{
DestinationPrefix: destinationPrefix,
NextHop: nextHop,
EncapEnabled: encapEnabled,
}
rPolicy.Type = Route
for _, endpoint := range endpoints {
policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id)
}
jsonString, err := json.Marshal(rPolicy)
if err != nil {
return nil, err
}
policylist.Policies = append(policylist.Policies, jsonString)
return policylist.Create()
}
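A hedged usage sketch for AddLoadBalancer (not part of this diff); the endpoint IDs, VIPs, and ports are placeholders, and the import path is the assumed vendored location:

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/internal/hns" // assumed vendored path
)

func main() {
	// Endpoints would normally be looked up from HNS; the IDs here are fake.
	endpoints := []hns.HNSEndpoint{
		{Id: "11111111-2222-3333-4444-555555555555"},
		{Id: "66666666-7777-8888-9999-000000000000"},
	}

	// External (non-ILB) TCP load balancer: VIP 10.0.0.10:80 forwarded to
	// container port 8080 on the endpoints above.
	pl, err := hns.AddLoadBalancer(endpoints, false, "10.0.0.5", "10.0.0.10", 6, 8080, 80)
	if err != nil {
		panic(err)
	}
	fmt.Println("created policy list", pl.ID)
}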

View file

@ -0,0 +1,49 @@
package hns
import (
"github.com/sirupsen/logrus"
)
type HNSSupportedFeatures struct {
Acl HNSAclFeatures `json:"ACL"`
}
type HNSAclFeatures struct {
AclAddressLists bool `json:"AclAddressLists"`
AclNoHostRulePriority bool `json:"AclHostRulePriority"`
AclPortRanges bool `json:"AclPortRanges"`
AclRuleId bool `json:"AclRuleId"`
}
func GetHNSSupportedFeatures() HNSSupportedFeatures {
var hnsFeatures HNSSupportedFeatures
globals, err := GetHNSGlobals()
if err != nil {
// Expected on pre-1803 builds, all features will be false/unsupported
logrus.Debugf("Unable to obtain HNS globals: %s", err)
return hnsFeatures
}
hnsFeatures.Acl = HNSAclFeatures{
AclAddressLists: isHNSFeatureSupported(globals.Version, HNSVersion1803),
AclNoHostRulePriority: isHNSFeatureSupported(globals.Version, HNSVersion1803),
AclPortRanges: isHNSFeatureSupported(globals.Version, HNSVersion1803),
AclRuleId: isHNSFeatureSupported(globals.Version, HNSVersion1803),
}
return hnsFeatures
}
func isHNSFeatureSupported(currentVersion HNSVersion, minVersionSupported HNSVersion) bool {
if currentVersion.Major < minVersionSupported.Major {
return false
}
if currentVersion.Major > minVersionSupported.Major {
return true
}
if currentVersion.Minor < minVersionSupported.Minor {
return false
}
return true
}
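A short sketch of how a caller might gate behaviour on these feature flags (not part of this diff; import path assumed):

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/internal/hns" // assumed vendored path
)

func main() {
	// On pre-1803 hosts every flag comes back false, so callers can gate
	// features without doing a separate OS-version check.
	features := hns.GetHNSSupportedFeatures()
	if features.Acl.AclPortRanges {
		fmt.Println("HNS supports port ranges in ACL policies")
	} else {
		fmt.Println("falling back to one ACL rule per port")
	}
}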

View file

@ -0,0 +1,110 @@
package hns
import (
"encoding/json"
"fmt"
"os"
"path"
"strings"
)
type namespaceRequest struct {
IsDefault bool `json:",omitempty"`
}
type namespaceEndpointRequest struct {
ID string `json:"Id"`
}
type NamespaceResource struct {
Type string
Data json.RawMessage
}
type namespaceResourceRequest struct {
Type string
Data interface{}
}
type Namespace struct {
ID string
IsDefault bool `json:",omitempty"`
ResourceList []NamespaceResource `json:",omitempty"`
}
func issueNamespaceRequest(id *string, method, subpath string, request interface{}) (*Namespace, error) {
var err error
hnspath := "/namespaces/"
if id != nil {
hnspath = path.Join(hnspath, *id)
}
if subpath != "" {
hnspath = path.Join(hnspath, subpath)
}
var reqJSON []byte
if request != nil {
if reqJSON, err = json.Marshal(request); err != nil {
return nil, err
}
}
var ns Namespace
err = hnsCall(method, hnspath, string(reqJSON), &ns)
if err != nil {
if strings.Contains(err.Error(), "Element not found.") {
return nil, os.ErrNotExist
}
return nil, fmt.Errorf("%s %s: %s", method, hnspath, err)
}
return &ns, err
}
func CreateNamespace() (string, error) {
req := namespaceRequest{}
ns, err := issueNamespaceRequest(nil, "POST", "", &req)
if err != nil {
return "", err
}
return ns.ID, nil
}
func RemoveNamespace(id string) error {
_, err := issueNamespaceRequest(&id, "DELETE", "", nil)
return err
}
func GetNamespaceEndpoints(id string) ([]string, error) {
ns, err := issueNamespaceRequest(&id, "GET", "", nil)
if err != nil {
return nil, err
}
var endpoints []string
for _, rsrc := range ns.ResourceList {
if rsrc.Type == "Endpoint" {
var endpoint namespaceEndpointRequest
err = json.Unmarshal(rsrc.Data, &endpoint)
if err != nil {
return nil, fmt.Errorf("unmarshal endpoint: %s", err)
}
endpoints = append(endpoints, endpoint.ID)
}
}
return endpoints, nil
}
func AddNamespaceEndpoint(id string, endpointID string) error {
resource := namespaceResourceRequest{
Type: "Endpoint",
Data: namespaceEndpointRequest{endpointID},
}
_, err := issueNamespaceRequest(&id, "POST", "addresource", &resource)
return err
}
func RemoveNamespaceEndpoint(id string, endpointID string) error {
resource := namespaceResourceRequest{
Type: "Endpoint",
Data: namespaceEndpointRequest{endpointID},
}
_, err := issueNamespaceRequest(&id, "POST", "removeresource", &resource)
return err
}
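A minimal sketch of the namespace helpers above (not part of this diff; import path assumed, endpoint ID is a placeholder):

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/internal/hns" // assumed vendored path
)

func main() {
	// Create a network namespace, attach an endpoint, then read back what
	// HNS reports for that namespace.
	id, err := hns.CreateNamespace()
	if err != nil {
		panic(err)
	}
	defer hns.RemoveNamespace(id)

	if err := hns.AddNamespaceEndpoint(id, "11111111-2222-3333-4444-555555555555"); err != nil {
		panic(err)
	}

	endpoints, err := hns.GetNamespaceEndpoints(id)
	if err != nil {
		panic(err)
	}
	fmt.Println("namespace", id, "has endpoints", endpoints)
}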

View file

@ -0,0 +1,74 @@
// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
package hns
import (
"syscall"
"unsafe"
"github.com/Microsoft/hcsshim/internal/interop"
"golang.org/x/sys/windows"
)
var _ unsafe.Pointer
// Do the interface allocations only once for common
// Errno values.
const (
errnoERROR_IO_PENDING = 997
)
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
)
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
return nil
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values seen on Windows. (perhaps when running
// all.bat?)
return e
}
var (
modvmcompute = windows.NewLazySystemDLL("vmcompute.dll")
procHNSCall = modvmcompute.NewProc("HNSCall")
)
func _hnsCall(method string, path string, object string, response **uint16) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(method)
if hr != nil {
return
}
var _p1 *uint16
_p1, hr = syscall.UTF16PtrFromString(path)
if hr != nil {
return
}
var _p2 *uint16
_p2, hr = syscall.UTF16PtrFromString(object)
if hr != nil {
return
}
return __hnsCall(_p0, _p1, _p2, response)
}
func __hnsCall(method *uint16, path *uint16, object *uint16, response **uint16) (hr error) {
if hr = procHNSCall.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall6(procHNSCall.Addr(), 4, uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}

View file

@ -0,0 +1,27 @@
package interop
import (
"syscall"
"unsafe"
)
//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go interop.go
//sys coTaskMemFree(buffer unsafe.Pointer) = ole32.CoTaskMemFree
func ConvertAndFreeCoTaskMemString(buffer *uint16) string {
str := syscall.UTF16ToString((*[1 << 29]uint16)(unsafe.Pointer(buffer))[:])
coTaskMemFree(unsafe.Pointer(buffer))
return str
}
func ConvertAndFreeCoTaskMemBytes(buffer *uint16) []byte {
return []byte(ConvertAndFreeCoTaskMemString(buffer))
}
func Win32FromHresult(hr uintptr) syscall.Errno {
if hr&0x1fff0000 == 0x00070000 {
return syscall.Errno(hr & 0xffff)
}
return syscall.Errno(hr)
}
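A worked example of the HRESULT-to-errno mapping above (not part of this diff; Windows-only, import path assumed): 0x80070005 carries the Win32 facility code 0x0007, so only the low 16 bits are kept.

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/internal/interop" // assumed vendored path
)

func main() {
	// 0x80070005 is the HRESULT wrapping of ERROR_ACCESS_DENIED, so
	// Win32FromHresult strips the wrapper and returns errno 5.
	errno := interop.Win32FromHresult(0x80070005)
	fmt.Printf("errno = %d\n", uint32(errno))
}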

View file

@ -0,0 +1,48 @@
// Code generated by 'go generate'; DO NOT EDIT.
package interop
import (
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
var _ unsafe.Pointer
// Do the interface allocations only once for common
// Errno values.
const (
errnoERROR_IO_PENDING = 997
)
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
)
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
return nil
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values seen on Windows. (perhaps when running
// all.bat?)
return e
}
var (
modole32 = windows.NewLazySystemDLL("ole32.dll")
procCoTaskMemFree = modole32.NewProc("CoTaskMemFree")
)
func coTaskMemFree(buffer unsafe.Pointer) {
syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(buffer), 0, 0)
return
}

View file

@ -0,0 +1,24 @@
package longpath
import (
"path/filepath"
"strings"
)
// LongAbs makes a path absolute and returns it in NT long path form.
func LongAbs(path string) (string, error) {
if strings.HasPrefix(path, `\\?\`) || strings.HasPrefix(path, `\\.\`) {
return path, nil
}
if !filepath.IsAbs(path) {
absPath, err := filepath.Abs(path)
if err != nil {
return "", err
}
path = absPath
}
if strings.HasPrefix(path, `\\`) {
return `\\?\UNC\` + path[2:], nil
}
return `\\?\` + path, nil
}
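A small sketch of LongAbs in action (not part of this diff; import path assumed, paths are placeholders):

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/internal/longpath" // assumed vendored path
)

func main() {
	// A drive-letter path gains the \\?\ prefix.
	p, err := longpath.LongAbs(`C:\ProgramData\docker\windowsfilter`)
	if err != nil {
		panic(err)
	}
	fmt.Println(p) // \\?\C:\ProgramData\docker\windowsfilter

	// A UNC path is rewritten under \\?\UNC\.
	p, _ = longpath.LongAbs(`\\server\share\layer`)
	fmt.Println(p) // \\?\UNC\server\share\layer
}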

View file

@ -0,0 +1,52 @@
package mergemaps
import "encoding/json"
// Merge recursively merges map `fromMap` into map `ToMap`. Any pre-existing values
// in ToMap are overwritten. Values in fromMap are added to ToMap.
// From http://stackoverflow.com/questions/40491438/merging-two-json-strings-in-golang
func Merge(fromMap, ToMap interface{}) interface{} {
switch fromMap := fromMap.(type) {
case map[string]interface{}:
ToMap, ok := ToMap.(map[string]interface{})
if !ok {
return fromMap
}
for keyToMap, valueToMap := range ToMap {
if valueFromMap, ok := fromMap[keyToMap]; ok {
fromMap[keyToMap] = Merge(valueFromMap, valueToMap)
} else {
fromMap[keyToMap] = valueToMap
}
}
case nil:
// merge(nil, map[string]interface{...}) -> map[string]interface{...}
ToMap, ok := ToMap.(map[string]interface{})
if ok {
return ToMap
}
}
return fromMap
}
// MergeJSON merges the contents of a JSON string into an object representation,
// returning a new object suitable for translating to JSON.
func MergeJSON(object interface{}, additionalJSON []byte) (interface{}, error) {
if len(additionalJSON) == 0 {
return object, nil
}
objectJSON, err := json.Marshal(object)
if err != nil {
return nil, err
}
var objectMap, newMap map[string]interface{}
err = json.Unmarshal(objectJSON, &objectMap)
if err != nil {
return nil, err
}
err = json.Unmarshal(additionalJSON, &newMap)
if err != nil {
return nil, err
}
return Merge(newMap, objectMap), nil
}
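A hedged sketch of MergeJSON (not part of this diff; import path assumed, values illustrative). Keys supplied in the additional JSON win over keys already present in the object, which is what allows caller-supplied JSON to override defaults:

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/internal/mergemaps" // assumed vendored path
)

func main() {
	object := map[string]interface{}{
		"Name":  "container1",
		"Owner": "docker",
	}
	additional := []byte(`{"Owner":"custom","HostName":"web01"}`)

	merged, err := mergemaps.MergeJSON(object, additional)
	if err != nil {
		panic(err)
	}
	// e.g. map[HostName:web01 Name:container1 Owner:custom] (key order may vary)
	fmt.Printf("%v\n", merged)
}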

View file

@ -1,4 +1,4 @@
package hcsshim
package safefile
import (
"errors"
@ -10,9 +10,13 @@ import (
"unicode/utf16"
"unsafe"
"github.com/Microsoft/hcsshim/internal/longpath"
winio "github.com/Microsoft/go-winio"
)
//go:generate go run $GOROOT\src\syscall\mksyscall_windows.go -output zsyscall_windows.go safeopen.go
//sys ntCreateFile(handle *uintptr, accessMask uint32, oa *objectAttributes, iosb *ioStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) = ntdll.NtCreateFile
//sys ntSetInformationFile(handle uintptr, iosb *ioStatusBlock, information uintptr, length uint32, class uint32) (status uint32) = ntdll.NtSetInformationFile
//sys rtlNtStatusToDosError(status uint32) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb
@ -53,28 +57,28 @@ const (
_FileLinkInformation = 11
_FileDispositionInformationEx = 64
_FILE_READ_ATTRIBUTES = 0x0080
_FILE_WRITE_ATTRIBUTES = 0x0100
_DELETE = 0x10000
FILE_READ_ATTRIBUTES = 0x0080
FILE_WRITE_ATTRIBUTES = 0x0100
DELETE = 0x10000
_FILE_OPEN = 1
_FILE_CREATE = 2
FILE_OPEN = 1
FILE_CREATE = 2
_FILE_DIRECTORY_FILE = 0x00000001
_FILE_SYNCHRONOUS_IO_NONALERT = 0x00000020
_FILE_DELETE_ON_CLOSE = 0x00001000
_FILE_OPEN_FOR_BACKUP_INTENT = 0x00004000
_FILE_OPEN_REPARSE_POINT = 0x00200000
FILE_DIRECTORY_FILE = 0x00000001
FILE_SYNCHRONOUS_IO_NONALERT = 0x00000020
FILE_DELETE_ON_CLOSE = 0x00001000
FILE_OPEN_FOR_BACKUP_INTENT = 0x00004000
FILE_OPEN_REPARSE_POINT = 0x00200000
_FILE_DISPOSITION_DELETE = 0x00000001
FILE_DISPOSITION_DELETE = 0x00000001
_OBJ_DONT_REPARSE = 0x1000
_STATUS_REPARSE_POINT_ENCOUNTERED = 0xC000050B
)
func openRoot(path string) (*os.File, error) {
longpath, err := makeLongAbsPath(path)
func OpenRoot(path string) (*os.File, error) {
longpath, err := longpath.LongAbs(path)
if err != nil {
return nil, err
}
@ -141,7 +145,7 @@ func openRelativeInternal(path string, root *os.File, accessMask uint32, shareFl
0,
shareFlags,
createDisposition,
_FILE_OPEN_FOR_BACKUP_INTENT|_FILE_SYNCHRONOUS_IO_NONALERT|flags,
FILE_OPEN_FOR_BACKUP_INTENT|FILE_SYNCHRONOUS_IO_NONALERT|flags,
nil,
0,
)
@ -149,7 +153,7 @@ func openRelativeInternal(path string, root *os.File, accessMask uint32, shareFl
return nil, rtlNtStatusToDosError(status)
}
fullPath, err := makeLongAbsPath(filepath.Join(root.Name(), path))
fullPath, err := longpath.LongAbs(filepath.Join(root.Name(), path))
if err != nil {
syscall.Close(syscall.Handle(h))
return nil, err
@ -158,9 +162,9 @@ func openRelativeInternal(path string, root *os.File, accessMask uint32, shareFl
return os.NewFile(h, fullPath), nil
}
// openRelative opens a relative path from the given root, failing if
// OpenRelative opens a relative path from the given root, failing if
// any of the intermediate path components are reparse points.
func openRelative(path string, root *os.File, accessMask uint32, shareFlags uint32, createDisposition uint32, flags uint32) (*os.File, error) {
func OpenRelative(path string, root *os.File, accessMask uint32, shareFlags uint32, createDisposition uint32, flags uint32) (*os.File, error) {
f, err := openRelativeInternal(path, root, accessMask, shareFlags, createDisposition, flags)
if err != nil {
err = &os.PathError{Op: "open", Path: filepath.Join(root.Name(), path), Err: err}
@ -168,17 +172,17 @@ func openRelative(path string, root *os.File, accessMask uint32, shareFlags uint
return f, err
}
// linkRelative creates a hard link from oldname to newname (relative to oldroot
// LinkRelative creates a hard link from oldname to newname (relative to oldroot
// and newroot), failing if any of the intermediate path components are reparse
// points.
func linkRelative(oldname string, oldroot *os.File, newname string, newroot *os.File) error {
func LinkRelative(oldname string, oldroot *os.File, newname string, newroot *os.File) error {
// Open the old file.
oldf, err := openRelativeInternal(
oldname,
oldroot,
syscall.FILE_WRITE_ATTRIBUTES,
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
_FILE_OPEN,
FILE_OPEN,
0,
)
if err != nil {
@ -195,8 +199,8 @@ func linkRelative(oldname string, oldroot *os.File, newname string, newroot *os.
newroot,
syscall.GENERIC_READ,
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
_FILE_OPEN,
_FILE_DIRECTORY_FILE)
FILE_OPEN,
FILE_DIRECTORY_FILE)
if err != nil {
return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: err}
}
@ -248,7 +252,7 @@ func linkRelative(oldname string, oldroot *os.File, newname string, newroot *os.
// deleteOnClose marks a file to be deleted when the handle is closed.
func deleteOnClose(f *os.File) error {
disposition := fileDispositionInformationEx{Flags: _FILE_DISPOSITION_DELETE}
disposition := fileDispositionInformationEx{Flags: FILE_DISPOSITION_DELETE}
var iosb ioStatusBlock
status := ntSetInformationFile(
f.Fd(),
@ -281,16 +285,16 @@ func clearReadOnly(f *os.File) error {
return winio.SetFileBasicInfo(f, &sbi)
}
// removeRelative removes a file or directory relative to a root, failing if any
// RemoveRelative removes a file or directory relative to a root, failing if any
// intermediate path components are reparse points.
func removeRelative(path string, root *os.File) error {
func RemoveRelative(path string, root *os.File) error {
f, err := openRelativeInternal(
path,
root,
_FILE_READ_ATTRIBUTES|_FILE_WRITE_ATTRIBUTES|_DELETE,
FILE_READ_ATTRIBUTES|FILE_WRITE_ATTRIBUTES|DELETE,
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
_FILE_OPEN,
_FILE_OPEN_REPARSE_POINT)
FILE_OPEN,
FILE_OPEN_REPARSE_POINT)
if err == nil {
defer f.Close()
err = deleteOnClose(f)
@ -306,10 +310,10 @@ func removeRelative(path string, root *os.File) error {
return nil
}
// removeAllRelative removes a directory tree relative to a root, failing if any
// RemoveAllRelative removes a directory tree relative to a root, failing if any
// intermediate path components are reparse points.
func removeAllRelative(path string, root *os.File) error {
fi, err := lstatRelative(path, root)
func RemoveAllRelative(path string, root *os.File) error {
fi, err := LstatRelative(path, root)
if err != nil {
if os.IsNotExist(err) {
return nil
@ -319,7 +323,7 @@ func removeAllRelative(path string, root *os.File) error {
fileAttributes := fi.Sys().(*syscall.Win32FileAttributeData).FileAttributes
if fileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY == 0 || fileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0 {
// If this is a reparse point, it can't have children. Simple remove will do.
err := removeRelative(path, root)
err := RemoveRelative(path, root)
if err == nil || os.IsNotExist(err) {
return nil
}
@ -327,7 +331,7 @@ func removeAllRelative(path string, root *os.File) error {
}
// It is necessary to use os.Open as Readdirnames does not work with
// openRelative. This is safe because the above lstatrelative fails
// OpenRelative. This is safe because the above lstatrelative fails
// if the target is outside the root, and we know this is not a
// symlink from the above FILE_ATTRIBUTE_REPARSE_POINT check.
fd, err := os.Open(filepath.Join(root.Name(), path))
@ -344,7 +348,7 @@ func removeAllRelative(path string, root *os.File) error {
for {
names, err1 := fd.Readdirnames(100)
for _, name := range names {
err1 := removeAllRelative(path+string(os.PathSeparator)+name, root)
err1 := RemoveAllRelative(path+string(os.PathSeparator)+name, root)
if err == nil {
err = err1
}
@ -363,7 +367,7 @@ func removeAllRelative(path string, root *os.File) error {
fd.Close()
// Remove directory.
err1 := removeRelative(path, root)
err1 := RemoveRelative(path, root)
if err1 == nil || os.IsNotExist(err1) {
return nil
}
@ -373,16 +377,16 @@ func removeAllRelative(path string, root *os.File) error {
return err
}
// mkdirRelative creates a directory relative to a root, failing if any
// MkdirRelative creates a directory relative to a root, failing if any
// intermediate path components are reparse points.
func mkdirRelative(path string, root *os.File) error {
func MkdirRelative(path string, root *os.File) error {
f, err := openRelativeInternal(
path,
root,
0,
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
_FILE_CREATE,
_FILE_DIRECTORY_FILE)
FILE_CREATE,
FILE_DIRECTORY_FILE)
if err == nil {
f.Close()
} else {
@ -391,16 +395,16 @@ func mkdirRelative(path string, root *os.File) error {
return err
}
// lstatRelative performs a stat operation on a file relative to a root, failing
// LstatRelative performs a stat operation on a file relative to a root, failing
// if any intermediate path components are reparse points.
func lstatRelative(path string, root *os.File) (os.FileInfo, error) {
func LstatRelative(path string, root *os.File) (os.FileInfo, error) {
f, err := openRelativeInternal(
path,
root,
_FILE_READ_ATTRIBUTES,
FILE_READ_ATTRIBUTES,
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
_FILE_OPEN,
_FILE_OPEN_REPARSE_POINT)
FILE_OPEN,
FILE_OPEN_REPARSE_POINT)
if err != nil {
return nil, &os.PathError{Op: "stat", Path: filepath.Join(root.Name(), path), Err: err}
}
@ -408,16 +412,16 @@ func lstatRelative(path string, root *os.File) (os.FileInfo, error) {
return f.Stat()
}
// ensureNotReparsePointRelative validates that a given file (relative to a
// EnsureNotReparsePointRelative validates that a given file (relative to a
// root) and all intermediate path components are not a reparse points.
func ensureNotReparsePointRelative(path string, root *os.File) error {
func EnsureNotReparsePointRelative(path string, root *os.File) error {
// Perform an open with OBJ_DONT_REPARSE but without specifying FILE_OPEN_REPARSE_POINT.
f, err := openRelative(
f, err := OpenRelative(
path,
root,
0,
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
_FILE_OPEN,
FILE_OPEN,
0)
if err != nil {
return err

View file

@ -0,0 +1,79 @@
// Code generated by 'go generate'; DO NOT EDIT.
package safefile
import (
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
var _ unsafe.Pointer
// Do the interface allocations only once for common
// Errno values.
const (
errnoERROR_IO_PENDING = 997
)
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
)
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
return nil
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values seen on Windows. (perhaps when running
// all.bat?)
return e
}
var (
modntdll = windows.NewLazySystemDLL("ntdll.dll")
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
procNtCreateFile = modntdll.NewProc("NtCreateFile")
procNtSetInformationFile = modntdll.NewProc("NtSetInformationFile")
procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
procLocalAlloc = modkernel32.NewProc("LocalAlloc")
procLocalFree = modkernel32.NewProc("LocalFree")
)
func ntCreateFile(handle *uintptr, accessMask uint32, oa *objectAttributes, iosb *ioStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) {
r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(fileAttributes), uintptr(shareAccess), uintptr(createDisposition), uintptr(createOptions), uintptr(unsafe.Pointer(eaBuffer)), uintptr(eaLength), 0)
status = uint32(r0)
return
}
func ntSetInformationFile(handle uintptr, iosb *ioStatusBlock, information uintptr, length uint32, class uint32) (status uint32) {
r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), uintptr(class), 0)
status = uint32(r0)
return
}
func rtlNtStatusToDosError(status uint32) (winerr error) {
r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0)
if r0 != 0 {
winerr = syscall.Errno(r0)
}
return
}
func localAlloc(flags uint32, size int) (ptr uintptr) {
r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(size), 0)
ptr = uintptr(r0)
return
}
func localFree(ptr uintptr) {
syscall.Syscall(procLocalFree.Addr(), 1, uintptr(ptr), 0, 0)
return
}

View file

@ -0,0 +1,228 @@
package schema1
import (
"encoding/json"
"time"
)
// ProcessConfig is used as both the input of Container.CreateProcess
// and to convert the parameters to JSON for passing onto the HCS
type ProcessConfig struct {
ApplicationName string `json:",omitempty"`
CommandLine string `json:",omitempty"`
CommandArgs []string `json:",omitempty"` // Used by Linux Containers on Windows
User string `json:",omitempty"`
WorkingDirectory string `json:",omitempty"`
Environment map[string]string `json:",omitempty"`
EmulateConsole bool `json:",omitempty"`
CreateStdInPipe bool `json:",omitempty"`
CreateStdOutPipe bool `json:",omitempty"`
CreateStdErrPipe bool `json:",omitempty"`
ConsoleSize [2]uint `json:",omitempty"`
CreateInUtilityVm bool `json:",omitempty"` // Used by Linux Containers on Windows
OCISpecification *json.RawMessage `json:",omitempty"` // Used by Linux Containers on Windows
}
type Layer struct {
ID string
Path string
}
type MappedDir struct {
HostPath string
ContainerPath string
ReadOnly bool
BandwidthMaximum uint64
IOPSMaximum uint64
CreateInUtilityVM bool
// LinuxMetadata - Support added in 1803/RS4+.
LinuxMetadata bool `json:",omitempty"`
}
type MappedPipe struct {
HostPath string
ContainerPipeName string
}
type HvRuntime struct {
ImagePath string `json:",omitempty"`
SkipTemplate bool `json:",omitempty"`
LinuxInitrdFile string `json:",omitempty"` // File under ImagePath on host containing an initrd image for starting a Linux utility VM
LinuxKernelFile string `json:",omitempty"` // File under ImagePath on host containing a kernel for starting a Linux utility VM
LinuxBootParameters string `json:",omitempty"` // Additional boot parameters for starting a Linux Utility VM in initrd mode
BootSource string `json:",omitempty"` // "Vhd" for Linux Utility VM booting from VHD
WritableBootSource bool `json:",omitempty"` // Linux Utility VM booting from VHD
}
type MappedVirtualDisk struct {
HostPath string `json:",omitempty"` // Path to VHD on the host
ContainerPath string // Platform-specific mount point path in the container
CreateInUtilityVM bool `json:",omitempty"`
ReadOnly bool `json:",omitempty"`
Cache string `json:",omitempty"` // "" (Unspecified); "Disabled"; "Enabled"; "Private"; "PrivateAllowSharing"
AttachOnly bool `json:",omitempty"`
}
// AssignedDevice represents a device that has been directly assigned to a container
//
// NOTE: Support added in RS5
type AssignedDevice struct {
// InterfaceClassGUID of the device to assign to container.
InterfaceClassGUID string `json:"InterfaceClassGuid,omitempty"`
}
// ContainerConfig is used as both the input of CreateContainer
// and to convert the parameters to JSON for passing onto the HCS
type ContainerConfig struct {
SystemType string // HCS requires this to be hard-coded to "Container"
Name string // Name of the container. We use the docker ID.
Owner string `json:",omitempty"` // The management platform that created this container
VolumePath string `json:",omitempty"` // Windows volume path for scratch space. Used by Windows Server Containers only. Format \\?\\Volume{GUID}
IgnoreFlushesDuringBoot bool `json:",omitempty"` // Optimization hint for container startup in Windows
LayerFolderPath string `json:",omitempty"` // Where the layer folders are located. Used by Windows Server Containers only. Format %root%\windowsfilter\containerID
Layers []Layer // List of storage layers. Required for Windows Server and Hyper-V Containers. Format ID=GUID;Path=%root%\windowsfilter\layerID
Credentials string `json:",omitempty"` // Credentials information
ProcessorCount uint32 `json:",omitempty"` // Number of processors to assign to the container.
ProcessorWeight uint64 `json:",omitempty"` // CPU shares (relative weight to other containers with cpu shares). Range is from 1 to 10000. A value of 0 results in default shares.
ProcessorMaximum int64 `json:",omitempty"` // Specifies the portion of processor cycles that this container can use as a percentage times 100. Range is from 1 to 10000. A value of 0 results in no limit.
StorageIOPSMaximum uint64 `json:",omitempty"` // Maximum Storage IOPS
StorageBandwidthMaximum uint64 `json:",omitempty"` // Maximum Storage Bandwidth in bytes per second
StorageSandboxSize uint64 `json:",omitempty"` // Size in bytes that the container system drive should be expanded to if smaller
MemoryMaximumInMB int64 `json:",omitempty"` // Maximum memory available to the container in Megabytes
HostName string `json:",omitempty"` // Hostname
MappedDirectories []MappedDir `json:",omitempty"` // List of mapped directories (volumes/mounts)
MappedPipes []MappedPipe `json:",omitempty"` // List of mapped Windows named pipes
HvPartition bool // True if it is a Hyper-V Container
NetworkSharedContainerName string `json:",omitempty"` // Name (ID) of the container that we will share the network stack with.
EndpointList []string `json:",omitempty"` // List of networking endpoints to be attached to container
HvRuntime *HvRuntime `json:",omitempty"` // Hyper-V container settings. Used by Hyper-V containers only. Format ImagePath=%root%\BaseLayerID\UtilityVM
Servicing bool `json:",omitempty"` // True if this container is for servicing
AllowUnqualifiedDNSQuery bool `json:",omitempty"` // True to allow unqualified DNS name resolution
DNSSearchList string `json:",omitempty"` // Comma separated list of DNS suffixes to use for name resolution
ContainerType string `json:",omitempty"` // "Linux" for Linux containers on Windows. Omitted otherwise.
TerminateOnLastHandleClosed bool `json:",omitempty"` // Should HCS terminate the container once all handles have been closed
MappedVirtualDisks []MappedVirtualDisk `json:",omitempty"` // Array of virtual disks to mount at start
AssignedDevices []AssignedDevice `json:",omitempty"` // Array of devices to assign. NOTE: Support added in RS5
}
type ComputeSystemQuery struct {
IDs []string `json:"Ids,omitempty"`
Types []string `json:",omitempty"`
Names []string `json:",omitempty"`
Owners []string `json:",omitempty"`
}
type PropertyType string
const (
PropertyTypeStatistics PropertyType = "Statistics"
PropertyTypeProcessList = "ProcessList"
PropertyTypeMappedVirtualDisk = "MappedVirtualDisk"
)
type PropertyQuery struct {
PropertyTypes []PropertyType `json:",omitempty"`
}
// ContainerProperties holds the properties for a container and the processes running in that container
type ContainerProperties struct {
ID string `json:"Id"`
State string
Name string
SystemType string
Owner string
SiloGUID string `json:"SiloGuid,omitempty"`
RuntimeID string `json:"RuntimeId,omitempty"`
IsRuntimeTemplate bool `json:",omitempty"`
RuntimeImagePath string `json:",omitempty"`
Stopped bool `json:",omitempty"`
ExitType string `json:",omitempty"`
AreUpdatesPending bool `json:",omitempty"`
ObRoot string `json:",omitempty"`
Statistics Statistics `json:",omitempty"`
ProcessList []ProcessListItem `json:",omitempty"`
MappedVirtualDiskControllers map[int]MappedVirtualDiskController `json:",omitempty"`
}
// MemoryStats holds the memory statistics for a container
type MemoryStats struct {
UsageCommitBytes uint64 `json:"MemoryUsageCommitBytes,omitempty"`
UsageCommitPeakBytes uint64 `json:"MemoryUsageCommitPeakBytes,omitempty"`
UsagePrivateWorkingSetBytes uint64 `json:"MemoryUsagePrivateWorkingSetBytes,omitempty"`
}
// ProcessorStats holds the processor statistics for a container
type ProcessorStats struct {
TotalRuntime100ns uint64 `json:",omitempty"`
RuntimeUser100ns uint64 `json:",omitempty"`
RuntimeKernel100ns uint64 `json:",omitempty"`
}
// StorageStats holds the storage statistics for a container
type StorageStats struct {
ReadCountNormalized uint64 `json:",omitempty"`
ReadSizeBytes uint64 `json:",omitempty"`
WriteCountNormalized uint64 `json:",omitempty"`
WriteSizeBytes uint64 `json:",omitempty"`
}
// NetworkStats holds the network statistics for a container
type NetworkStats struct {
BytesReceived uint64 `json:",omitempty"`
BytesSent uint64 `json:",omitempty"`
PacketsReceived uint64 `json:",omitempty"`
PacketsSent uint64 `json:",omitempty"`
DroppedPacketsIncoming uint64 `json:",omitempty"`
DroppedPacketsOutgoing uint64 `json:",omitempty"`
EndpointId string `json:",omitempty"`
InstanceId string `json:",omitempty"`
}
// Statistics is the structure returned by a statistics call on a container
type Statistics struct {
Timestamp time.Time `json:",omitempty"`
ContainerStartTime time.Time `json:",omitempty"`
Uptime100ns uint64 `json:",omitempty"`
Memory MemoryStats `json:",omitempty"`
Processor ProcessorStats `json:",omitempty"`
Storage StorageStats `json:",omitempty"`
Network []NetworkStats `json:",omitempty"`
}
// ProcessList is the structure of an item returned by a ProcessList call on a container
type ProcessListItem struct {
CreateTimestamp time.Time `json:",omitempty"`
ImageName string `json:",omitempty"`
KernelTime100ns uint64 `json:",omitempty"`
MemoryCommitBytes uint64 `json:",omitempty"`
MemoryWorkingSetPrivateBytes uint64 `json:",omitempty"`
MemoryWorkingSetSharedBytes uint64 `json:",omitempty"`
ProcessId uint32 `json:",omitempty"`
UserTime100ns uint64 `json:",omitempty"`
}
// MappedVirtualDiskController is the structure of an item returned by a MappedVirtualDiskList call on a container
type MappedVirtualDiskController struct {
MappedVirtualDisks map[int]MappedVirtualDisk `json:",omitempty"`
}
// Type of Request Support in ModifySystem
type RequestType string
// Type of Resource Support in ModifySystem
type ResourceType string
// RequestType const
const (
Add RequestType = "Add"
Remove RequestType = "Remove"
Network ResourceType = "Network"
)
// ResourceModificationRequestResponse is the structure used to send request to the container to modify the system
// Supported resource types are Network and Request Types are Add/Remove
type ResourceModificationRequestResponse struct {
Resource ResourceType `json:"ResourceType"`
Data interface{} `json:"Settings"`
Request RequestType `json:"RequestType,omitempty"`
}
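A hedged sketch showing how the Add/Network request and resource constants above combine into a modification request (not part of this diff; import path assumed, endpoint ID is a placeholder, and hot-adding a network endpoint is only the illustrative case):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Microsoft/hcsshim/internal/schema1" // assumed vendored path
)

func main() {
	// Ask HCS to hot-add a network endpoint to a running container.
	req := schema1.ResourceModificationRequestResponse{
		Resource: schema1.Network,
		Request:  schema1.Add,
		Data:     "11111111-2222-3333-4444-555555555555", // endpoint ID (placeholder)
	}
	b, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// {"ResourceType":"Network","Settings":"11111111-2222-3333-4444-555555555555","RequestType":"Add"}
}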

View file

@ -0,0 +1,70 @@
package timeout
import (
"os"
"strconv"
"time"
)
var (
// defaultTimeout is the timeout for most operations that is not overridden.
defaultTimeout = 4 * time.Minute
// defaultTimeoutTestdRetry is the retry loop timeout for testd to respond
// for a disk to come online in LCOW.
defaultTimeoutTestdRetry = 5 * time.Second
)
// External variables for HCSShim consumers to use.
var (
// SystemCreate is the timeout for creating a compute system
SystemCreate time.Duration = defaultTimeout
// SystemStart is the timeout for starting a compute system
SystemStart time.Duration = defaultTimeout
// SystemPause is the timeout for pausing a compute system
SystemPause time.Duration = defaultTimeout
// SystemResume is the timeout for resuming a compute system
SystemResume time.Duration = defaultTimeout
// SyscallWatcher is the timeout before warning of a potential stuck platform syscall.
SyscallWatcher time.Duration = defaultTimeout
// Tar2VHD is the timeout for the tar2vhd operation to complete
Tar2VHD time.Duration = defaultTimeout
// ExternalCommandToStart is the timeout for external commands to start
ExternalCommandToStart = defaultTimeout
// ExternalCommandToComplete is the timeout for external commands to complete.
// Generally this means copying data from their stdio pipes.
ExternalCommandToComplete = defaultTimeout
// TestDRetryLoop is the timeout for testd retry loop when onlining a SCSI disk in LCOW
TestDRetryLoop = defaultTimeoutTestdRetry
)
func init() {
SystemCreate = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMCREATE", SystemCreate)
SystemStart = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMSTART", SystemStart)
SystemPause = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMPAUSE", SystemPause)
SystemResume = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSTEMRESUME", SystemResume)
SyscallWatcher = durationFromEnvironment("HCSSHIM_TIMEOUT_SYSCALLWATCHER", SyscallWatcher)
Tar2VHD = durationFromEnvironment("HCSSHIM_TIMEOUT_TAR2VHD", Tar2VHD)
ExternalCommandToStart = durationFromEnvironment("HCSSHIM_TIMEOUT_EXTERNALCOMMANDSTART", ExternalCommandToStart)
ExternalCommandToComplete = durationFromEnvironment("HCSSHIM_TIMEOUT_EXTERNALCOMMANDCOMPLETE", ExternalCommandToComplete)
TestDRetryLoop = durationFromEnvironment("HCSSHIM_TIMEOUT_TESTDRETRYLOOP", TestDRetryLoop)
}
func durationFromEnvironment(env string, defaultValue time.Duration) time.Duration {
envTimeout := os.Getenv(env)
if len(envTimeout) > 0 {
e, err := strconv.Atoi(envTimeout)
if err == nil && e > 0 {
return time.Second * time.Duration(e)
}
}
return defaultValue
}
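A small sketch of the override mechanism (not part of this diff; import path assumed): the variables are fixed once in init(), so the environment must be set before the importing process starts, and the values are interpreted as whole seconds.

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/internal/timeout" // assumed vendored path
)

func main() {
	// With HCSSHIM_TIMEOUT_SYSTEMCREATE=600 in the daemon's environment this
	// prints 10m0s; unset or invalid values leave the 4m default in place.
	fmt.Println("SystemCreate timeout:", timeout.SystemCreate)
}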

View file

@ -0,0 +1,25 @@
package wclayer
import (
"github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/sirupsen/logrus"
)
// ActivateLayer will find the layer with the given id and mount its filesystem.
// For a read/write layer, the mounted filesystem will appear as a volume on the
// host, while a read-only layer is generally expected to be a no-op.
// An activated layer must later be deactivated via DeactivateLayer.
func ActivateLayer(path string) error {
title := "hcsshim::ActivateLayer "
logrus.Debugf(title+"path %s", path)
err := activateLayer(&stdDriverInfo, path)
if err != nil {
err = hcserror.Errorf(err, title, "path=%s", path)
logrus.Error(err)
return err
}
logrus.Debugf(title+" - succeeded path=%s", path)
return nil
}

View file

@ -1,4 +1,4 @@
package hcsshim
package wclayer
import (
"errors"
@ -7,6 +7,8 @@ import (
"syscall"
"github.com/Microsoft/go-winio"
"github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/Microsoft/hcsshim/internal/safefile"
)
type baseLayerWriter struct {
@ -29,7 +31,7 @@ type dirInfo struct {
func reapplyDirectoryTimes(root *os.File, dis []dirInfo) error {
for i := range dis {
di := &dis[len(dis)-i-1] // reverse order: process child directories first
f, err := openRelative(di.path, root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, _FILE_OPEN, _FILE_DIRECTORY_FILE)
f, err := safefile.OpenRelative(di.path, root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, safefile.FILE_OPEN, safefile.FILE_DIRECTORY_FILE)
if err != nil {
return err
}
@ -84,21 +86,21 @@ func (w *baseLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) (err e
extraFlags := uint32(0)
if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
extraFlags |= _FILE_DIRECTORY_FILE
extraFlags |= safefile.FILE_DIRECTORY_FILE
if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 {
w.dirInfo = append(w.dirInfo, dirInfo{name, *fileInfo})
}
}
mode := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | winio.WRITE_DAC | winio.WRITE_OWNER | winio.ACCESS_SYSTEM_SECURITY)
f, err = openRelative(name, w.root, mode, syscall.FILE_SHARE_READ, _FILE_CREATE, extraFlags)
f, err = safefile.OpenRelative(name, w.root, mode, syscall.FILE_SHARE_READ, safefile.FILE_CREATE, extraFlags)
if err != nil {
return makeError(err, "Failed to openRelative", name)
return hcserror.New(err, "Failed to safefile.OpenRelative", name)
}
err = winio.SetFileBasicInfo(f, fileInfo)
if err != nil {
return makeError(err, "Failed to SetFileBasicInfo", name)
return hcserror.New(err, "Failed to SetFileBasicInfo", name)
}
w.f = f
@ -119,7 +121,7 @@ func (w *baseLayerWriter) AddLink(name string, target string) (err error) {
return err
}
return linkRelative(target, w.root, name, w.root)
return safefile.LinkRelative(target, w.root, name, w.root)
}
func (w *baseLayerWriter) Remove(name string) error {
@ -157,7 +159,7 @@ func (w *baseLayerWriter) Close() error {
}
if w.hasUtilityVM {
err := ensureNotReparsePointRelative("UtilityVM", w.root)
err := safefile.EnsureNotReparsePointRelative("UtilityVM", w.root)
if err != nil {
return err
}

View file

@ -0,0 +1,23 @@
package wclayer
import (
"github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/sirupsen/logrus"
)
// CreateLayer creates a new, empty, read-only layer on the filesystem based on
// the parent layer provided.
func CreateLayer(path, parent string) error {
title := "hcsshim::CreateLayer "
logrus.Debugf(title+"Flavour %d ID %s parent %s", path, parent)
err := createLayer(&stdDriverInfo, path, parent)
if err != nil {
err = hcserror.Errorf(err, title, "path=%s parent=%s flavour=%d", path, parent)
logrus.Error(err)
return err
}
logrus.Debugf(title+" - succeeded path=%s parent=%s flavour=%d", path, parent)
return nil
}

View file

@ -0,0 +1,31 @@
package wclayer
import (
"github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/sirupsen/logrus"
)
// CreateScratchLayer creates and populates a new read-write layer for use by a container.
// This requires both the id of the direct parent layer, as well as the full list
// of paths to all parent layers up to the base (and including the direct parent
// whose id was provided).
func CreateScratchLayer(path string, parentLayerPaths []string) error {
title := "hcsshim::CreateScratchLayer "
logrus.Debugf(title+"path %s", path)
// Generate layer descriptors
layers, err := layerPathsToDescriptors(parentLayerPaths)
if err != nil {
return err
}
err = createSandboxLayer(&stdDriverInfo, path, 0, layers)
if err != nil {
err = hcserror.Errorf(err, title, "path=%s", path)
logrus.Error(err)
return err
}
logrus.Debugf(title+"- succeeded path=%s", path)
return nil
}

View file

@ -0,0 +1,22 @@
package wclayer
import (
"github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/sirupsen/logrus"
)
// DeactivateLayer will dismount a layer that was mounted via ActivateLayer.
func DeactivateLayer(path string) error {
title := "hcsshim::DeactivateLayer "
logrus.Debugf(title+"path %s", path)
err := deactivateLayer(&stdDriverInfo, path)
if err != nil {
err = hcserror.Errorf(err, title, "path=%s", path)
logrus.Error(err)
return err
}
logrus.Debugf(title+"succeeded path=%s", path)
return nil
}

View file

@ -0,0 +1,23 @@
package wclayer
import (
"github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/sirupsen/logrus"
)
// DestroyLayer will remove the on-disk files representing the layer with the given
// path, including that layer's containing folder, if any.
func DestroyLayer(path string) error {
title := "hcsshim::DestroyLayer "
logrus.Debugf(title+"path %s", path)
err := destroyLayer(&stdDriverInfo, path)
if err != nil {
err = hcserror.Errorf(err, title, "path=%s", path)
logrus.Error(err)
return err
}
logrus.Debugf(title+"succeeded path=%s", path)
return nil
}

View file

@ -0,0 +1,22 @@
package wclayer
import (
"github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/sirupsen/logrus"
)
// ExpandScratchSize expands the size of a layer to at least size bytes.
func ExpandScratchSize(path string, size uint64) error {
title := "hcsshim::ExpandScratchSize "
logrus.Debugf(title+"path=%s size=%d", path, size)
err := expandSandboxSize(&stdDriverInfo, path, size)
if err != nil {
err = hcserror.Errorf(err, title, "path=%s size=%d", path, size)
logrus.Error(err)
return err
}
logrus.Debugf(title+"- succeeded path=%s size=%d", path, size)
return nil
}

View file

@ -1,4 +1,4 @@
package hcsshim
package wclayer
import (
"io"
@ -7,6 +7,8 @@ import (
"syscall"
"github.com/Microsoft/go-winio"
"github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/Microsoft/hcsshim/internal/interop"
"github.com/sirupsen/logrus"
)
@ -15,9 +17,9 @@ import (
// format includes any metadata required for later importing the layer (using
// ImportLayer), and requires the full list of parent layer paths in order to
// perform the export.
func ExportLayer(info DriverInfo, layerId string, exportFolderPath string, parentLayerPaths []string) error {
func ExportLayer(path string, exportFolderPath string, parentLayerPaths []string) error {
title := "hcsshim::ExportLayer "
logrus.Debugf(title+"flavour %d layerId %s folder %s", info.Flavour, layerId, exportFolderPath)
logrus.Debugf(title+"path %s folder %s", path, exportFolderPath)
// Generate layer descriptors
layers, err := layerPathsToDescriptors(parentLayerPaths)
@ -25,21 +27,14 @@ func ExportLayer(info DriverInfo, layerId string, exportFolderPath string, paren
return err
}
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
err = exportLayer(&stdDriverInfo, path, exportFolderPath, layers)
if err != nil {
err = hcserror.Errorf(err, title, "path=%s folder=%s", path, exportFolderPath)
logrus.Error(err)
return err
}
err = exportLayer(&infop, layerId, exportFolderPath, layers)
if err != nil {
err = makeErrorf(err, title, "layerId=%s flavour=%d folder=%s", layerId, info.Flavour, exportFolderPath)
logrus.Error(err)
return err
}
logrus.Debugf(title+"succeeded flavour=%d layerId=%s folder=%s", info.Flavour, layerId, exportFolderPath)
logrus.Debugf(title+"succeeded path=%s folder=%s", path, exportFolderPath)
return nil
}
@ -69,11 +64,11 @@ func (r *FilterLayerReader) Next() (string, int64, *winio.FileBasicInfo, error)
if err == syscall.ERROR_NO_MORE_FILES {
err = io.EOF
} else {
err = makeError(err, "ExportLayerNext", "")
err = hcserror.New(err, "ExportLayerNext", "")
}
return "", 0, nil, err
}
fileName := convertAndFreeCoTaskMemString(fileNamep)
fileName := interop.ConvertAndFreeCoTaskMemString(fileNamep)
if deleted != 0 {
fileInfo = nil
}
@ -88,7 +83,7 @@ func (r *FilterLayerReader) Read(b []byte) (int, error) {
var bytesRead uint32
err := exportLayerRead(r.context, b, &bytesRead)
if err != nil {
return 0, makeError(err, "ExportLayerRead", "")
return 0, hcserror.New(err, "ExportLayerRead", "")
}
if bytesRead == 0 {
return 0, io.EOF
@ -103,7 +98,7 @@ func (r *FilterLayerReader) Close() (err error) {
if r.context != 0 {
err = exportLayerEnd(r.context)
if err != nil {
err = makeError(err, "ExportLayerEnd", "")
err = hcserror.New(err, "ExportLayerEnd", "")
}
r.context = 0
}
@ -113,34 +108,30 @@ func (r *FilterLayerReader) Close() (err error) {
// NewLayerReader returns a new layer reader for reading the contents of an on-disk layer.
// The caller must have taken the SeBackupPrivilege privilege
// to call this and any methods on the resulting LayerReader.
func NewLayerReader(info DriverInfo, layerID string, parentLayerPaths []string) (LayerReader, error) {
func NewLayerReader(path string, parentLayerPaths []string) (LayerReader, error) {
if procExportLayerBegin.Find() != nil {
// The new layer reader is not available on this Windows build. Fall back to the
// legacy export code path.
path, err := ioutil.TempDir("", "hcs")
exportPath, err := ioutil.TempDir("", "hcs")
if err != nil {
return nil, err
}
err = ExportLayer(info, layerID, path, parentLayerPaths)
err = ExportLayer(path, exportPath, parentLayerPaths)
if err != nil {
os.RemoveAll(path)
os.RemoveAll(exportPath)
return nil, err
}
return &legacyLayerReaderWrapper{newLegacyLayerReader(path)}, nil
return &legacyLayerReaderWrapper{newLegacyLayerReader(exportPath)}, nil
}
layers, err := layerPathsToDescriptors(parentLayerPaths)
if err != nil {
return nil, err
}
infop, err := convertDriverInfo(info)
if err != nil {
return nil, err
}
r := &FilterLayerReader{}
err = exportLayerBegin(&infop, layerID, layers, &r.context)
err = exportLayerBegin(&stdDriverInfo, path, layers, &r.context)
if err != nil {
return nil, makeError(err, "ExportLayerBegin", "")
return nil, hcserror.New(err, "ExportLayerBegin", "")
}
return r, err
}

View file

@ -1,34 +1,28 @@
package hcsshim
package wclayer
import (
"syscall"
"github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/sirupsen/logrus"
)
// GetLayerMountPath will look for a mounted layer with the given id and return
// GetLayerMountPath will look for a mounted layer with the given path and return
// the path at which that layer can be accessed. This path may be a volume path
// if the layer is a mounted read-write layer, otherwise it is expected to be the
// folder path at which the layer is stored.
func GetLayerMountPath(info DriverInfo, id string) (string, error) {
func GetLayerMountPath(path string) (string, error) {
title := "hcsshim::GetLayerMountPath "
logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return "", err
}
logrus.Debugf(title+"path %s", path)
var mountPathLength uintptr
mountPathLength = 0
// Call the procedure itself.
logrus.Debugf("Calling proc (1)")
err = getLayerMountPath(&infop, id, &mountPathLength, nil)
err := getLayerMountPath(&stdDriverInfo, path, &mountPathLength, nil)
if err != nil {
err = makeErrorf(err, title, "(first call) id=%s flavour=%d", id, info.Flavour)
err = hcserror.Errorf(err, title, "(first call) path=%s", path)
logrus.Error(err)
return "", err
}
@ -42,14 +36,14 @@ func GetLayerMountPath(info DriverInfo, id string) (string, error) {
// Call the procedure again
logrus.Debugf("Calling proc (2)")
err = getLayerMountPath(&infop, id, &mountPathLength, &mountPathp[0])
err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, &mountPathp[0])
if err != nil {
err = makeErrorf(err, title, "(second call) id=%s flavour=%d", id, info.Flavour)
err = hcserror.Errorf(err, title, "(second call) path=%s", path)
logrus.Error(err)
return "", err
}
path := syscall.UTF16ToString(mountPathp[0:])
logrus.Debugf(title+"succeeded flavour=%d id=%s path=%s", info.Flavour, id, path)
return path, nil
mountPath := syscall.UTF16ToString(mountPathp[0:])
logrus.Debugf(title+"succeeded path=%s mountPath=%s", path, mountPath)
return mountPath, nil
}
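Taken together with ActivateLayer and DeactivateLayer earlier in this change, the path-based API can be used as in this hedged sketch (not part of this diff; Windows-only, requires the usual privileges, and the layer path is a placeholder):

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/internal/wclayer" // assumed vendored path
)

func main() {
	layerPath := `C:\ProgramData\docker\windowsfilter\example-layer` // placeholder

	// Mount the layer, find where it is exposed, then unmount it again.
	if err := wclayer.ActivateLayer(layerPath); err != nil {
		panic(err)
	}
	defer wclayer.DeactivateLayer(layerPath)

	mountPath, err := wclayer.GetLayerMountPath(layerPath)
	if err != nil {
		panic(err)
	}
	fmt.Println("layer mounted at", mountPath)
}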

View file

@ -1,6 +1,10 @@
package hcsshim
package wclayer
import "github.com/sirupsen/logrus"
import (
"github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/Microsoft/hcsshim/internal/interop"
"github.com/sirupsen/logrus"
)
// GetSharedBaseImages will enumerate the images stored in the common central
// image store and return descriptive info about those images for the purpose
@ -12,11 +16,11 @@ func GetSharedBaseImages() (imageData string, err error) {
var buffer *uint16
err = getBaseImages(&buffer)
if err != nil {
err = makeError(err, title, "")
err = hcserror.New(err, title, "")
logrus.Error(err)
return
}
imageData = convertAndFreeCoTaskMemString(buffer)
imageData = interop.ConvertAndFreeCoTaskMemString(buffer)
logrus.Debugf(title+" - succeeded output=%s", imageData)
return
}

View file

@ -0,0 +1,24 @@
package wclayer
import (
"fmt"
"github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/sirupsen/logrus"
)
// GrantVmAccess adds access to a file for a given VM
func GrantVmAccess(vmid string, filepath string) error {
title := fmt.Sprintf("hcsshim::GrantVmAccess id:%s path:%s ", vmid, filepath)
logrus.Debugf(title)
err := grantVmAccess(vmid, filepath)
if err != nil {
err = hcserror.Errorf(err, title, "path=%s", filepath)
logrus.Error(err)
return err
}
logrus.Debugf(title + " - succeeded")
return nil
}

View file

@ -1,4 +1,4 @@
package hcsshim
package wclayer
import (
"errors"
@ -7,6 +7,8 @@ import (
"path/filepath"
"github.com/Microsoft/go-winio"
"github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/Microsoft/hcsshim/internal/safefile"
"github.com/sirupsen/logrus"
)
@ -14,9 +16,9 @@ import (
// that into a layer with the id layerId. Note that in order to correctly populate
// the layer and interpret the transport format, all parent layers must already
// be present on the system at the paths provided in parentLayerPaths.
func ImportLayer(info DriverInfo, layerID string, importFolderPath string, parentLayerPaths []string) error {
func ImportLayer(path string, importFolderPath string, parentLayerPaths []string) error {
title := "hcsshim::ImportLayer "
logrus.Debugf(title+"flavour %d layerId %s folder %s", info.Flavour, layerID, importFolderPath)
logrus.Debugf(title+"path %s folder %s", path, importFolderPath)
// Generate layer descriptors
layers, err := layerPathsToDescriptors(parentLayerPaths)
@ -24,21 +26,14 @@ func ImportLayer(info DriverInfo, layerID string, importFolderPath string, paren
return err
}
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
err = importLayer(&stdDriverInfo, path, importFolderPath, layers)
if err != nil {
err = hcserror.Errorf(err, title, "path=%s folder=%s", path, importFolderPath)
logrus.Error(err)
return err
}
err = importLayer(&infop, layerID, importFolderPath, layers)
if err != nil {
err = makeErrorf(err, title, "layerId=%s flavour=%d folder=%s", layerID, info.Flavour, importFolderPath)
logrus.Error(err)
return err
}
logrus.Debugf(title+"succeeded flavour=%d layerId=%s folder=%s", info.Flavour, layerID, importFolderPath)
logrus.Debugf(title+"succeeded path=%s folder=%s", path, importFolderPath)
return nil
}
@ -73,7 +68,7 @@ func (w *FilterLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro
}
err := importLayerNext(w.context, name, fileInfo)
if err != nil {
return makeError(err, "ImportLayerNext", "")
return hcserror.New(err, "ImportLayerNext", "")
}
return nil
}
@ -92,7 +87,7 @@ func (w *FilterLayerWriter) Remove(name string) error {
}
err := importLayerNext(w.context, name, nil)
if err != nil {
return makeError(err, "ImportLayerNext", "")
return hcserror.New(err, "ImportLayerNext", "")
}
return nil
}
@ -101,7 +96,7 @@ func (w *FilterLayerWriter) Remove(name string) error {
func (w *FilterLayerWriter) Write(b []byte) (int, error) {
err := importLayerWrite(w.context, b)
if err != nil {
err = makeError(err, "ImportLayerWrite", "")
err = hcserror.New(err, "ImportLayerWrite", "")
return 0, err
}
return len(b), err
@ -113,7 +108,7 @@ func (w *FilterLayerWriter) Close() (err error) {
if w.context != 0 {
err = importLayerEnd(w.context)
if err != nil {
err = makeError(err, "ImportLayerEnd", "")
err = hcserror.New(err, "ImportLayerEnd", "")
}
w.context = 0
}
@ -122,8 +117,6 @@ func (w *FilterLayerWriter) Close() (err error) {
type legacyLayerWriterWrapper struct {
*legacyLayerWriter
info DriverInfo
layerID string
path string
parentLayerPaths []string
}
@ -136,28 +129,26 @@ func (r *legacyLayerWriterWrapper) Close() error {
return err
}
info := r.info
info.HomeDir = ""
if err = ImportLayer(info, r.destRoot.Name(), r.path, r.parentLayerPaths); err != nil {
if err = ImportLayer(r.destRoot.Name(), r.path, r.parentLayerPaths); err != nil {
return err
}
for _, name := range r.Tombstones {
if err = removeRelative(name, r.destRoot); err != nil && !os.IsNotExist(err) {
if err = safefile.RemoveRelative(name, r.destRoot); err != nil && !os.IsNotExist(err) {
return err
}
}
// Add any hard links that were collected.
for _, lnk := range r.PendingLinks {
if err = removeRelative(lnk.Path, r.destRoot); err != nil && !os.IsNotExist(err) {
if err = safefile.RemoveRelative(lnk.Path, r.destRoot); err != nil && !os.IsNotExist(err) {
return err
}
if err = linkRelative(lnk.Target, lnk.TargetRoot, lnk.Path, r.destRoot); err != nil {
if err = safefile.LinkRelative(lnk.Target, lnk.TargetRoot, lnk.Path, r.destRoot); err != nil {
return err
}
}
// Prepare the utility VM for use if one is present in the layer.
if r.HasUtilityVM {
err := ensureNotReparsePointRelative("UtilityVM", r.destRoot)
err := safefile.EnsureNotReparsePointRelative("UtilityVM", r.destRoot)
if err != nil {
return err
}
@ -172,10 +163,10 @@ func (r *legacyLayerWriterWrapper) Close() error {
// NewLayerWriter returns a new layer writer for creating a layer on disk.
// The caller must have taken the SeBackupPrivilege and SeRestorePrivilege privileges
// to call this and any methods on the resulting LayerWriter.
func NewLayerWriter(info DriverInfo, layerID string, parentLayerPaths []string) (LayerWriter, error) {
func NewLayerWriter(path string, parentLayerPaths []string) (LayerWriter, error) {
if len(parentLayerPaths) == 0 {
// This is a base layer. It gets imported differently.
f, err := openRoot(filepath.Join(info.HomeDir, layerID))
f, err := safefile.OpenRoot(path)
if err != nil {
return nil, err
}
@ -187,19 +178,17 @@ func NewLayerWriter(info DriverInfo, layerID string, parentLayerPaths []string)
if procImportLayerBegin.Find() != nil {
// The new layer writer is not available on this Windows build. Fall back to the
// legacy import code path.
path, err := ioutil.TempDir("", "hcs")
importPath, err := ioutil.TempDir("", "hcs")
if err != nil {
return nil, err
}
w, err := newLegacyLayerWriter(path, parentLayerPaths, filepath.Join(info.HomeDir, layerID))
w, err := newLegacyLayerWriter(importPath, parentLayerPaths, path)
if err != nil {
return nil, err
}
return &legacyLayerWriterWrapper{
legacyLayerWriter: w,
info: info,
layerID: layerID,
path: path,
path: importPath,
parentLayerPaths: parentLayerPaths,
}, nil
}
@ -208,15 +197,10 @@ func NewLayerWriter(info DriverInfo, layerID string, parentLayerPaths []string)
return nil, err
}
infop, err := convertDriverInfo(info)
if err != nil {
return nil, err
}
w := &FilterLayerWriter{}
err = importLayerBegin(&infop, layerID, layers, &w.context)
err = importLayerBegin(&stdDriverInfo, path, layers, &w.context)
if err != nil {
return nil, makeError(err, "ImportLayerStart", "")
return nil, hcserror.New(err, "ImportLayerStart", "")
}
return w, nil
}
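Since wclayer now lives under internal/, callers outside hcsshim reach this path-based NewLayerWriter through the compatibility wrapper added later in this diff. A rough, hedged sketch of driving it that way, including the SeBackupPrivilege/SeRestorePrivilege the doc comment requires (the driver home, layer ID and parent path are made up for illustration):

package main

import (
	"log"

	"github.com/Microsoft/go-winio"
	"github.com/Microsoft/hcsshim"
)

func main() {
	// The doc comment requires SeBackupPrivilege and SeRestorePrivilege for
	// NewLayerWriter and for the methods of the returned LayerWriter.
	privs := []string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}
	if err := winio.EnableProcessPrivileges(privs); err != nil {
		log.Fatal(err)
	}
	defer winio.DisableProcessPrivileges(privs)

	// Hypothetical driver home, layer ID and parent chain.
	info := hcsshim.DriverInfo{HomeDir: `C:\layers`, Flavour: 1}
	parents := []string{`C:\layers\base`}

	w, err := hcsshim.NewLayerWriter(info, "new-layer", parents)
	if err != nil {
		log.Fatal(err)
	}
	// Layer content would be streamed in here via w.Add / w.Write / w.AddLink / w.Remove.
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
}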

View file

@ -0,0 +1,25 @@
package wclayer
import (
"github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/sirupsen/logrus"
)
// LayerExists will return true if a layer with the given id exists and is known
// to the system.
func LayerExists(path string) (bool, error) {
title := "hcsshim::LayerExists "
logrus.Debugf(title+"path %s", path)
// Call the procedure itself.
var exists uint32
err := layerExists(&stdDriverInfo, path, &exists)
if err != nil {
err = hcserror.Errorf(err, title, "path=%s", path)
logrus.Error(err)
return false, err
}
logrus.Debugf(title+"succeeded path=%s exists=%d", path, exists)
return exists != 0, nil
}

View file

@ -0,0 +1,13 @@
package wclayer
import (
"path/filepath"
"github.com/Microsoft/hcsshim/internal/guid"
)
// LayerID returns the layer ID of a layer on disk.
func LayerID(path string) (guid.GUID, error) {
_, file := filepath.Split(path)
return NameToGuid(file)
}

View file

@ -1,12 +1,12 @@
package hcsshim
package wclayer
// This file contains utility functions to support storage (graph) related
// functionality.
import (
"path/filepath"
"syscall"
"github.com/Microsoft/hcsshim/internal/guid"
"github.com/sirupsen/logrus"
)
@ -22,28 +22,16 @@ struct DriverInfo {
LPCWSTR HomeDir;
};
*/
type DriverInfo struct {
Flavour int
HomeDir string
}
type driverInfo struct {
Flavour int
HomeDirp *uint16
}
func convertDriverInfo(info DriverInfo) (driverInfo, error) {
homedirp, err := syscall.UTF16PtrFromString(info.HomeDir)
if err != nil {
logrus.Debugf("Failed conversion of home to pointer for driver info: %s", err.Error())
return driverInfo{}, err
}
return driverInfo{
Flavour: info.Flavour,
HomeDirp: homedirp,
}, nil
}
var (
utf16EmptyString uint16
stdDriverInfo = driverInfo{1, &utf16EmptyString}
)
/* To pass into syscall, we need a struct matching the following:
typedef struct _WC_LAYER_DESCRIPTOR {
@ -75,7 +63,7 @@ typedef struct _WC_LAYER_DESCRIPTOR {
} WC_LAYER_DESCRIPTOR, *PWC_LAYER_DESCRIPTOR;
*/
type WC_LAYER_DESCRIPTOR struct {
LayerId GUID
LayerId guid.GUID
Flags uint32
Pathp *uint16
}
@ -85,10 +73,7 @@ func layerPathsToDescriptors(parentLayerPaths []string) ([]WC_LAYER_DESCRIPTOR,
var layers []WC_LAYER_DESCRIPTOR
for i := 0; i < len(parentLayerPaths); i++ {
// Create a layer descriptor, using the folder name
// as the source for a GUID LayerId
_, folderName := filepath.Split(parentLayerPaths[i])
g, err := NameToGuid(folderName)
g, err := LayerID(parentLayerPaths[i])
if err != nil {
logrus.Debugf("Failed to convert name to guid %s", err)
return nil, err
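The descriptor helper now derives each parent layer's GUID with LayerID instead of splitting the path and calling NameToGuid inline. An illustrative-only sketch (not part of the vendored file) of the equivalence it relies on:

package wclayer

import "path/filepath"

// layerGUIDsMatch is a hypothetical helper showing that LayerID is just
// NameToGuid applied to the last path element, which is exactly what
// layerPathsToDescriptors depends on when building WC_LAYER_DESCRIPTORs.
func layerGUIDsMatch(parentLayerPath string) (bool, error) {
	viaLayerID, err := LayerID(parentLayerPath)
	if err != nil {
		return false, err
	}
	_, folderName := filepath.Split(parentLayerPath)
	viaNameToGuid, err := NameToGuid(folderName)
	if err != nil {
		return false, err
	}
	return viaLayerID == viaNameToGuid, nil
}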

View file

@ -1,4 +1,4 @@
package hcsshim
package wclayer
import (
"bufio"
@ -6,12 +6,15 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"syscall"
"github.com/Microsoft/go-winio"
"github.com/Microsoft/hcsshim/internal/longpath"
"github.com/Microsoft/hcsshim/internal/safefile"
)
var errorIterationCanceled = errors.New("")
@ -34,23 +37,6 @@ func openFileOrDir(path string, mode uint32, createDisposition uint32) (file *os
return winio.OpenForBackup(path, mode, syscall.FILE_SHARE_READ, createDisposition)
}
func makeLongAbsPath(path string) (string, error) {
if strings.HasPrefix(path, `\\?\`) || strings.HasPrefix(path, `\\.\`) {
return path, nil
}
if !filepath.IsAbs(path) {
absPath, err := filepath.Abs(path)
if err != nil {
return "", err
}
path = absPath
}
if strings.HasPrefix(path, `\\`) {
return `\\?\UNC\` + path[2:], nil
}
return `\\?\` + path, nil
}
func hasPathPrefix(p, prefix string) bool {
return strings.HasPrefix(p, prefix) && len(p) > len(prefix) && p[len(prefix)] == '\\'
}
@ -106,7 +92,7 @@ func readTombstones(path string) (map[string]([]string), error) {
}
func (r *legacyLayerReader) walkUntilCancelled() error {
root, err := makeLongAbsPath(r.root)
root, err := longpath.LongAbs(r.root)
if err != nil {
return err
}
@ -283,7 +269,7 @@ func (r *legacyLayerReader) Next() (path string, size int64, fileInfo *winio.Fil
if err != nil {
return
}
fileInfo.FileAttributes = uintptr(attr)
fileInfo.FileAttributes = attr
beginning := int64(4)
// Find the accurate file size.
@ -349,6 +335,7 @@ type legacyLayerWriter struct {
destRoot *os.File
parentRoots []*os.File
currentFile *os.File
bufWriter *bufio.Writer
currentFileName string
currentFileRoot *os.File
backupWriter *winio.BackupFileWriter
@ -373,21 +360,22 @@ func newLegacyLayerWriter(root string, parentRoots []string, destRoot string) (w
w = nil
}
}()
w.root, err = openRoot(root)
w.root, err = safefile.OpenRoot(root)
if err != nil {
return
}
w.destRoot, err = openRoot(destRoot)
w.destRoot, err = safefile.OpenRoot(destRoot)
if err != nil {
return
}
for _, r := range parentRoots {
f, err := openRoot(r)
f, err := safefile.OpenRoot(r)
if err != nil {
return w, err
}
w.parentRoots = append(w.parentRoots, f)
}
w.bufWriter = bufio.NewWriterSize(ioutil.Discard, 65536)
return
}
@ -408,7 +396,7 @@ func (w *legacyLayerWriter) CloseRoots() {
func (w *legacyLayerWriter) initUtilityVM() error {
if !w.HasUtilityVM {
err := mkdirRelative(utilityVMPath, w.destRoot)
err := safefile.MkdirRelative(utilityVMPath, w.destRoot)
if err != nil {
return err
}
@ -426,6 +414,11 @@ func (w *legacyLayerWriter) initUtilityVM() error {
}
func (w *legacyLayerWriter) reset() error {
err := w.bufWriter.Flush()
if err != nil {
return err
}
w.bufWriter.Reset(ioutil.Discard)
if w.currentIsDir {
r := w.currentFile
br := winio.NewBackupStreamReader(r)
@ -449,7 +442,7 @@ func (w *legacyLayerWriter) reset() error {
// describes a directory reparse point. Delete the placeholder
// directory to prevent future files being added into the
// destination of the reparse point during the ImportLayer call
if err := removeRelative(w.currentFileName, w.currentFileRoot); err != nil {
if err := safefile.RemoveRelative(w.currentFileName, w.currentFileRoot); err != nil {
return err
}
w.pendingDirs = append(w.pendingDirs, pendingDir{Path: w.currentFileName, Root: w.currentFileRoot})
@ -474,13 +467,13 @@ func (w *legacyLayerWriter) reset() error {
// copyFileWithMetadata copies a file using the backup/restore APIs in order to preserve metadata
func copyFileWithMetadata(srcRoot, destRoot *os.File, subPath string, isDir bool) (fileInfo *winio.FileBasicInfo, err error) {
src, err := openRelative(
src, err := safefile.OpenRelative(
subPath,
srcRoot,
syscall.GENERIC_READ|winio.ACCESS_SYSTEM_SECURITY,
syscall.FILE_SHARE_READ,
_FILE_OPEN,
_FILE_OPEN_REPARSE_POINT)
safefile.FILE_OPEN,
safefile.FILE_OPEN_REPARSE_POINT)
if err != nil {
return nil, err
}
@ -495,14 +488,14 @@ func copyFileWithMetadata(srcRoot, destRoot *os.File, subPath string, isDir bool
extraFlags := uint32(0)
if isDir {
extraFlags |= _FILE_DIRECTORY_FILE
extraFlags |= safefile.FILE_DIRECTORY_FILE
}
dest, err := openRelative(
dest, err := safefile.OpenRelative(
subPath,
destRoot,
syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY,
syscall.FILE_SHARE_READ,
_FILE_CREATE,
safefile.FILE_CREATE,
extraFlags)
if err != nil {
return nil, err
@ -534,7 +527,7 @@ func copyFileWithMetadata(srcRoot, destRoot *os.File, subPath string, isDir bool
// the file names in the provided map and just copies those files.
func cloneTree(srcRoot *os.File, destRoot *os.File, subPath string, mutatedFiles map[string]bool) error {
var di []dirInfo
err := ensureNotReparsePointRelative(subPath, srcRoot)
err := safefile.EnsureNotReparsePointRelative(subPath, srcRoot)
if err != nil {
return err
}
@ -566,18 +559,12 @@ func cloneTree(srcRoot *os.File, destRoot *os.File, subPath string, mutatedFiles
di = append(di, dirInfo{path: relPath, fileInfo: *fi})
}
} else {
err = linkRelative(relPath, srcRoot, relPath, destRoot)
err = safefile.LinkRelative(relPath, srcRoot, relPath, destRoot)
if err != nil {
return err
}
}
// Don't recurse on reparse points in go1.8 and older. Filepath.Walk
// handles this in go1.9 and newer.
if isDir && isReparsePoint && shouldSkipDirectoryReparse {
return filepath.SkipDir
}
return nil
})
if err != nil {
@ -604,9 +591,9 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro
if !hasPathPrefix(name, utilityVMFilesPath) && name != utilityVMFilesPath {
return errors.New("invalid UtilityVM layer")
}
createDisposition := uint32(_FILE_OPEN)
createDisposition := uint32(safefile.FILE_OPEN)
if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
st, err := lstatRelative(name, w.destRoot)
st, err := safefile.LstatRelative(name, w.destRoot)
if err != nil && !os.IsNotExist(err) {
return err
}
@ -614,14 +601,14 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro
// Delete the existing file/directory if it is not the same type as this directory.
existingAttr := st.Sys().(*syscall.Win32FileAttributeData).FileAttributes
if (uint32(fileInfo.FileAttributes)^existingAttr)&(syscall.FILE_ATTRIBUTE_DIRECTORY|syscall.FILE_ATTRIBUTE_REPARSE_POINT) != 0 {
if err = removeAllRelative(name, w.destRoot); err != nil {
if err = safefile.RemoveAllRelative(name, w.destRoot); err != nil {
return err
}
st = nil
}
}
if st == nil {
if err = mkdirRelative(name, w.destRoot); err != nil {
if err = safefile.MkdirRelative(name, w.destRoot); err != nil {
return err
}
}
@ -630,20 +617,20 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro
}
} else {
// Overwrite any existing hard link.
err := removeRelative(name, w.destRoot)
err := safefile.RemoveRelative(name, w.destRoot)
if err != nil && !os.IsNotExist(err) {
return err
}
createDisposition = _FILE_CREATE
createDisposition = safefile.FILE_CREATE
}
f, err := openRelative(
f, err := safefile.OpenRelative(
name,
w.destRoot,
syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY,
syscall.FILE_SHARE_READ,
createDisposition,
_FILE_OPEN_REPARSE_POINT,
safefile.FILE_OPEN_REPARSE_POINT,
)
if err != nil {
return err
@ -651,7 +638,7 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro
defer func() {
if f != nil {
f.Close()
removeRelative(name, w.destRoot)
safefile.RemoveRelative(name, w.destRoot)
}
}()
@ -661,6 +648,7 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro
}
w.backupWriter = winio.NewBackupFileWriter(f, true)
w.bufWriter.Reset(w.backupWriter)
w.currentFile = f
w.currentFileName = name
w.currentFileRoot = w.destRoot
@ -671,7 +659,7 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro
fname := name
if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
err := mkdirRelative(name, w.root)
err := safefile.MkdirRelative(name, w.root)
if err != nil {
return err
}
@ -679,14 +667,14 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro
w.currentIsDir = true
}
f, err := openRelative(fname, w.root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, _FILE_CREATE, 0)
f, err := safefile.OpenRelative(fname, w.root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, safefile.FILE_CREATE, 0)
if err != nil {
return err
}
defer func() {
if f != nil {
f.Close()
removeRelative(fname, w.root)
safefile.RemoveRelative(fname, w.root)
}
}()
@ -699,10 +687,13 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro
if hasPathPrefix(name, hivesPath) {
w.backupWriter = winio.NewBackupFileWriter(f, false)
w.bufWriter.Reset(w.backupWriter)
} else {
w.bufWriter.Reset(f)
// The file attributes are written before the stream.
err = binary.Write(f, binary.LittleEndian, uint32(fileInfo.FileAttributes))
err = binary.Write(w.bufWriter, binary.LittleEndian, uint32(fileInfo.FileAttributes))
if err != nil {
w.bufWriter.Reset(ioutil.Discard)
return err
}
}
@ -744,7 +735,7 @@ func (w *legacyLayerWriter) AddLink(name string, target string) error {
selectedRoot = w.destRoot
} else {
for _, r := range roots {
if _, err := lstatRelative(target, r); err != nil {
if _, err := safefile.LstatRelative(target, r); err != nil {
if !os.IsNotExist(err) {
return err
}
@ -780,10 +771,10 @@ func (w *legacyLayerWriter) Remove(name string) error {
// Make sure the path exists; os.RemoveAll will not fail if the file is
// already gone, and this needs to be a fatal error for diagnostics
// purposes.
if _, err := lstatRelative(name, w.destRoot); err != nil {
if _, err := safefile.LstatRelative(name, w.destRoot); err != nil {
return err
}
err = removeAllRelative(name, w.destRoot)
err = safefile.RemoveAllRelative(name, w.destRoot)
if err != nil {
return err
}
@ -795,24 +786,21 @@ func (w *legacyLayerWriter) Remove(name string) error {
}
func (w *legacyLayerWriter) Write(b []byte) (int, error) {
if w.backupWriter == nil {
if w.currentFile == nil {
return 0, errors.New("closed")
}
return w.currentFile.Write(b)
if w.backupWriter == nil && w.currentFile == nil {
return 0, errors.New("closed")
}
return w.backupWriter.Write(b)
return w.bufWriter.Write(b)
}
func (w *legacyLayerWriter) Close() error {
if err := w.reset(); err != nil {
return err
}
if err := removeRelative("tombstones.txt", w.root); err != nil && !os.IsNotExist(err) {
if err := safefile.RemoveRelative("tombstones.txt", w.root); err != nil && !os.IsNotExist(err) {
return err
}
for _, pd := range w.pendingDirs {
err := mkdirRelative(pd.Path, pd.Root)
err := safefile.MkdirRelative(pd.Path, pd.Root)
if err != nil {
return err
}
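The writer changes above thread one reusable bufio.Writer through the whole lifecycle: Add points it at the current backup writer or file, Write always goes through it, and reset flushes it and parks it back on ioutil.Discard. A small stand-alone sketch of that reuse pattern (the names here are invented for illustration):

package main

import (
	"bufio"
	"io"
	"io/ioutil"
	"log"
	"os"
)

// sink mirrors how legacyLayerWriter reuses a single buffered writer
// across the files it writes.
type sink struct {
	buf *bufio.Writer
}

func newSink() *sink {
	// Parked on ioutil.Discard until a destination is attached.
	return &sink{buf: bufio.NewWriterSize(ioutil.Discard, 65536)}
}

// open attaches the buffer to a destination, as Add does with the
// backup writer or plain file.
func (s *sink) open(w io.Writer) { s.buf.Reset(w) }

// Write goes through the shared buffer, as the new Write method does.
func (s *sink) Write(b []byte) (int, error) { return s.buf.Write(b) }

// reset flushes any buffered bytes and detaches the buffer, as reset does.
func (s *sink) reset() error {
	if err := s.buf.Flush(); err != nil {
		return err
	}
	s.buf.Reset(ioutil.Discard)
	return nil
}

func main() {
	s := newSink()
	f, err := ioutil.TempFile("", "layer")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	s.open(f)
	if _, err := s.Write([]byte("payload")); err != nil {
		log.Fatal(err)
	}
	if err := s.reset(); err != nil { // buffered bytes reach f here
		log.Fatal(err)
	}
}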

View file

@ -0,0 +1,24 @@
package wclayer
import (
"github.com/Microsoft/hcsshim/internal/guid"
"github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/sirupsen/logrus"
)
// NameToGuid converts the given string into a GUID using the algorithm in the
// Host Compute Service, ensuring GUIDs generated with the same string are common
// across all clients.
func NameToGuid(name string) (id guid.GUID, err error) {
title := "hcsshim::NameToGuid "
err = nameToGuid(name, &id)
if err != nil {
err = hcserror.Errorf(err, title, "name=%s", name)
logrus.Error(err)
return
}
logrus.Debugf(title+"name:%s guid:%s", name, id.String())
return
}

View file

@ -1,21 +1,22 @@
package hcsshim
package wclayer
import (
"sync"
"github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/sirupsen/logrus"
)
var prepareLayerLock sync.Mutex
// PrepareLayer finds a mounted read-write layer matching layerId and enables the
// PrepareLayer finds a mounted read-write layer matching path and enables
// the filesystem filter for use on that layer. This requires the paths to all
// parent layers, and is necessary in order to view or interact with the layer
// as an actual filesystem (reading and writing files, creating directories, etc).
// Disabling the filter must be done via UnprepareLayer.
func PrepareLayer(info DriverInfo, layerId string, parentLayerPaths []string) error {
func PrepareLayer(path string, parentLayerPaths []string) error {
title := "hcsshim::PrepareLayer "
logrus.Debugf(title+"flavour %d layerId %s", info.Flavour, layerId)
logrus.Debugf(title+"path %s", path)
// Generate layer descriptors
layers, err := layerPathsToDescriptors(parentLayerPaths)
@ -23,24 +24,17 @@ func PrepareLayer(info DriverInfo, layerId string, parentLayerPaths []string) er
return err
}
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
// This lock is a temporary workaround for a Windows bug. Only allowing one
// call to prepareLayer at a time vastly reduces the chance of a timeout.
prepareLayerLock.Lock()
defer prepareLayerLock.Unlock()
err = prepareLayer(&infop, layerId, layers)
err = prepareLayer(&stdDriverInfo, path, layers)
if err != nil {
err = makeErrorf(err, title, "layerId=%s flavour=%d", layerId, info.Flavour)
err = hcserror.Errorf(err, title, "path=%s", path)
logrus.Error(err)
return err
}
logrus.Debugf(title+"succeeded flavour=%d layerId=%s", info.Flavour, layerId)
logrus.Debugf(title+"succeeded path=%s", path)
return nil
}

View file

@ -0,0 +1,23 @@
package wclayer
import (
"github.com/Microsoft/hcsshim/internal/hcserror"
"github.com/sirupsen/logrus"
)
// UnprepareLayer disables the filesystem filter for the read-write layer with
// the given id.
func UnprepareLayer(path string) error {
title := "hcsshim::UnprepareLayer "
logrus.Debugf(title+"path %s", path)
err := unprepareLayer(&stdDriverInfo, path)
if err != nil {
err = hcserror.Errorf(err, title, "path=%s", path)
logrus.Error(err)
return err
}
logrus.Debugf(title+"succeeded path=%s", path)
return nil
}

View file

@ -0,0 +1,37 @@
package wclayer
import "github.com/Microsoft/hcsshim/internal/guid"
//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go -winio wclayer.go
//sys activateLayer(info *driverInfo, id string) (hr error) = vmcompute.ActivateLayer?
//sys copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CopyLayer?
//sys createLayer(info *driverInfo, id string, parent string) (hr error) = vmcompute.CreateLayer?
//sys createSandboxLayer(info *driverInfo, id string, parent uintptr, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CreateSandboxLayer?
//sys expandSandboxSize(info *driverInfo, id string, size uint64) (hr error) = vmcompute.ExpandSandboxSize?
//sys deactivateLayer(info *driverInfo, id string) (hr error) = vmcompute.DeactivateLayer?
//sys destroyLayer(info *driverInfo, id string) (hr error) = vmcompute.DestroyLayer?
//sys exportLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ExportLayer?
//sys getLayerMountPath(info *driverInfo, id string, length *uintptr, buffer *uint16) (hr error) = vmcompute.GetLayerMountPath?
//sys getBaseImages(buffer **uint16) (hr error) = vmcompute.GetBaseImages?
//sys importLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ImportLayer?
//sys layerExists(info *driverInfo, id string, exists *uint32) (hr error) = vmcompute.LayerExists?
//sys nameToGuid(name string, guid *_guid) (hr error) = vmcompute.NameToGuid?
//sys prepareLayer(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.PrepareLayer?
//sys unprepareLayer(info *driverInfo, id string) (hr error) = vmcompute.UnprepareLayer?
//sys processBaseImage(path string) (hr error) = vmcompute.ProcessBaseImage?
//sys processUtilityImage(path string) (hr error) = vmcompute.ProcessUtilityImage?
//sys importLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) = vmcompute.ImportLayerBegin?
//sys importLayerNext(context uintptr, fileName string, fileInfo *winio.FileBasicInfo) (hr error) = vmcompute.ImportLayerNext?
//sys importLayerWrite(context uintptr, buffer []byte) (hr error) = vmcompute.ImportLayerWrite?
//sys importLayerEnd(context uintptr) (hr error) = vmcompute.ImportLayerEnd?
//sys exportLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) = vmcompute.ExportLayerBegin?
//sys exportLayerNext(context uintptr, fileName **uint16, fileInfo *winio.FileBasicInfo, fileSize *int64, deleted *uint32) (hr error) = vmcompute.ExportLayerNext?
//sys exportLayerRead(context uintptr, buffer []byte, bytesRead *uint32) (hr error) = vmcompute.ExportLayerRead?
//sys exportLayerEnd(context uintptr) (hr error) = vmcompute.ExportLayerEnd?
//sys grantVmAccess(vmid string, filepath string) (hr error) = vmcompute.GrantVmAccess?
type _guid = guid.GUID
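The trailing '?' on each //sys line makes the generated stub probe the export with Find() before calling it, which is how NewLayerWriter above detects that ImportLayerBegin is missing and falls back to the legacy path. A hedged stand-alone sketch of that probe using only golang.org/x/sys/windows:

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	mod := windows.NewLazySystemDLL("vmcompute.dll")
	proc := mod.NewProc("ImportLayerBegin")

	// Find returns an error if the DLL or the export cannot be resolved;
	// the generated wrappers perform the same check before the syscall.
	if err := proc.Find(); err != nil {
		fmt.Println("filter-based import unavailable, would fall back:", err)
		return
	}
	fmt.Println("ImportLayerBegin is available")
}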

View file

@ -0,0 +1,597 @@
// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
package wclayer
import (
"syscall"
"unsafe"
"github.com/Microsoft/go-winio"
"github.com/Microsoft/hcsshim/internal/interop"
"golang.org/x/sys/windows"
)
var _ unsafe.Pointer
// Do the interface allocations only once for common
// Errno values.
const (
errnoERROR_IO_PENDING = 997
)
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
)
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
return nil
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values seen on Windows. (perhaps when running
// all.bat?)
return e
}
var (
modvmcompute = windows.NewLazySystemDLL("vmcompute.dll")
procActivateLayer = modvmcompute.NewProc("ActivateLayer")
procCopyLayer = modvmcompute.NewProc("CopyLayer")
procCreateLayer = modvmcompute.NewProc("CreateLayer")
procCreateSandboxLayer = modvmcompute.NewProc("CreateSandboxLayer")
procExpandSandboxSize = modvmcompute.NewProc("ExpandSandboxSize")
procDeactivateLayer = modvmcompute.NewProc("DeactivateLayer")
procDestroyLayer = modvmcompute.NewProc("DestroyLayer")
procExportLayer = modvmcompute.NewProc("ExportLayer")
procGetLayerMountPath = modvmcompute.NewProc("GetLayerMountPath")
procGetBaseImages = modvmcompute.NewProc("GetBaseImages")
procImportLayer = modvmcompute.NewProc("ImportLayer")
procLayerExists = modvmcompute.NewProc("LayerExists")
procNameToGuid = modvmcompute.NewProc("NameToGuid")
procPrepareLayer = modvmcompute.NewProc("PrepareLayer")
procUnprepareLayer = modvmcompute.NewProc("UnprepareLayer")
procProcessBaseImage = modvmcompute.NewProc("ProcessBaseImage")
procProcessUtilityImage = modvmcompute.NewProc("ProcessUtilityImage")
procImportLayerBegin = modvmcompute.NewProc("ImportLayerBegin")
procImportLayerNext = modvmcompute.NewProc("ImportLayerNext")
procImportLayerWrite = modvmcompute.NewProc("ImportLayerWrite")
procImportLayerEnd = modvmcompute.NewProc("ImportLayerEnd")
procExportLayerBegin = modvmcompute.NewProc("ExportLayerBegin")
procExportLayerNext = modvmcompute.NewProc("ExportLayerNext")
procExportLayerRead = modvmcompute.NewProc("ExportLayerRead")
procExportLayerEnd = modvmcompute.NewProc("ExportLayerEnd")
procGrantVmAccess = modvmcompute.NewProc("GrantVmAccess")
)
func activateLayer(info *driverInfo, id string) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(id)
if hr != nil {
return
}
return _activateLayer(info, _p0)
}
func _activateLayer(info *driverInfo, id *uint16) (hr error) {
if hr = procActivateLayer.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procActivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(srcId)
if hr != nil {
return
}
var _p1 *uint16
_p1, hr = syscall.UTF16PtrFromString(dstId)
if hr != nil {
return
}
return _copyLayer(info, _p0, _p1, descriptors)
}
func _copyLayer(info *driverInfo, srcId *uint16, dstId *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) {
var _p2 *WC_LAYER_DESCRIPTOR
if len(descriptors) > 0 {
_p2 = &descriptors[0]
}
if hr = procCopyLayer.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall6(procCopyLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(srcId)), uintptr(unsafe.Pointer(dstId)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func createLayer(info *driverInfo, id string, parent string) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(id)
if hr != nil {
return
}
var _p1 *uint16
_p1, hr = syscall.UTF16PtrFromString(parent)
if hr != nil {
return
}
return _createLayer(info, _p0, _p1)
}
func _createLayer(info *driverInfo, id *uint16, parent *uint16) (hr error) {
if hr = procCreateLayer.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procCreateLayer.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(parent)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func createSandboxLayer(info *driverInfo, id string, parent uintptr, descriptors []WC_LAYER_DESCRIPTOR) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(id)
if hr != nil {
return
}
return _createSandboxLayer(info, _p0, parent, descriptors)
}
func _createSandboxLayer(info *driverInfo, id *uint16, parent uintptr, descriptors []WC_LAYER_DESCRIPTOR) (hr error) {
var _p1 *WC_LAYER_DESCRIPTOR
if len(descriptors) > 0 {
_p1 = &descriptors[0]
}
if hr = procCreateSandboxLayer.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall6(procCreateSandboxLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(parent), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func expandSandboxSize(info *driverInfo, id string, size uint64) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(id)
if hr != nil {
return
}
return _expandSandboxSize(info, _p0, size)
}
func _expandSandboxSize(info *driverInfo, id *uint16, size uint64) (hr error) {
if hr = procExpandSandboxSize.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procExpandSandboxSize.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(size))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func deactivateLayer(info *driverInfo, id string) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(id)
if hr != nil {
return
}
return _deactivateLayer(info, _p0)
}
func _deactivateLayer(info *driverInfo, id *uint16) (hr error) {
if hr = procDeactivateLayer.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procDeactivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func destroyLayer(info *driverInfo, id string) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(id)
if hr != nil {
return
}
return _destroyLayer(info, _p0)
}
func _destroyLayer(info *driverInfo, id *uint16) (hr error) {
if hr = procDestroyLayer.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procDestroyLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func exportLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(id)
if hr != nil {
return
}
var _p1 *uint16
_p1, hr = syscall.UTF16PtrFromString(path)
if hr != nil {
return
}
return _exportLayer(info, _p0, _p1, descriptors)
}
func _exportLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) {
var _p2 *WC_LAYER_DESCRIPTOR
if len(descriptors) > 0 {
_p2 = &descriptors[0]
}
if hr = procExportLayer.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall6(procExportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func getLayerMountPath(info *driverInfo, id string, length *uintptr, buffer *uint16) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(id)
if hr != nil {
return
}
return _getLayerMountPath(info, _p0, length, buffer)
}
func _getLayerMountPath(info *driverInfo, id *uint16, length *uintptr, buffer *uint16) (hr error) {
if hr = procGetLayerMountPath.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall6(procGetLayerMountPath.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(length)), uintptr(unsafe.Pointer(buffer)), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func getBaseImages(buffer **uint16) (hr error) {
if hr = procGetBaseImages.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procGetBaseImages.Addr(), 1, uintptr(unsafe.Pointer(buffer)), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func importLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(id)
if hr != nil {
return
}
var _p1 *uint16
_p1, hr = syscall.UTF16PtrFromString(path)
if hr != nil {
return
}
return _importLayer(info, _p0, _p1, descriptors)
}
func _importLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) {
var _p2 *WC_LAYER_DESCRIPTOR
if len(descriptors) > 0 {
_p2 = &descriptors[0]
}
if hr = procImportLayer.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall6(procImportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func layerExists(info *driverInfo, id string, exists *uint32) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(id)
if hr != nil {
return
}
return _layerExists(info, _p0, exists)
}
func _layerExists(info *driverInfo, id *uint16, exists *uint32) (hr error) {
if hr = procLayerExists.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procLayerExists.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(exists)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func nameToGuid(name string, guid *_guid) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(name)
if hr != nil {
return
}
return _nameToGuid(_p0, guid)
}
func _nameToGuid(name *uint16, guid *_guid) (hr error) {
if hr = procNameToGuid.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procNameToGuid.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(guid)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func prepareLayer(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(id)
if hr != nil {
return
}
return _prepareLayer(info, _p0, descriptors)
}
func _prepareLayer(info *driverInfo, id *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) {
var _p1 *WC_LAYER_DESCRIPTOR
if len(descriptors) > 0 {
_p1 = &descriptors[0]
}
if hr = procPrepareLayer.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall6(procPrepareLayer.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func unprepareLayer(info *driverInfo, id string) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(id)
if hr != nil {
return
}
return _unprepareLayer(info, _p0)
}
func _unprepareLayer(info *driverInfo, id *uint16) (hr error) {
if hr = procUnprepareLayer.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procUnprepareLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func processBaseImage(path string) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(path)
if hr != nil {
return
}
return _processBaseImage(_p0)
}
func _processBaseImage(path *uint16) (hr error) {
if hr = procProcessBaseImage.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procProcessBaseImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func processUtilityImage(path string) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(path)
if hr != nil {
return
}
return _processUtilityImage(_p0)
}
func _processUtilityImage(path *uint16) (hr error) {
if hr = procProcessUtilityImage.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procProcessUtilityImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func importLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(id)
if hr != nil {
return
}
return _importLayerBegin(info, _p0, descriptors, context)
}
func _importLayerBegin(info *driverInfo, id *uint16, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) {
var _p1 *WC_LAYER_DESCRIPTOR
if len(descriptors) > 0 {
_p1 = &descriptors[0]
}
if hr = procImportLayerBegin.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall6(procImportLayerBegin.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), uintptr(unsafe.Pointer(context)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func importLayerNext(context uintptr, fileName string, fileInfo *winio.FileBasicInfo) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(fileName)
if hr != nil {
return
}
return _importLayerNext(context, _p0, fileInfo)
}
func _importLayerNext(context uintptr, fileName *uint16, fileInfo *winio.FileBasicInfo) (hr error) {
if hr = procImportLayerNext.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procImportLayerNext.Addr(), 3, uintptr(context), uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(fileInfo)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func importLayerWrite(context uintptr, buffer []byte) (hr error) {
var _p0 *byte
if len(buffer) > 0 {
_p0 = &buffer[0]
}
if hr = procImportLayerWrite.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procImportLayerWrite.Addr(), 3, uintptr(context), uintptr(unsafe.Pointer(_p0)), uintptr(len(buffer)))
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func importLayerEnd(context uintptr) (hr error) {
if hr = procImportLayerEnd.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procImportLayerEnd.Addr(), 1, uintptr(context), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func exportLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(id)
if hr != nil {
return
}
return _exportLayerBegin(info, _p0, descriptors, context)
}
func _exportLayerBegin(info *driverInfo, id *uint16, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) {
var _p1 *WC_LAYER_DESCRIPTOR
if len(descriptors) > 0 {
_p1 = &descriptors[0]
}
if hr = procExportLayerBegin.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall6(procExportLayerBegin.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), uintptr(unsafe.Pointer(context)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func exportLayerNext(context uintptr, fileName **uint16, fileInfo *winio.FileBasicInfo, fileSize *int64, deleted *uint32) (hr error) {
if hr = procExportLayerNext.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall6(procExportLayerNext.Addr(), 5, uintptr(context), uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(fileInfo)), uintptr(unsafe.Pointer(fileSize)), uintptr(unsafe.Pointer(deleted)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func exportLayerRead(context uintptr, buffer []byte, bytesRead *uint32) (hr error) {
var _p0 *byte
if len(buffer) > 0 {
_p0 = &buffer[0]
}
if hr = procExportLayerRead.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall6(procExportLayerRead.Addr(), 4, uintptr(context), uintptr(unsafe.Pointer(_p0)), uintptr(len(buffer)), uintptr(unsafe.Pointer(bytesRead)), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func exportLayerEnd(context uintptr) (hr error) {
if hr = procExportLayerEnd.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procExportLayerEnd.Addr(), 1, uintptr(context), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
func grantVmAccess(vmid string, filepath string) (hr error) {
var _p0 *uint16
_p0, hr = syscall.UTF16PtrFromString(vmid)
if hr != nil {
return
}
var _p1 *uint16
_p1, hr = syscall.UTF16PtrFromString(filepath)
if hr != nil {
return
}
return _grantVmAccess(_p0, _p1)
}
func _grantVmAccess(vmid *uint16, filepath *uint16) (hr error) {
if hr = procGrantVmAccess.Find(); hr != nil {
return
}
r0, _, _ := syscall.Syscall(procGrantVmAccess.Addr(), 2, uintptr(unsafe.Pointer(vmid)), uintptr(unsafe.Pointer(filepath)), 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}
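Every stub in this generated file applies the same failure test: an HRESULT whose high (severity) bit is set is negative when viewed as int32, and only then is it converted by interop.Win32FromHresult. A tiny sketch of that convention (E_FAIL is used purely as an example value):

package main

import "fmt"

// hresultFailed mirrors the `int32(r0) < 0` test in the generated stubs:
// bit 31 of an HRESULT is the severity (failure) bit.
func hresultFailed(hr uintptr) bool {
	return int32(hr) < 0
}

func main() {
	const eFail = 0x80004005 // E_FAIL
	fmt.Println(hresultFailed(0))     // false: S_OK
	fmt.Println(hresultFailed(eFail)) // true: failure HRESULT
}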

108
libnetwork/vendor/github.com/Microsoft/hcsshim/layer.go generated vendored Normal file
View file

@ -0,0 +1,108 @@
package hcsshim
import (
"crypto/sha1"
"path/filepath"
"github.com/Microsoft/hcsshim/internal/guid"
"github.com/Microsoft/hcsshim/internal/wclayer"
)
func layerPath(info *DriverInfo, id string) string {
return filepath.Join(info.HomeDir, id)
}
func ActivateLayer(info DriverInfo, id string) error {
return wclayer.ActivateLayer(layerPath(&info, id))
}
func CreateLayer(info DriverInfo, id, parent string) error {
return wclayer.CreateLayer(layerPath(&info, id), parent)
}
// New clients should use CreateScratchLayer instead. Kept in to preserve API compatibility.
func CreateSandboxLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error {
return wclayer.CreateScratchLayer(layerPath(&info, layerId), parentLayerPaths)
}
func CreateScratchLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error {
return wclayer.CreateScratchLayer(layerPath(&info, layerId), parentLayerPaths)
}
func DeactivateLayer(info DriverInfo, id string) error {
return wclayer.DeactivateLayer(layerPath(&info, id))
}
func DestroyLayer(info DriverInfo, id string) error {
return wclayer.DestroyLayer(layerPath(&info, id))
}
// New clients should use ExpandScratchSize instead. Kept in to preserve API compatibility.
func ExpandSandboxSize(info DriverInfo, layerId string, size uint64) error {
return wclayer.ExpandScratchSize(layerPath(&info, layerId), size)
}
func ExpandScratchSize(info DriverInfo, layerId string, size uint64) error {
return wclayer.ExpandScratchSize(layerPath(&info, layerId), size)
}
func ExportLayer(info DriverInfo, layerId string, exportFolderPath string, parentLayerPaths []string) error {
return wclayer.ExportLayer(layerPath(&info, layerId), exportFolderPath, parentLayerPaths)
}
func GetLayerMountPath(info DriverInfo, id string) (string, error) {
return wclayer.GetLayerMountPath(layerPath(&info, id))
}
func GetSharedBaseImages() (imageData string, err error) {
return wclayer.GetSharedBaseImages()
}
func ImportLayer(info DriverInfo, layerID string, importFolderPath string, parentLayerPaths []string) error {
return wclayer.ImportLayer(layerPath(&info, layerID), importFolderPath, parentLayerPaths)
}
func LayerExists(info DriverInfo, id string) (bool, error) {
return wclayer.LayerExists(layerPath(&info, id))
}
func PrepareLayer(info DriverInfo, layerId string, parentLayerPaths []string) error {
return wclayer.PrepareLayer(layerPath(&info, layerId), parentLayerPaths)
}
func ProcessBaseLayer(path string) error {
return wclayer.ProcessBaseLayer(path)
}
func ProcessUtilityVMImage(path string) error {
return wclayer.ProcessUtilityVMImage(path)
}
func UnprepareLayer(info DriverInfo, layerId string) error {
return wclayer.UnprepareLayer(layerPath(&info, layerId))
}
type DriverInfo struct {
Flavour int
HomeDir string
}
type FilterLayerReader = wclayer.FilterLayerReader
type FilterLayerWriter = wclayer.FilterLayerWriter
type GUID [16]byte
func NameToGuid(name string) (id GUID, err error) {
g, err := wclayer.NameToGuid(name)
return GUID(g), err
}
func NewGUID(source string) *GUID {
h := sha1.Sum([]byte(source))
var g GUID
copy(g[0:], h[0:16])
return &g
}
func (g *GUID) ToString() string {
return (guid.GUID)(*g).String()
}
type LayerReader = wclayer.LayerReader
func NewLayerReader(info DriverInfo, layerID string, parentLayerPaths []string) (LayerReader, error) {
return wclayer.NewLayerReader(layerPath(&info, layerID), parentLayerPaths)
}
type LayerWriter = wclayer.LayerWriter
func NewLayerWriter(info DriverInfo, layerID string, parentLayerPaths []string) (LayerWriter, error) {
return wclayer.NewLayerWriter(layerPath(&info, layerID), parentLayerPaths)
}
type WC_LAYER_DESCRIPTOR = wclayer.WC_LAYER_DESCRIPTOR
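This shim keeps the old DriverInfo-plus-ID surface and simply joins HomeDir and the ID into the single path the wclayer functions now take. A hedged usage sketch against the wrapper (home directory and layer ID are made up):

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim"
)

func main() {
	// Old-style arguments: a driver home directory plus a layer ID ...
	info := hcsshim.DriverInfo{HomeDir: `C:\ProgramData\docker\windowsfilter`, Flavour: 1}
	id := "aaaa-bbbb-cccc"

	// ... which the shim forwards to wclayer as filepath.Join(info.HomeDir, id).
	exists, err := hcsshim.LayerExists(info, id)
	if err != nil {
		fmt.Println("LayerExists failed:", err)
		return
	}
	fmt.Println("layer exists:", exists)
}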

View file

@ -1,30 +0,0 @@
package hcsshim
import "github.com/sirupsen/logrus"
// LayerExists will return true if a layer with the given id exists and is known
// to the system.
func LayerExists(info DriverInfo, id string) (bool, error) {
title := "hcsshim::LayerExists "
logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id)
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return false, err
}
// Call the procedure itself.
var exists uint32
err = layerExists(&infop, id, &exists)
if err != nil {
err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour)
logrus.Error(err)
return false, err
}
logrus.Debugf(title+"succeeded flavour=%d id=%s exists=%d", info.Flavour, id, exists)
return exists != 0, nil
}

View file

@ -1,7 +0,0 @@
// +build !go1.9
package hcsshim
// Due to a bug in go1.8 and before, directory reparse points need to be skipped
// during filepath.Walk. This is fixed in go1.9
var shouldSkipDirectoryReparse = true

View file

@ -1,7 +0,0 @@
// +build go1.9
package hcsshim
// Due to a bug in go1.8 and before, directory reparse points need to be skipped
// during filepath.Walk. This is fixed in go1.9
var shouldSkipDirectoryReparse = false

View file

@ -1,20 +0,0 @@
package hcsshim
import "github.com/sirupsen/logrus"
// NameToGuid converts the given string into a GUID using the algorithm in the
// Host Compute Service, ensuring GUIDs generated with the same string are common
// across all clients.
func NameToGuid(name string) (id GUID, err error) {
title := "hcsshim::NameToGuid "
logrus.Debugf(title+"Name %s", name)
err = nameToGuid(name, &id)
if err != nil {
err = makeErrorf(err, title, "name=%s", name)
logrus.Error(err)
return
}
return
}

View file

@ -1,384 +1,72 @@
package hcsshim
import (
"encoding/json"
"io"
"sync"
"syscall"
"time"
"github.com/sirupsen/logrus"
"github.com/Microsoft/hcsshim/internal/hcs"
)
// ContainerError is an error encountered in HCS
type process struct {
handleLock sync.RWMutex
handle hcsProcess
processID int
container *container
cachedPipes *cachedPipes
callbackNumber uintptr
p *hcs.Process
}
type cachedPipes struct {
stdIn syscall.Handle
stdOut syscall.Handle
stdErr syscall.Handle
}
type processModifyRequest struct {
Operation string
ConsoleSize *consoleSize `json:",omitempty"`
CloseHandle *closeHandle `json:",omitempty"`
}
type consoleSize struct {
Height uint16
Width uint16
}
type closeHandle struct {
Handle string
}
type processStatus struct {
ProcessID uint32
Exited bool
ExitCode uint32
LastWaitResult int32
}
const (
stdIn string = "StdIn"
stdOut string = "StdOut"
stdErr string = "StdErr"
)
const (
modifyConsoleSize string = "ConsoleSize"
modifyCloseHandle string = "CloseHandle"
)
// Pid returns the process ID of the process within the container.
func (process *process) Pid() int {
return process.processID
return process.p.Pid()
}
// Kill signals the process to terminate but does not wait for it to finish terminating.
func (process *process) Kill() error {
process.handleLock.RLock()
defer process.handleLock.RUnlock()
operation := "Kill"
title := "HCSShim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
if process.handle == 0 {
return makeProcessError(process, operation, "", ErrAlreadyClosed)
}
var resultp *uint16
err := hcsTerminateProcess(process.handle, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return makeProcessError(process, operation, "", err)
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return nil
return convertProcessError(process.p.Kill(), process)
}
// Wait waits for the process to exit.
func (process *process) Wait() error {
operation := "Wait"
title := "HCSShim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
err := waitForNotification(process.callbackNumber, hcsNotificationProcessExited, nil)
if err != nil {
return makeProcessError(process, operation, "", err)
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return nil
return convertProcessError(process.p.Wait(), process)
}
// WaitTimeout waits for the process to exit or the duration to elapse. It returns
// an error if the timeout elapses.
func (process *process) WaitTimeout(timeout time.Duration) error {
operation := "WaitTimeout"
title := "HCSShim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
err := waitForNotification(process.callbackNumber, hcsNotificationProcessExited, &timeout)
if err != nil {
return makeProcessError(process, operation, "", err)
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return nil
return convertProcessError(process.p.WaitTimeout(timeout), process)
}
// ExitCode returns the exit code of the process. The process must have
// already terminated.
func (process *process) ExitCode() (int, error) {
process.handleLock.RLock()
defer process.handleLock.RUnlock()
operation := "ExitCode"
title := "HCSShim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
if process.handle == 0 {
return 0, makeProcessError(process, operation, "", ErrAlreadyClosed)
}
properties, err := process.properties()
code, err := process.p.ExitCode()
if err != nil {
return 0, makeProcessError(process, operation, "", err)
err = convertProcessError(err, process)
}
if properties.Exited == false {
return 0, makeProcessError(process, operation, "", ErrInvalidProcessState)
}
if properties.LastWaitResult != 0 {
return 0, makeProcessError(process, operation, "", syscall.Errno(properties.LastWaitResult))
}
logrus.Debugf(title+" succeeded processid=%d exitCode=%d", process.processID, properties.ExitCode)
return int(properties.ExitCode), nil
return code, err
}
// ResizeConsole resizes the console of the process.
func (process *process) ResizeConsole(width, height uint16) error {
process.handleLock.RLock()
defer process.handleLock.RUnlock()
operation := "ResizeConsole"
title := "HCSShim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
if process.handle == 0 {
return makeProcessError(process, operation, "", ErrAlreadyClosed)
}
modifyRequest := processModifyRequest{
Operation: modifyConsoleSize,
ConsoleSize: &consoleSize{
Height: height,
Width: width,
},
}
modifyRequestb, err := json.Marshal(modifyRequest)
if err != nil {
return err
}
modifyRequestStr := string(modifyRequestb)
var resultp *uint16
err = hcsModifyProcess(process.handle, modifyRequestStr, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return makeProcessError(process, operation, "", err)
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return nil
}
func (process *process) properties() (*processStatus, error) {
operation := "properties"
title := "HCSShim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
var (
resultp *uint16
propertiesp *uint16
)
err := hcsGetProcessProperties(process.handle, &propertiesp, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return nil, err
}
if propertiesp == nil {
return nil, ErrUnexpectedValue
}
propertiesRaw := convertAndFreeCoTaskMemBytes(propertiesp)
properties := &processStatus{}
if err := json.Unmarshal(propertiesRaw, properties); err != nil {
return nil, err
}
logrus.Debugf(title+" succeeded processid=%d, properties=%s", process.processID, propertiesRaw)
return properties, nil
return convertProcessError(process.p.ResizeConsole(width, height), process)
}
// Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing
// these pipes does not close the underlying pipes; it should be possible to
// call this multiple times to get multiple interfaces.
func (process *process) Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error) {
process.handleLock.RLock()
defer process.handleLock.RUnlock()
operation := "Stdio"
title := "HCSShim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
if process.handle == 0 {
return nil, nil, nil, makeProcessError(process, operation, "", ErrAlreadyClosed)
}
var stdIn, stdOut, stdErr syscall.Handle
if process.cachedPipes == nil {
var (
processInfo hcsProcessInformation
resultp *uint16
)
err := hcsGetProcessInfo(process.handle, &processInfo, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return nil, nil, nil, makeProcessError(process, operation, "", err)
}
stdIn, stdOut, stdErr = processInfo.StdInput, processInfo.StdOutput, processInfo.StdError
} else {
// Use cached pipes
stdIn, stdOut, stdErr = process.cachedPipes.stdIn, process.cachedPipes.stdOut, process.cachedPipes.stdErr
// Invalidate the cache
process.cachedPipes = nil
}
pipes, err := makeOpenFiles([]syscall.Handle{stdIn, stdOut, stdErr})
stdin, stdout, stderr, err := process.p.Stdio()
if err != nil {
return nil, nil, nil, makeProcessError(process, operation, "", err)
err = convertProcessError(err, process)
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return pipes[0], pipes[1], pipes[2], nil
return stdin, stdout, stderr, err
}
// CloseStdin closes the write side of the stdin pipe so that the process is
// notified on the read side that there is no more data in stdin.
func (process *process) CloseStdin() error {
process.handleLock.RLock()
defer process.handleLock.RUnlock()
operation := "CloseStdin"
title := "HCSShim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
if process.handle == 0 {
return makeProcessError(process, operation, "", ErrAlreadyClosed)
}
modifyRequest := processModifyRequest{
Operation: modifyCloseHandle,
CloseHandle: &closeHandle{
Handle: stdIn,
},
}
modifyRequestb, err := json.Marshal(modifyRequest)
if err != nil {
return err
}
modifyRequestStr := string(modifyRequestb)
var resultp *uint16
err = hcsModifyProcess(process.handle, modifyRequestStr, &resultp)
err = processHcsResult(err, resultp)
if err != nil {
return makeProcessError(process, operation, "", err)
}
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return nil
return convertProcessError(process.p.CloseStdin(), process)
}
// Close cleans up any state associated with the process but does not kill
// or wait on it.
func (process *process) Close() error {
process.handleLock.Lock()
defer process.handleLock.Unlock()
operation := "Close"
title := "HCSShim::Process::" + operation
logrus.Debugf(title+" processid=%d", process.processID)
// Don't double free this
if process.handle == 0 {
return nil
}
if err := process.unregisterCallback(); err != nil {
return makeProcessError(process, operation, "", err)
}
if err := hcsCloseProcess(process.handle); err != nil {
return makeProcessError(process, operation, "", err)
}
process.handle = 0
logrus.Debugf(title+" succeeded processid=%d", process.processID)
return nil
}
func (process *process) registerCallback() error {
context := &notifcationWatcherContext{
channels: newChannels(),
}
callbackMapLock.Lock()
callbackNumber := nextCallback
nextCallback++
callbackMap[callbackNumber] = context
callbackMapLock.Unlock()
var callbackHandle hcsCallback
err := hcsRegisterProcessCallback(process.handle, notificationWatcherCallback, callbackNumber, &callbackHandle)
if err != nil {
return err
}
context.handle = callbackHandle
process.callbackNumber = callbackNumber
return nil
}
func (process *process) unregisterCallback() error {
callbackNumber := process.callbackNumber
callbackMapLock.RLock()
context := callbackMap[callbackNumber]
callbackMapLock.RUnlock()
if context == nil {
return nil
}
handle := context.handle
if handle == 0 {
return nil
}
// hcsUnregisterProcessCallback has its own synchronization
// to wait for all callbacks to complete. We must NOT hold the callbackMapLock.
err := hcsUnregisterProcessCallback(handle)
if err != nil {
return err
}
closeChannels(context.channels)
callbackMapLock.Lock()
callbackMap[callbackNumber] = nil
callbackMapLock.Unlock()
handle = 0
return nil
return convertProcessError(process.p.Close(), process)
}
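After this rewrite the legacy process type is a thin adaptor: each method delegates to the embedded internal hcs.Process and passes the result through convertProcessError so existing callers keep receiving the old error values. A stand-alone sketch of that wrap-and-translate shape (all names and errors below are invented for illustration):

package main

import (
	"errors"
	"fmt"
)

// inner stands in for the new internal implementation.
type inner struct{}

var errInnerClosed = errors.New("inner: already closed")

func (inner) Kill() error { return errInnerClosed }

// ErrAlreadyClosed is the error value the old API promised its callers.
var ErrAlreadyClosed = errors.New("the handle has already been closed")

// convertError plays the role convertProcessError plays in the diff:
// translate new-style errors back into the legacy ones.
func convertError(err error) error {
	if err == errInnerClosed {
		return ErrAlreadyClosed
	}
	return err
}

// legacyProcess stands in for the old public type that now only delegates.
type legacyProcess struct {
	p inner
}

func (process *legacyProcess) Kill() error {
	return convertError(process.p.Kill())
}

func main() {
	var process legacyProcess
	fmt.Println(process.Kill()) // prints the legacy error text
}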

View file

@ -1,27 +0,0 @@
package hcsshim
import "github.com/sirupsen/logrus"
// UnprepareLayer disables the filesystem filter for the read-write layer with
// the given id.
func UnprepareLayer(info DriverInfo, layerId string) error {
title := "hcsshim::UnprepareLayer "
logrus.Debugf(title+"flavour %d layerId %s", info.Flavour, layerId)
// Convert info to API calling convention
infop, err := convertDriverInfo(info)
if err != nil {
logrus.Error(err)
return err
}
err = unprepareLayer(&infop, layerId)
if err != nil {
err = makeErrorf(err, title, "layerId=%s flavour=%d", layerId, info.Flavour)
logrus.Error(err)
return err
}
logrus.Debugf(title+"succeeded flavour %d layerId=%s", info.Flavour, layerId)
return nil
}

View file

@ -2,6 +2,5 @@ package hcsshim
// IsTP4 returns whether the currently running Windows build is at least TP4.
func IsTP4() bool {
// HNSCall was not present in TP4
return procHNSCall.Find() != nil
return false
}

File diff suppressed because it is too large

View file

@ -0,0 +1,52 @@
// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
package hcsshim
import (
"syscall"
"unsafe"
"github.com/Microsoft/hcsshim/internal/interop"
"golang.org/x/sys/windows"
)
var _ unsafe.Pointer
// Do the interface allocations only once for common
// Errno values.
const (
errnoERROR_IO_PENDING = 997
)
var (
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
)
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
return nil
case errnoERROR_IO_PENDING:
return errERROR_IO_PENDING
}
// TODO: add more here, after collecting data on the common
// error values seen on Windows. (perhaps when running
// all.bat?)
return e
}
var (
modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll")
procSetCurrentThreadCompartmentId = modiphlpapi.NewProc("SetCurrentThreadCompartmentId")
)
func SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) {
r0, _, _ := syscall.Syscall(procSetCurrentThreadCompartmentId.Addr(), 1, uintptr(compartmentId), 0, 0)
if int32(r0) < 0 {
hr = interop.Win32FromHresult(r0)
}
return
}

View file

@ -1,9 +0,0 @@
// +build ppc64
package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

View file

@ -90,7 +90,7 @@ Calls like `WatchTree` may return different events (or number of events) dependi
Only `Consul` and `etcd` have support for TLS and you should build and provide your own `config.TLS` object to feed the client. Support is planned for `zookeeper`.
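For illustration, a minimal sketch of feeding a `config.TLS` object to an etcd-backed store (the endpoint, key and timeout are placeholders, and the empty `tls.Config` stands in for a real client certificate/CA setup):

```go
package main

import (
	"crypto/tls"
	"log"
	"time"

	"github.com/docker/libkv"
	"github.com/docker/libkv/store"
	"github.com/docker/libkv/store/etcd"
)

func main() {
	// Backends must be registered before libkv.NewStore can use them.
	etcd.Register()

	// Placeholder TLS config; a real one would carry client certs and a CA pool.
	tlsCfg := &tls.Config{}

	kv, err := libkv.NewStore(
		store.ETCD,
		[]string{"127.0.0.1:2379"},
		&store.Config{
			TLS:               tlsCfg,
			ConnectionTimeout: 10 * time.Second,
		},
	)
	if err != nil {
		log.Fatalf("cannot create store: %v", err)
	}

	pair, err := kv.Get("some/key")
	if err != nil {
		log.Fatalf("get failed: %v", err)
	}
	log.Printf("%s = %s", pair.Key, pair.Value)
}
```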
##Roadmap
## Roadmap
- Make the API nicer to use (using `options`)
- Provide more options (`consistency` for example)
@ -98,10 +98,10 @@ Only `Consul` and `etcd` have support for TLS and you should build and provide y
- Better key formatting
- New backends?
##Contributing
## Contributing
Want to hack on libkv? [Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md) apply.
##Copyright and license
## Copyright and license
Copyright © 2014-2016 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/.

View file

@ -10,9 +10,9 @@ import (
"sync/atomic"
"time"
"github.com/boltdb/bolt"
"github.com/docker/libkv"
"github.com/docker/libkv/store"
bolt "go.etcd.io/bbolt"
)
var (

View file

@ -1,6 +1,7 @@
package etcd
import (
"context"
"crypto/tls"
"errors"
"log"
@ -9,8 +10,6 @@ import (
"strings"
"time"
"golang.org/x/net/context"
etcd "github.com/coreos/etcd/client"
"github.com/docker/libkv"
"github.com/docker/libkv/store"
@ -30,13 +29,29 @@ type Etcd struct {
}
type etcdLock struct {
client etcd.KeysAPI
stopLock chan struct{}
client etcd.KeysAPI
key string
value string
ttl time.Duration
// Closed when the caller wants to stop renewing the lock. I'm not sure
// why this is even used - you could just call the Unlock() method.
stopRenew chan struct{}
key string
value string
last *etcd.Response
ttl time.Duration
// When the lock is held, this is the last modified index of the key.
// Used for conditional updates when extending the lock TTL and when
// conditionally deleting when Unlock() is called.
lastIndex uint64
// When the lock is held, this function will cancel the locked context.
// This is called both by the Unlock() method in order to stop the
// background holding goroutine and in a deferred call in that background
// holding goroutine in case the lock is lost due to an error or the
// stopRenew channel is closed. Calling this function also closes the chan
// returned by the Lock() method.
cancel context.CancelFunc
// Used to sync the Unlock() call with the background holding goroutine.
// This channel is closed when that background goroutine exits, signalling
// that it is okay to conditionally delete the key.
doneHolding chan struct{}
}
const (
@ -472,112 +487,97 @@ func (s *Etcd) NewLock(key string, options *store.LockOptions) (lock store.Locke
// doing so. It returns a channel that is closed if our
// lock is lost or if an error occurs
func (l *etcdLock) Lock(stopChan chan struct{}) (<-chan struct{}, error) {
// Lock holder channel
lockHeld := make(chan struct{})
stopLocking := l.stopRenew
// Conditional Set - only if the key does not exist.
setOpts := &etcd.SetOptions{
TTL: l.ttl,
TTL: l.ttl,
PrevExist: etcd.PrevNoExist,
}
for {
setOpts.PrevExist = etcd.PrevNoExist
resp, err := l.client.Set(context.Background(), l.key, l.value, setOpts)
if err != nil {
if etcdError, ok := err.(etcd.Error); ok {
if etcdError.Code != etcd.ErrorCodeNodeExist {
return nil, err
}
setOpts.PrevIndex = ^uint64(0)
}
} else {
setOpts.PrevIndex = resp.Node.ModifiedIndex
}
setOpts.PrevExist = etcd.PrevExist
l.last, err = l.client.Set(context.Background(), l.key, l.value, setOpts)
if err == nil {
// Leader section
l.stopLock = stopLocking
go l.holdLock(l.key, lockHeld, stopLocking)
break
} else {
// If this is a legitimate error, return
if etcdError, ok := err.(etcd.Error); ok {
if etcdError.Code != etcd.ErrorCodeTestFailed {
return nil, err
}
}
// Acquired the lock!
l.lastIndex = resp.Node.ModifiedIndex
lockedCtx, cancel := context.WithCancel(context.Background())
l.cancel = cancel
l.doneHolding = make(chan struct{})
// Seeker section
errorCh := make(chan error)
chWStop := make(chan bool)
free := make(chan bool)
go l.holdLock(lockedCtx)
go l.waitLock(l.key, errorCh, chWStop, free)
// Wait for the key to be available or for
// a signal to stop trying to lock the key
select {
case <-free:
break
case err := <-errorCh:
return nil, err
case <-stopChan:
return nil, ErrAbortTryLock
}
// Delete or Expire event occurred
// Retry
return lockedCtx.Done(), nil
}
}
return lockHeld, nil
etcdErr, ok := err.(etcd.Error)
if !ok || etcdErr.Code != etcd.ErrorCodeNodeExist {
return nil, err // Unexpected error.
}
// Need to wait for the lock key to expire or be deleted.
if err := l.waitLock(stopChan, etcdErr.Index); err != nil {
return nil, err
}
// Delete or Expire event occurred.
// Retry
}
}
// Hold the lock as long as we can
// Hold the lock as long as we can.
// Updates the key ttl periodically until we receive
// an explicit stop signal from the Unlock method
func (l *etcdLock) holdLock(key string, lockHeld chan struct{}, stopLocking <-chan struct{}) {
defer close(lockHeld)
// an explicit stop signal from the Unlock method OR
// the stopRenew channel is closed.
func (l *etcdLock) holdLock(ctx context.Context) {
defer close(l.doneHolding)
defer l.cancel()
update := time.NewTicker(l.ttl / 3)
defer update.Stop()
var err error
setOpts := &etcd.SetOptions{TTL: l.ttl}
for {
select {
case <-update.C:
setOpts.PrevIndex = l.last.Node.ModifiedIndex
l.last, err = l.client.Set(context.Background(), key, l.value, setOpts)
setOpts.PrevIndex = l.lastIndex
resp, err := l.client.Set(ctx, l.key, l.value, setOpts)
if err != nil {
return
}
case <-stopLocking:
l.lastIndex = resp.Node.ModifiedIndex
case <-l.stopRenew:
return
case <-ctx.Done():
return
}
}
}
// WaitLock simply waits for the key to be available for creation
func (l *etcdLock) waitLock(key string, errorCh chan error, stopWatchCh chan bool, free chan<- bool) {
opts := &etcd.WatcherOptions{Recursive: false}
watcher := l.client.Watcher(key, opts)
for {
event, err := watcher.Next(context.Background())
if err != nil {
errorCh <- err
return
// WaitLock simply waits for the key to be available for creation.
func (l *etcdLock) waitLock(stopWait <-chan struct{}, afterIndex uint64) error {
waitCtx, waitCancel := context.WithCancel(context.Background())
defer waitCancel()
go func() {
select {
case <-stopWait:
// If the caller closes the stopWait, cancel the wait context.
waitCancel()
case <-waitCtx.Done():
// No longer waiting.
}
if event.Action == "delete" || event.Action == "expire" {
free <- true
return
}()
watcher := l.client.Watcher(l.key, &etcd.WatcherOptions{AfterIndex: afterIndex})
for {
event, err := watcher.Next(waitCtx)
if err != nil {
if err == context.Canceled {
return ErrAbortTryLock
}
return err
}
switch event.Action {
case "delete", "compareAndDelete", "expire":
return nil // The key has been deleted or expired.
}
}
}
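To make the new channel-based semantics concrete, here is a minimal usage sketch against the `store.Locker` interface (the key name, TTL, owner value and work duration are illustrative, not taken from this change):

```go
import (
	"log"
	"time"

	"github.com/docker/libkv/store"
)

// runLocked sketches how a caller uses the Locker implemented above. kv is
// any store.Store (for example the etcd one created via libkv.NewStore).
func runLocked(kv store.Store) error {
	lock, err := kv.NewLock("locks/my-job", &store.LockOptions{
		Value: []byte("owner-1"),
		TTL:   15 * time.Second,
	})
	if err != nil {
		return err
	}

	stop := make(chan struct{})
	lost, err := lock.Lock(stop) // Blocks until acquired or stop is closed.
	if err != nil {
		return err
	}
	defer lock.Unlock() // Conditionally deletes the key using the saved index.

	select {
	case <-lost:
		// Closed when the lock is lost (renewal failed or stopRenew fired);
		// in the implementation above this is lockedCtx.Done().
		log.Println("lock lost before the work finished")
	case <-time.After(30 * time.Second):
		// Stand-in for the work done while holding the lock.
	}
	return nil
}
```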
@ -585,19 +585,17 @@ func (l *etcdLock) waitLock(key string, errorCh chan error, stopWatchCh chan boo
// Unlock the "key". Calling unlock while
// not holding the lock will throw an error
func (l *etcdLock) Unlock() error {
if l.stopLock != nil {
l.stopLock <- struct{}{}
}
if l.last != nil {
l.cancel() // Will signal the holdLock goroutine to exit.
<-l.doneHolding // Wait for the holdLock goroutine to exit.
var err error
if l.lastIndex != 0 {
delOpts := &etcd.DeleteOptions{
PrevIndex: l.last.Node.ModifiedIndex,
}
_, err := l.client.Delete(context.Background(), l.key, delOpts)
if err != nil {
return err
PrevIndex: l.lastIndex,
}
_, err = l.client.Delete(context.Background(), l.key, delOpts)
}
return nil
return err
}
// Close closes the client connection

View file

@ -1,5 +1,18 @@
Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.2.1-green.svg)
====
bbolt
=====
[![Go Report Card](https://goreportcard.com/badge/github.com/etcd-io/bbolt?style=flat-square)](https://goreportcard.com/report/github.com/etcd-io/bbolt)
[![Coverage](https://codecov.io/gh/etcd-io/bbolt/branch/master/graph/badge.svg)](https://codecov.io/gh/etcd-io/bbolt)
[![Build Status Travis](https://img.shields.io/travis/etcd-io/bboltlabs.svg?style=flat-square&&branch=master)](https://travis-ci.com/etcd-io/bbolt)
[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/etcd-io/bbolt)
[![Releases](https://img.shields.io/github/release/etcd-io/bbolt/all.svg?style=flat-square)](https://github.com/etcd-io/bbolt/releases)
[![LICENSE](https://img.shields.io/github/license/etcd-io/bbolt.svg?style=flat-square)](https://github.com/etcd-io/bbolt/blob/master/LICENSE)
bbolt is a fork of [Ben Johnson's][gh_ben] [Bolt][bolt] key/value
store. The purpose of this fork is to provide the Go community with an active
maintenance and development target for Bolt; the goal is improved reliability
and stability. bbolt includes bug fixes, performance enhancements, and features
not found in Bolt while preserving backwards compatibility with the Bolt API.
Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
[LMDB project][lmdb]. The goal of the project is to provide a simple,
@ -10,6 +23,8 @@ Since Bolt is meant to be used as such a low-level piece of functionality,
simplicity is key. The API will be small and only focus on getting values
and setting values. That's it.
[gh_ben]: https://github.com/benbjohnson
[bolt]: https://github.com/boltdb/bolt
[hyc_symas]: https://twitter.com/hyc_symas
[lmdb]: http://symas.com/mdb/
@ -17,40 +32,46 @@ and setting values. That's it.
Bolt is stable, the API is fixed, and the file format is fixed. Full unit
test coverage and randomized black box testing are used to ensure database
consistency and thread safety. Bolt is currently in high-load production
consistency and thread safety. Bolt is currently used in high-load production
environments serving databases as large as 1TB. Many companies such as
Shopify and Heroku use Bolt-backed services every day.
## Project versioning
bbolt uses [semantic versioning](http://semver.org).
API should not change between patch and minor releases.
New minor versions may add additional features to the API.
## Table of Contents
- [Getting Started](#getting-started)
- [Installing](#installing)
- [Opening a database](#opening-a-database)
- [Transactions](#transactions)
- [Read-write transactions](#read-write-transactions)
- [Read-only transactions](#read-only-transactions)
- [Batch read-write transactions](#batch-read-write-transactions)
- [Managing transactions manually](#managing-transactions-manually)
- [Using buckets](#using-buckets)
- [Using key/value pairs](#using-keyvalue-pairs)
- [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
- [Iterating over keys](#iterating-over-keys)
- [Prefix scans](#prefix-scans)
- [Range scans](#range-scans)
- [ForEach()](#foreach)
- [Nested buckets](#nested-buckets)
- [Database backups](#database-backups)
- [Statistics](#statistics)
- [Read-Only Mode](#read-only-mode)
- [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
- [Resources](#resources)
- [Comparison with other databases](#comparison-with-other-databases)
- [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
- [LevelDB, RocksDB](#leveldb-rocksdb)
- [LMDB](#lmdb)
- [Caveats & Limitations](#caveats--limitations)
- [Reading the Source](#reading-the-source)
- [Other Projects Using Bolt](#other-projects-using-bolt)
- [Getting Started](#getting-started)
- [Installing](#installing)
- [Opening a database](#opening-a-database)
- [Transactions](#transactions)
- [Read-write transactions](#read-write-transactions)
- [Read-only transactions](#read-only-transactions)
- [Batch read-write transactions](#batch-read-write-transactions)
- [Managing transactions manually](#managing-transactions-manually)
- [Using buckets](#using-buckets)
- [Using key/value pairs](#using-keyvalue-pairs)
- [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
- [Iterating over keys](#iterating-over-keys)
- [Prefix scans](#prefix-scans)
- [Range scans](#range-scans)
- [ForEach()](#foreach)
- [Nested buckets](#nested-buckets)
- [Database backups](#database-backups)
- [Statistics](#statistics)
- [Read-Only Mode](#read-only-mode)
- [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
- [Resources](#resources)
- [Comparison with other databases](#comparison-with-other-databases)
- [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
- [LevelDB, RocksDB](#leveldb-rocksdb)
- [LMDB](#lmdb)
- [Caveats & Limitations](#caveats--limitations)
- [Reading the Source](#reading-the-source)
- [Other Projects Using Bolt](#other-projects-using-bolt)
## Getting Started
@ -59,13 +80,28 @@ Shopify and Heroku use Bolt-backed services every day.
To start using Bolt, install Go and run `go get`:
```sh
$ go get github.com/boltdb/bolt/...
$ go get go.etcd.io/bbolt/...
```
This will retrieve the library and install the `bolt` command line utility into
your `$GOBIN` path.
### Importing bbolt
To use bbolt as an embedded key-value store, import as:
```go
import bolt "go.etcd.io/bbolt"
db, err := bolt.Open(path, 0666, nil)
if err != nil {
return err
}
defer db.Close()
```
### Opening a database
The top-level object in Bolt is a `DB`. It is represented as a single file on
@ -79,7 +115,7 @@ package main
import (
"log"
"github.com/boltdb/bolt"
bolt "go.etcd.io/bbolt"
)
func main() {
@ -395,7 +431,7 @@ db.View(func(tx *bolt.Tx) error {
c := tx.Bucket([]byte("MyBucket")).Cursor()
prefix := []byte("1234")
for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() {
for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
fmt.Printf("key=%s, value=%s\n", k, v)
}
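For context, a self-contained sketch of the corrected scan (it assumes an already-opened `*bolt.DB` and that `MyBucket` exists, as in the README's other examples):

```go
import (
	"bytes"
	"fmt"

	bolt "go.etcd.io/bbolt"
)

// prefixScan prints every key in "MyBucket" that starts with prefix.
func prefixScan(db *bolt.DB, prefix []byte) error {
	return db.View(func(tx *bolt.Tx) error {
		c := tx.Bucket([]byte("MyBucket")).Cursor()
		// The k != nil guard stops the loop cleanly once the cursor has
		// moved past the last key in the bucket.
		for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
			fmt.Printf("key=%s, value=%s\n", k, v)
		}
		return nil
	})
}
```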
@ -464,6 +500,55 @@ func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
func (*Bucket) DeleteBucket(key []byte) error
```
Say you had a multi-tenant application where the root-level bucket was the account bucket. Inside this bucket is a sequence of accounts, which are themselves buckets. Inside each account bucket you could have many buckets pertaining to the account itself (Users, Notes, etc.), isolating the information into logical groupings.
```go
// createUser creates a new user in the given account.
func createUser(accountID int, u *User) error {
// Start the transaction.
tx, err := db.Begin(true)
if err != nil {
return err
}
defer tx.Rollback()
// Retrieve the root bucket for the account.
// Assume this has already been created when the account was set up.
root := tx.Bucket([]byte(strconv.FormatUint(accountID, 10)))
// Setup the users bucket.
bkt, err := root.CreateBucketIfNotExists([]byte("USERS"))
if err != nil {
return err
}
// Generate an ID for the new user.
userID, err := bkt.NextSequence()
if err != nil {
return err
}
u.ID = userID
// Marshal and save the encoded user.
if buf, err := json.Marshal(u); err != nil {
return err
} else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil {
return err
}
// Commit the transaction.
if err := tx.Commit(); err != nil {
return err
}
return nil
}
```
### Database backups
@ -473,7 +558,7 @@ this from a read-only transaction, it will perform a hot backup and not block
your other database reads and writes.
By default, it will use a regular file handle which will utilize the operating
system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx)
system's page cache. See the [`Tx`](https://godoc.org/go.etcd.io/bbolt#Tx)
documentation for information about optimizing for larger-than-RAM datasets.
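As a quick illustration of `Tx.WriteTo` (a minimal sketch, assuming an already-opened `*bolt.DB`; the destination path is illustrative):

```go
import (
	"os"

	bolt "go.etcd.io/bbolt"
)

// backupTo writes a consistent snapshot of the database to path.
func backupTo(db *bolt.DB, path string) error {
	return db.View(func(tx *bolt.Tx) error {
		f, err := os.Create(path)
		if err != nil {
			return err
		}
		defer f.Close()
		// Tx.WriteTo streams the pages seen by this read transaction,
		// so other reads and writes are not blocked during the copy.
		_, err = tx.WriteTo(f)
		return err
	})
}
```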
One common use case is to backup over HTTP so you can use tools like `cURL` to
@ -719,6 +804,9 @@ Here are a few things to note when evaluating and using Bolt:
can be reused by a new page or can be unmapped from virtual memory and you'll
see an `unexpected fault address` panic when accessing it.
* Bolt uses an exclusive write lock on the database file so it cannot be
shared by multiple processes.
* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
buckets that have random inserts will cause your database to have very poor
page utilization.
@ -759,7 +847,7 @@ Here are a few things to note when evaluating and using Bolt:
## Reading the Source
Bolt is a relatively small code base (<3KLOC) for an embedded, serializable,
Bolt is a relatively small code base (<5KLOC) for an embedded, serializable,
transactional key/value database so it can be a good starting point for people
interested in how databases work.
@ -811,47 +899,55 @@ them via pull request.
Below is a list of public, open source projects that use Bolt:
* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal.
* [boltcli](https://github.com/spacewander/boltcli) - the redis-cli for boltdb with Lua script support.
* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB
* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs.
* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
* [stow](https://github.com/djherbis/stow) - a persistence manager for objects
backed by boltdb.
* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
simple tx and key scans.
* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency.
* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang, using BoltDB for persistent key/value storage and the high-performance HTTPRouter for routing.
* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains
* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build - > test -> release) tool, with no external dependencies
* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs.
* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
* [reef-pi](https://github.com/reef-pi/reef-pi) - reef-pi is an award winning, modular, DIY reef tank controller using easy to learn electronics based on a Raspberry Pi.
* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
* [stow](https://github.com/djherbis/stow) - a persistence manager for objects
backed by boltdb.
* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development.
* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
If you are using Bolt in a project please send a pull request to add it to the list.

View file

@ -1,4 +1,4 @@
package bolt
package bbolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB

View file

@ -1,4 +1,4 @@
package bolt
package bbolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

View file

@ -1,4 +1,4 @@
package bolt
package bbolt
import "unsafe"

View file

@ -1,6 +1,6 @@
// +build arm64
package bolt
package bbolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

View file

@ -1,4 +1,4 @@
package bolt
package bbolt
import (
"syscall"

libnetwork/vendor/go.etcd.io/bbolt/bolt_mips64x.go generated vendored Normal file
View file

@ -0,0 +1,12 @@
// +build mips64 mips64le
package bbolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x8000000000 // 512GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

libnetwork/vendor/go.etcd.io/bbolt/bolt_mipsx.go generated vendored Normal file
View file

@ -0,0 +1,12 @@
// +build mips mipsle
package bbolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x40000000 // 1GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

Some files were not shown because too many files have changed in this diff