# npm-rails
/node_modules
/npm-debug.log
+
+# Generated when building distribution packages
+/package-build.version
+++ /dev/null
-1.2.1.20181126194329
+++ /dev/null
-+ echo -n 'geckodriver: '
-+ which geckodriver || fatal "No geckodriver. Unable to find Mozilla geckodriver. Please download the server from https://github.com/mozilla/geckodriver/releases and place it somewhere on your PATH. More info at https://developer.mozilla.org/en-US/docs/Mozilla/QA/Marionette/WebDriver."
-
DASHQ_UNLESS_DEBUG=
fi
-EASY_INSTALL2=$(find_easy_install -$PYTHON2_VERSION "")
-EASY_INSTALL3=$(find_easy_install -$PYTHON3_VERSION 3)
-
RUN_BUILD_PACKAGES_PATH="`dirname \"$0\"`"
RUN_BUILD_PACKAGES_PATH="`( cd \"$RUN_BUILD_PACKAGES_PATH\" && pwd )`" # absolutized and normalized
if [ -z "$RUN_BUILD_PACKAGES_PATH" ] ; then
echo ${repo_pkg_list} |grep -q ${complete_pkgname}
if [ $? -eq 0 ] ; then
echo "Package $complete_pkgname exists, not rebuilding!"
- curl -o ./${complete_pkgname} http://apt.arvados.org/pool/${D}/main/${repo_subdir}/${complete_pkgname}
+ curl -s -o ./${complete_pkgname} http://apt.arvados.org/pool/${D}/main/${repo_subdir}/${complete_pkgname}
return 1
elif test -f "$WORKSPACE/packages/$TARGET/processed/${complete_pkgname}" ; then
echo "Package $complete_pkgname exists, not rebuilding!"
else
centos_repo="http://rpm.arvados.org/CentOS/7/dev/x86_64/"
- repo_pkg_list=$(curl -o - ${centos_repo})
+ repo_pkg_list=$(curl -s -o - ${centos_repo})
echo ${repo_pkg_list} |grep -q ${complete_pkgname}
if [ $? -eq 0 ]; then
echo "Package $complete_pkgname exists, not rebuilding!"
- curl -o ./${complete_pkgname} ${centos_repo}${complete_pkgname}
+ curl -s -o ./${complete_pkgname} ${centos_repo}${complete_pkgname}
return 1
elif test -f "$WORKSPACE/packages/$TARGET/processed/${complete_pkgname}" ; then
echo "Package $complete_pkgname exists, not rebuilding!"
lib/controller
lib/crunchstat
lib/cloud
+lib/cloud/azure
lib/dispatchcloud
lib/dispatchcloud/container
lib/dispatchcloud/scheduler
lib/controller
lib/crunchstat
lib/cloud
+ lib/cloud/azure
lib/dispatchcloud
lib/dispatchcloud/container
lib/dispatchcloud/scheduler
//
// SPDX-License-Identifier: AGPL-3.0
-package cloud
+package azure
import (
"context"
"encoding/base64"
+ "encoding/json"
"fmt"
"net/http"
"regexp"
"sync"
"time"
+ "git.curoverse.com/arvados.git/lib/cloud"
"git.curoverse.com/arvados.git/sdk/go/arvados"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-06-01/compute"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-06-01/network"
"github.com/Azure/go-autorest/autorest/azure/auth"
"github.com/Azure/go-autorest/autorest/to"
"github.com/jmcvetta/randutil"
- "github.com/mitchellh/mapstructure"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh"
)
-type AzureInstanceSetConfig struct {
- SubscriptionID string `mapstructure:"subscription_id"`
- ClientID string `mapstructure:"key"`
- ClientSecret string `mapstructure:"secret"`
- TenantID string `mapstructure:"tenant_id"`
- CloudEnv string `mapstructure:"cloud_environment"`
- ResourceGroup string `mapstructure:"resource_group"`
- Location string `mapstructure:"region"`
- Network string `mapstructure:"network"`
- Subnet string `mapstructure:"subnet"`
- StorageAccount string `mapstructure:"storage_account"`
- BlobContainer string `mapstructure:"blob_container"`
- Image string `mapstructure:"image"`
- DeleteDanglingResourcesAfter float64 `mapstructure:"delete_dangling_resources_after"`
-}
-
-type VirtualMachinesClientWrapper interface {
- CreateOrUpdate(ctx context.Context,
+// Driver is the azure implementation of the cloud.Driver interface.
+var Driver = cloud.DriverFunc(newAzureInstanceSet)
+
+type azureInstanceSetConfig struct {
+ SubscriptionID string
+ ClientID string
+ ClientSecret string
+ TenantID string
+ CloudEnvironment string
+ ResourceGroup string
+ Location string
+ Network string
+ Subnet string
+ StorageAccount string
+ BlobContainer string
+ DeleteDanglingResourcesAfter arvados.Duration
+}
+
+type virtualMachinesClientWrapper interface {
+ createOrUpdate(ctx context.Context,
resourceGroupName string,
VMName string,
parameters compute.VirtualMachine) (result compute.VirtualMachine, err error)
- Delete(ctx context.Context, resourceGroupName string, VMName string) (result *http.Response, err error)
- ListComplete(ctx context.Context, resourceGroupName string) (result compute.VirtualMachineListResultIterator, err error)
+ delete(ctx context.Context, resourceGroupName string, VMName string) (result *http.Response, err error)
+ listComplete(ctx context.Context, resourceGroupName string) (result compute.VirtualMachineListResultIterator, err error)
}
-type VirtualMachinesClientImpl struct {
+type virtualMachinesClientImpl struct {
inner compute.VirtualMachinesClient
}
-func (cl *VirtualMachinesClientImpl) CreateOrUpdate(ctx context.Context,
+func (cl *virtualMachinesClientImpl) createOrUpdate(ctx context.Context,
resourceGroupName string,
VMName string,
parameters compute.VirtualMachine) (result compute.VirtualMachine, err error) {
future, err := cl.inner.CreateOrUpdate(ctx, resourceGroupName, VMName, parameters)
if err != nil {
- return compute.VirtualMachine{}, WrapAzureError(err)
+ return compute.VirtualMachine{}, wrapAzureError(err)
}
future.WaitForCompletionRef(ctx, cl.inner.Client)
r, err := future.Result(cl.inner)
- return r, WrapAzureError(err)
+ return r, wrapAzureError(err)
}
-func (cl *VirtualMachinesClientImpl) Delete(ctx context.Context, resourceGroupName string, VMName string) (result *http.Response, err error) {
+func (cl *virtualMachinesClientImpl) delete(ctx context.Context, resourceGroupName string, VMName string) (result *http.Response, err error) {
future, err := cl.inner.Delete(ctx, resourceGroupName, VMName)
if err != nil {
- return nil, WrapAzureError(err)
+ return nil, wrapAzureError(err)
}
err = future.WaitForCompletionRef(ctx, cl.inner.Client)
- return future.Response(), WrapAzureError(err)
+ return future.Response(), wrapAzureError(err)
}
-func (cl *VirtualMachinesClientImpl) ListComplete(ctx context.Context, resourceGroupName string) (result compute.VirtualMachineListResultIterator, err error) {
+func (cl *virtualMachinesClientImpl) listComplete(ctx context.Context, resourceGroupName string) (result compute.VirtualMachineListResultIterator, err error) {
r, err := cl.inner.ListComplete(ctx, resourceGroupName)
- return r, WrapAzureError(err)
+ return r, wrapAzureError(err)
}
-type InterfacesClientWrapper interface {
- CreateOrUpdate(ctx context.Context,
+type interfacesClientWrapper interface {
+ createOrUpdate(ctx context.Context,
resourceGroupName string,
networkInterfaceName string,
parameters network.Interface) (result network.Interface, err error)
- Delete(ctx context.Context, resourceGroupName string, networkInterfaceName string) (result *http.Response, err error)
- ListComplete(ctx context.Context, resourceGroupName string) (result network.InterfaceListResultIterator, err error)
+ delete(ctx context.Context, resourceGroupName string, networkInterfaceName string) (result *http.Response, err error)
+ listComplete(ctx context.Context, resourceGroupName string) (result network.InterfaceListResultIterator, err error)
}
-type InterfacesClientImpl struct {
+type interfacesClientImpl struct {
inner network.InterfacesClient
}
-func (cl *InterfacesClientImpl) Delete(ctx context.Context, resourceGroupName string, VMName string) (result *http.Response, err error) {
+func (cl *interfacesClientImpl) delete(ctx context.Context, resourceGroupName string, VMName string) (result *http.Response, err error) {
future, err := cl.inner.Delete(ctx, resourceGroupName, VMName)
if err != nil {
- return nil, WrapAzureError(err)
+ return nil, wrapAzureError(err)
}
err = future.WaitForCompletionRef(ctx, cl.inner.Client)
- return future.Response(), WrapAzureError(err)
+ return future.Response(), wrapAzureError(err)
}
-func (cl *InterfacesClientImpl) CreateOrUpdate(ctx context.Context,
+func (cl *interfacesClientImpl) createOrUpdate(ctx context.Context,
resourceGroupName string,
networkInterfaceName string,
parameters network.Interface) (result network.Interface, err error) {
future, err := cl.inner.CreateOrUpdate(ctx, resourceGroupName, networkInterfaceName, parameters)
if err != nil {
- return network.Interface{}, WrapAzureError(err)
+ return network.Interface{}, wrapAzureError(err)
}
future.WaitForCompletionRef(ctx, cl.inner.Client)
r, err := future.Result(cl.inner)
- return r, WrapAzureError(err)
+ return r, wrapAzureError(err)
}
-func (cl *InterfacesClientImpl) ListComplete(ctx context.Context, resourceGroupName string) (result network.InterfaceListResultIterator, err error) {
+func (cl *interfacesClientImpl) listComplete(ctx context.Context, resourceGroupName string) (result network.InterfaceListResultIterator, err error) {
r, err := cl.inner.ListComplete(ctx, resourceGroupName)
- return r, WrapAzureError(err)
+ return r, wrapAzureError(err)
}
var quotaRe = regexp.MustCompile(`(?i:exceed|quota|limit)`)
-type AzureRateLimitError struct {
+type azureRateLimitError struct {
azure.RequestError
- earliestRetry time.Time
+ firstRetry time.Time
}
-func (ar *AzureRateLimitError) EarliestRetry() time.Time {
- return ar.earliestRetry
+func (ar *azureRateLimitError) EarliestRetry() time.Time {
+ return ar.firstRetry
}
-type AzureQuotaError struct {
+type azureQuotaError struct {
azure.RequestError
}
-func (ar *AzureQuotaError) IsQuotaError() bool {
+func (ar *azureQuotaError) IsQuotaError() bool {
return true
}
-func WrapAzureError(err error) error {
+func wrapAzureError(err error) error {
de, ok := err.(autorest.DetailedError)
if !ok {
return err
earliestRetry = time.Now().Add(20 * time.Second)
}
}
- return &AzureRateLimitError{*rq, earliestRetry}
+ return &azureRateLimitError{*rq, earliestRetry}
}
if rq.ServiceError == nil {
return err
}
if quotaRe.FindString(rq.ServiceError.Code) != "" || quotaRe.FindString(rq.ServiceError.Message) != "" {
- return &AzureQuotaError{*rq}
+ return &azureQuotaError{*rq}
}
return err
}
-type AzureInstanceSet struct {
- azconfig AzureInstanceSetConfig
- vmClient VirtualMachinesClientWrapper
- netClient InterfacesClientWrapper
+type azureInstanceSet struct {
+ azconfig azureInstanceSetConfig
+ vmClient virtualMachinesClientWrapper
+ netClient interfacesClientWrapper
storageAcctClient storageacct.AccountsClient
azureEnv azure.Environment
interfaces map[string]network.Interface
logger logrus.FieldLogger
}
-func NewAzureInstanceSet(config map[string]interface{}, dispatcherID InstanceSetID, logger logrus.FieldLogger) (prv InstanceSet, err error) {
- azcfg := AzureInstanceSetConfig{}
- if err = mapstructure.Decode(config, &azcfg); err != nil {
+func newAzureInstanceSet(config json.RawMessage, dispatcherID cloud.InstanceSetID, logger logrus.FieldLogger) (prv cloud.InstanceSet, err error) {
+ azcfg := azureInstanceSetConfig{}
+ err = json.Unmarshal(config, &azcfg)
+ if err != nil {
return nil, err
}
- ap := AzureInstanceSet{logger: logger}
+
+ ap := azureInstanceSet{logger: logger}
err = ap.setup(azcfg, string(dispatcherID))
if err != nil {
return nil, err
return &ap, nil
}
-func (az *AzureInstanceSet) setup(azcfg AzureInstanceSetConfig, dispatcherID string) (err error) {
+func (az *azureInstanceSet) setup(azcfg azureInstanceSetConfig, dispatcherID string) (err error) {
az.azconfig = azcfg
vmClient := compute.NewVirtualMachinesClient(az.azconfig.SubscriptionID)
netClient := network.NewInterfacesClient(az.azconfig.SubscriptionID)
storageAcctClient := storageacct.NewAccountsClient(az.azconfig.SubscriptionID)
- az.azureEnv, err = azure.EnvironmentFromName(az.azconfig.CloudEnv)
+ az.azureEnv, err = azure.EnvironmentFromName(az.azconfig.CloudEnvironment)
if err != nil {
return err
}
netClient.Authorizer = authorizer
storageAcctClient.Authorizer = authorizer
- az.vmClient = &VirtualMachinesClientImpl{vmClient}
- az.netClient = &InterfacesClientImpl{netClient}
+ az.vmClient = &virtualMachinesClientImpl{vmClient}
+ az.netClient = &interfacesClientImpl{netClient}
az.storageAcctClient = storageAcctClient
az.dispatcherID = dispatcherID
tk.Stop()
return
case <-tk.C:
- az.ManageBlobs()
+ az.manageBlobs()
}
}
}()
az.deleteNIC = make(chan string)
az.deleteBlob = make(chan storage.Blob)
- for i := 0; i < 4; i += 1 {
+ for i := 0; i < 4; i++ {
go func() {
for {
nicname, ok := <-az.deleteNIC
if !ok {
return
}
- _, delerr := az.netClient.Delete(context.Background(), az.azconfig.ResourceGroup, nicname)
+ _, delerr := az.netClient.delete(context.Background(), az.azconfig.ResourceGroup, nicname)
if delerr != nil {
az.logger.WithError(delerr).Warnf("Error deleting %v", nicname)
} else {
return nil
}
-func (az *AzureInstanceSet) Create(
+func (az *azureInstanceSet) Create(
instanceType arvados.InstanceType,
- imageId ImageID,
- newTags InstanceTags,
- publicKey ssh.PublicKey) (Instance, error) {
+ imageID cloud.ImageID,
+ newTags cloud.InstanceTags,
+ publicKey ssh.PublicKey) (cloud.Instance, error) {
az.stopWg.Add(1)
defer az.stopWg.Done()
},
},
}
- nic, err := az.netClient.CreateOrUpdate(az.ctx, az.azconfig.ResourceGroup, name+"-nic", nicParameters)
+ nic, err := az.netClient.createOrUpdate(az.ctx, az.azconfig.ResourceGroup, name+"-nic", nicParameters)
if err != nil {
- return nil, WrapAzureError(err)
+ return nil, wrapAzureError(err)
}
- instance_vhd := fmt.Sprintf("https://%s.blob.%s/%s/%s-os.vhd",
+ instanceVhd := fmt.Sprintf("https://%s.blob.%s/%s/%s-os.vhd",
az.azconfig.StorageAccount,
az.azureEnv.StorageEndpointSuffix,
az.azconfig.BlobContainer,
Name: to.StringPtr(name + "-os"),
CreateOption: compute.FromImage,
Image: &compute.VirtualHardDisk{
- URI: to.StringPtr(string(imageId)),
+ URI: to.StringPtr(string(imageID)),
},
Vhd: &compute.VirtualHardDisk{
- URI: &instance_vhd,
+ URI: &instanceVhd,
},
},
},
},
}
- vm, err := az.vmClient.CreateOrUpdate(az.ctx, az.azconfig.ResourceGroup, name, vmParameters)
+ vm, err := az.vmClient.createOrUpdate(az.ctx, az.azconfig.ResourceGroup, name, vmParameters)
if err != nil {
- return nil, WrapAzureError(err)
+ return nil, wrapAzureError(err)
}
- return &AzureInstance{
+ return &azureInstance{
provider: az,
nic: nic,
vm: vm,
}, nil
}
-func (az *AzureInstanceSet) Instances(InstanceTags) ([]Instance, error) {
+func (az *azureInstanceSet) Instances(cloud.InstanceTags) ([]cloud.Instance, error) {
az.stopWg.Add(1)
defer az.stopWg.Done()
- interfaces, err := az.ManageNics()
+ interfaces, err := az.manageNics()
if err != nil {
return nil, err
}
- result, err := az.vmClient.ListComplete(az.ctx, az.azconfig.ResourceGroup)
+ result, err := az.vmClient.listComplete(az.ctx, az.azconfig.ResourceGroup)
if err != nil {
- return nil, WrapAzureError(err)
+ return nil, wrapAzureError(err)
}
- instances := make([]Instance, 0)
+ instances := make([]cloud.Instance, 0)
for ; result.NotDone(); err = result.Next() {
if err != nil {
- return nil, WrapAzureError(err)
+ return nil, wrapAzureError(err)
}
if strings.HasPrefix(*result.Value().Name, az.namePrefix) {
- instances = append(instances, &AzureInstance{
+ instances = append(instances, &azureInstance{
provider: az,
vm: result.Value(),
nic: interfaces[*(*result.Value().NetworkProfile.NetworkInterfaces)[0].ID]})
// not associated with a virtual machine and have a "create-at" time
// more than DeleteDanglingResourcesAfter (to prevent racing and
// deleting newly created NICs) in the past are deleted.
-func (az *AzureInstanceSet) ManageNics() (map[string]network.Interface, error) {
+func (az *azureInstanceSet) manageNics() (map[string]network.Interface, error) {
az.stopWg.Add(1)
defer az.stopWg.Done()
- result, err := az.netClient.ListComplete(az.ctx, az.azconfig.ResourceGroup)
+ result, err := az.netClient.listComplete(az.ctx, az.azconfig.ResourceGroup)
if err != nil {
- return nil, WrapAzureError(err)
+ return nil, wrapAzureError(err)
}
interfaces := make(map[string]network.Interface)
interfaces[*result.Value().ID] = result.Value()
} else {
if result.Value().Tags["created-at"] != nil {
- created_at, err := time.Parse(time.RFC3339Nano, *result.Value().Tags["created-at"])
+ createdAt, err := time.Parse(time.RFC3339Nano, *result.Value().Tags["created-at"])
if err == nil {
- if timestamp.Sub(created_at).Seconds() > az.azconfig.DeleteDanglingResourcesAfter {
+ if timestamp.Sub(createdAt).Seconds() > az.azconfig.DeleteDanglingResourcesAfter.Duration().Seconds() {
az.logger.Printf("Will delete %v because it is older than %v s", *result.Value().Name, az.azconfig.DeleteDanglingResourcesAfter)
az.deleteNIC <- *result.Value().Name
}
// have "namePrefix", are "available" (which means they are not
// leased to a VM) and haven't been modified for
// DeleteDanglingResourcesAfter seconds.
-func (az *AzureInstanceSet) ManageBlobs() {
+func (az *azureInstanceSet) manageBlobs() {
result, err := az.storageAcctClient.ListKeys(az.ctx, az.azconfig.ResourceGroup, az.azconfig.StorageAccount)
if err != nil {
az.logger.WithError(err).Warn("Couldn't get account keys")
if b.Properties.BlobType == storage.BlobTypePage &&
b.Properties.LeaseState == "available" &&
b.Properties.LeaseStatus == "unlocked" &&
- age.Seconds() > az.azconfig.DeleteDanglingResourcesAfter {
+ age.Seconds() > az.azconfig.DeleteDanglingResourcesAfter.Duration().Seconds() {
az.logger.Printf("Blob %v is unlocked and not modified for %v seconds, will delete", b.Name, age.Seconds())
az.deleteBlob <- b
}
}
-func (az *AzureInstanceSet) Stop() {
+func (az *azureInstanceSet) Stop() {
az.stopFunc()
az.stopWg.Wait()
close(az.deleteNIC)
close(az.deleteBlob)
}
-type AzureInstance struct {
- provider *AzureInstanceSet
+type azureInstance struct {
+ provider *azureInstanceSet
nic network.Interface
vm compute.VirtualMachine
}
-func (ai *AzureInstance) ID() InstanceID {
- return InstanceID(*ai.vm.ID)
+func (ai *azureInstance) ID() cloud.InstanceID {
+ return cloud.InstanceID(*ai.vm.ID)
}
-func (ai *AzureInstance) String() string {
+func (ai *azureInstance) String() string {
return *ai.vm.Name
}
-func (ai *AzureInstance) ProviderType() string {
+func (ai *azureInstance) ProviderType() string {
return string(ai.vm.VirtualMachineProperties.HardwareProfile.VMSize)
}
-func (ai *AzureInstance) SetTags(newTags InstanceTags) error {
+func (ai *azureInstance) SetTags(newTags cloud.InstanceTags) error {
ai.provider.stopWg.Add(1)
defer ai.provider.stopWg.Done()
Location: &ai.provider.azconfig.Location,
Tags: tags,
}
- vm, err := ai.provider.vmClient.CreateOrUpdate(ai.provider.ctx, ai.provider.azconfig.ResourceGroup, *ai.vm.Name, vmParameters)
+ vm, err := ai.provider.vmClient.createOrUpdate(ai.provider.ctx, ai.provider.azconfig.ResourceGroup, *ai.vm.Name, vmParameters)
if err != nil {
- return WrapAzureError(err)
+ return wrapAzureError(err)
}
ai.vm = vm
return nil
}
-func (ai *AzureInstance) Tags() InstanceTags {
+func (ai *azureInstance) Tags() cloud.InstanceTags {
tags := make(map[string]string)
for k, v := range ai.vm.Tags {
return tags
}
-func (ai *AzureInstance) Destroy() error {
+func (ai *azureInstance) Destroy() error {
ai.provider.stopWg.Add(1)
defer ai.provider.stopWg.Done()
- _, err := ai.provider.vmClient.Delete(ai.provider.ctx, ai.provider.azconfig.ResourceGroup, *ai.vm.Name)
- return WrapAzureError(err)
+ _, err := ai.provider.vmClient.delete(ai.provider.ctx, ai.provider.azconfig.ResourceGroup, *ai.vm.Name)
+ return wrapAzureError(err)
}
-func (ai *AzureInstance) Address() string {
+func (ai *azureInstance) Address() string {
return *(*ai.nic.IPConfigurations)[0].PrivateIPAddress
}
-func (ai *AzureInstance) VerifyHostKey(receivedKey ssh.PublicKey, client *ssh.Client) error {
+func (ai *azureInstance) VerifyHostKey(receivedKey ssh.PublicKey, client *ssh.Client) error {
ai.provider.stopWg.Add(1)
defer ai.provider.stopWg.Done()
if tg != "" {
if remoteFingerprint == tg {
return nil
- } else {
- return fmt.Errorf("Key fingerprint did not match, expected %q got %q", tg, remoteFingerprint)
}
+ return fmt.Errorf("Key fingerprint did not match, expected %q got %q", tg, remoteFingerprint)
}
nodetokenTag := tags["node-token"]
// SPDX-License-Identifier: AGPL-3.0
//
//
-// How to manually run individual tests against the real cloud
+// How to manually run individual tests against the real cloud:
//
-// $ go test -v git.curoverse.com/arvados.git/lib/cloud -live-azure-cfg azconfig.yml -check.f=TestListInstances
+// $ go test -v git.curoverse.com/arvados.git/lib/cloud/azure -live-azure-cfg azconfig.yml -check.f=TestCreate
+//
+// Tests should be run individually and in the order they are listed in the file:
//
// Example azconfig.yml:
//
-// subscription_id: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
-// key: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
-// region: centralus
-// cloud_environment: AzurePublicCloud
-// secret: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-// tenant_id: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
-// resource_group: zzzzz
-// network: zzzzz
-// subnet: zzzzz-subnet-private
-// storage_account: example
-// blob_container: vhds
-// image: "https://example.blob.core.windows.net/system/Microsoft.Compute/Images/images/zzzzz-compute-osDisk.XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX.vhd"
-// delete_dangling_resources_after: 20
-// authorized_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLQS1ExT2+WjA0d/hntEAyAtgeN1W2ik2QX8c2zO6HjlPHWXL92r07W0WMuDib40Pcevpi1BXeBWXA9ZB5KKMJB+ukaAu22KklnQuUmNvk6ZXnPKSkGxuCYvPQb08WhHf3p1VxiKfP3iauedBDM4x9/bkJohlBBQiFXzNUcQ+a6rKiMzmJN2gbL8ncyUzc+XQ5q4JndTwTGtOlzDiGOc9O4z5Dd76wtAVJneOuuNpwfFRVHThpJM6VThpCZOnl8APaceWXKeuwOuCae3COZMz++xQfxOfZ9Z8aIwo+TlQhsRaNfZ4Vjrop6ej8dtfZtgUFKfbXEOYaHrGrWGotFDTD example@example"
-
-package cloud
+// ImageIDForTestSuite: "https://example.blob.core.windows.net/system/Microsoft.Compute/Images/images/zzzzz-compute-osDisk.XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX.vhd"
+// DriverParameters:
+// SubscriptionID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+// ClientID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+// Location: centralus
+// CloudEnvironment: AzurePublicCloud
+// ClientSecret: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+// TenantID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+// ResourceGroup: zzzzz
+// Network: zzzzz
+// Subnet: zzzzz-subnet-private
+// StorageAccount: example
+// BlobContainer: vhds
+// DeleteDanglingResourcesAfter: 20s
+
+package azure
import (
"context"
+ "encoding/json"
"errors"
"flag"
"io/ioutil"
"net"
"net/http"
"os"
+ "testing"
"time"
+ "git.curoverse.com/arvados.git/lib/cloud"
"git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/config"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-06-01/compute"
check "gopkg.in/check.v1"
)
+// Gocheck boilerplate
+func Test(t *testing.T) {
+ check.TestingT(t)
+}
+
type AzureInstanceSetSuite struct{}
var _ = check.Suite(&AzureInstanceSetSuite{})
type VirtualMachinesClientStub struct{}
-var testKey []byte = []byte(`ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLQS1ExT2+WjA0d/hntEAyAtgeN1W2ik2QX8c2zO6HjlPHWXL92r07W0WMuDib40Pcevpi1BXeBWXA9ZB5KKMJB+ukaAu22KklnQuUmNvk6ZXnPKSkGxuCYvPQb08WhHf3p1VxiKfP3iauedBDM4x9/bkJohlBBQiFXzNUcQ+a6rKiMzmJN2gbL8ncyUzc+XQ5q4JndTwTGtOlzDiGOc9O4z5Dd76wtAVJneOuuNpwfFRVHThpJM6VThpCZOnl8APaceWXKeuwOuCae3COZMz++xQfxOfZ9Z8aIwo+TlQhsRaNfZ4Vjrop6ej8dtfZtgUFKfbXEOYaHrGrWGotFDTD example@example`)
+var testKey = []byte(`ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLQS1ExT2+WjA0d/hntEAyAtgeN1W2ik2QX8c2zO6HjlPHWXL92r07W0WMuDib40Pcevpi1BXeBWXA9ZB5KKMJB+ukaAu22KklnQuUmNvk6ZXnPKSkGxuCYvPQb08WhHf3p1VxiKfP3iauedBDM4x9/bkJohlBBQiFXzNUcQ+a6rKiMzmJN2gbL8ncyUzc+XQ5q4JndTwTGtOlzDiGOc9O4z5Dd76wtAVJneOuuNpwfFRVHThpJM6VThpCZOnl8APaceWXKeuwOuCae3COZMz++xQfxOfZ9Z8aIwo+TlQhsRaNfZ4Vjrop6ej8dtfZtgUFKfbXEOYaHrGrWGotFDTD example@example`)
-func (*VirtualMachinesClientStub) CreateOrUpdate(ctx context.Context,
+func (*VirtualMachinesClientStub) createOrUpdate(ctx context.Context,
resourceGroupName string,
VMName string,
parameters compute.VirtualMachine) (result compute.VirtualMachine, err error) {
return parameters, nil
}
-func (*VirtualMachinesClientStub) Delete(ctx context.Context, resourceGroupName string, VMName string) (result *http.Response, err error) {
+func (*VirtualMachinesClientStub) delete(ctx context.Context, resourceGroupName string, VMName string) (result *http.Response, err error) {
return nil, nil
}
-func (*VirtualMachinesClientStub) ListComplete(ctx context.Context, resourceGroupName string) (result compute.VirtualMachineListResultIterator, err error) {
+func (*VirtualMachinesClientStub) listComplete(ctx context.Context, resourceGroupName string) (result compute.VirtualMachineListResultIterator, err error) {
return compute.VirtualMachineListResultIterator{}, nil
}
type InterfacesClientStub struct{}
-func (*InterfacesClientStub) CreateOrUpdate(ctx context.Context,
+func (*InterfacesClientStub) createOrUpdate(ctx context.Context,
resourceGroupName string,
nicName string,
parameters network.Interface) (result network.Interface, err error) {
return parameters, nil
}
-func (*InterfacesClientStub) Delete(ctx context.Context, resourceGroupName string, VMName string) (result *http.Response, err error) {
+func (*InterfacesClientStub) delete(ctx context.Context, resourceGroupName string, VMName string) (result *http.Response, err error) {
return nil, nil
}
-func (*InterfacesClientStub) ListComplete(ctx context.Context, resourceGroupName string) (result network.InterfaceListResultIterator, err error) {
+func (*InterfacesClientStub) listComplete(ctx context.Context, resourceGroupName string) (result network.InterfaceListResultIterator, err error) {
return network.InterfaceListResultIterator{}, nil
}
+type testConfig struct {
+ ImageIDForTestSuite string
+ DriverParameters json.RawMessage
+}
+
var live = flag.String("live-azure-cfg", "", "Test with real azure API, provide config file")
-func GetInstanceSet() (InstanceSet, ImageID, arvados.Cluster, error) {
+func GetInstanceSet() (cloud.InstanceSet, cloud.ImageID, arvados.Cluster, error) {
cluster := arvados.Cluster{
InstanceTypes: arvados.InstanceTypeMap(map[string]arvados.InstanceType{
"tiny": arvados.InstanceType{
},
})}
if *live != "" {
- cfg := make(map[string]interface{})
- err := config.LoadFile(&cfg, *live)
+ var exampleCfg testConfig
+ err := config.LoadFile(&exampleCfg, *live)
if err != nil {
- return nil, ImageID(""), cluster, err
+ return nil, cloud.ImageID(""), cluster, err
}
- ap, err := NewAzureInstanceSet(cfg, "test123", logrus.StandardLogger())
- return ap, ImageID(cfg["image"].(string)), cluster, err
- } else {
- ap := AzureInstanceSet{
- azconfig: AzureInstanceSetConfig{
- BlobContainer: "vhds",
- },
- dispatcherID: "test123",
- namePrefix: "compute-test123-",
- logger: logrus.StandardLogger(),
- deleteNIC: make(chan string),
- deleteBlob: make(chan storage.Blob),
- }
- ap.ctx, ap.stopFunc = context.WithCancel(context.Background())
- ap.vmClient = &VirtualMachinesClientStub{}
- ap.netClient = &InterfacesClientStub{}
- return &ap, ImageID("blob"), cluster, nil
+
+ ap, err := newAzureInstanceSet(exampleCfg.DriverParameters, "test123", logrus.StandardLogger())
+ return ap, cloud.ImageID(exampleCfg.ImageIDForTestSuite), cluster, err
+ }
+ ap := azureInstanceSet{
+ azconfig: azureInstanceSetConfig{
+ BlobContainer: "vhds",
+ },
+ dispatcherID: "test123",
+ namePrefix: "compute-test123-",
+ logger: logrus.StandardLogger(),
+ deleteNIC: make(chan string),
+ deleteBlob: make(chan storage.Blob),
}
+ ap.ctx, ap.stopFunc = context.WithCancel(context.Background())
+ ap.vmClient = &VirtualMachinesClientStub{}
+ ap.netClient = &InterfacesClientStub{}
+ return &ap, cloud.ImageID("blob"), cluster, nil
}
func (*AzureInstanceSetSuite) TestCreate(c *check.C) {
c.Fatal("Error making provider", err)
}
- ap.(*AzureInstanceSet).ManageNics()
+ ap.(*azureInstanceSet).manageNics()
ap.Stop()
}
c.Fatal("Error making provider", err)
}
- ap.(*AzureInstanceSet).ManageBlobs()
+ ap.(*azureInstanceSet).manageBlobs()
ap.Stop()
}
c.Fatal("Error making provider", err)
}
- _, err = ap.(*AzureInstanceSet).netClient.Delete(context.Background(), "fakefakefake", "fakefakefake")
+ _, err = ap.(*azureInstanceSet).netClient.delete(context.Background(), "fakefakefake", "fakefakefake")
de, ok := err.(autorest.DetailedError)
if ok {
ServiceError: &azure.ServiceError{},
},
}
- wrapped := WrapAzureError(retryError)
- _, ok := wrapped.(RateLimitError)
+ wrapped := wrapAzureError(retryError)
+ _, ok := wrapped.(cloud.RateLimitError)
c.Check(ok, check.Equals, true)
quotaError := autorest.DetailedError{
},
},
}
- wrapped = WrapAzureError(quotaError)
- _, ok = wrapped.(QuotaError)
+ wrapped = wrapAzureError(quotaError)
+ _, ok = wrapped.(cloud.QuotaError)
c.Check(ok, check.Equals, true)
}
}
}
-func SetupSSHClient(c *check.C, inst Instance) (*ssh.Client, error) {
+func SetupSSHClient(c *check.C, inst cloud.Instance) (*ssh.Client, error) {
addr := inst.Address() + ":2222"
if addr == "" {
return nil, errors.New("instance has no address")
+++ /dev/null
-// Copyright (C) The Arvados Authors. All rights reserved.
-//
-// SPDX-License-Identifier: AGPL-3.0
-
-package cloud
-
-import (
- "testing"
-
- check "gopkg.in/check.v1"
-)
-
-// Gocheck boilerplate
-func Test(t *testing.T) {
- check.TestingT(t)
-}
package cloud
import (
+ "encoding/json"
"io"
"time"
//
// type exampleDriver struct {}
//
-// func (*exampleDriver) InstanceSet(config map[string]interface{}, id InstanceSetID) (InstanceSet, error) {
+// func (*exampleDriver) InstanceSet(config json.RawMessage, id InstanceSetID) (InstanceSet, error) {
// var is exampleInstanceSet
-// if err := mapstructure.Decode(config, &is); err != nil {
+// if err := json.Unmarshal(config, &is); err != nil {
// return nil, err
// }
// is.ownID = id
//
// var _ = registerCloudDriver("example", &exampleDriver{})
type Driver interface {
- InstanceSet(config map[string]interface{}, id InstanceSetID, logger logrus.FieldLogger) (InstanceSet, error)
+ InstanceSet(config json.RawMessage, id InstanceSetID, logger logrus.FieldLogger) (InstanceSet, error)
}
// DriverFunc makes a Driver using the provided function as its
// InstanceSet method. This is similar to http.HandlerFunc.
-func DriverFunc(fn func(config map[string]interface{}, id InstanceSetID, logger logrus.FieldLogger) (InstanceSet, error)) Driver {
+func DriverFunc(fn func(config json.RawMessage, id InstanceSetID, logger logrus.FieldLogger) (InstanceSet, error)) Driver {
return driverFunc(fn)
}
-type driverFunc func(config map[string]interface{}, id InstanceSetID, logger logrus.FieldLogger) (InstanceSet, error)
+type driverFunc func(config json.RawMessage, id InstanceSetID, logger logrus.FieldLogger) (InstanceSet, error)
-func (df driverFunc) InstanceSet(config map[string]interface{}, id InstanceSetID, logger logrus.FieldLogger) (InstanceSet, error) {
+func (df driverFunc) InstanceSet(config json.RawMessage, id InstanceSetID, logger logrus.FieldLogger) (InstanceSet, error) {
return df(config, id, logger)
}
func (s *FederationSuite) TestUpdateRemoteContainerRequest(c *check.C) {
defer s.localServiceReturns404(c).Close()
- req := httptest.NewRequest("PATCH", "/arvados/v1/container_requests/"+arvadostest.QueuedContainerRequestUUID,
- strings.NewReader(`{"container_request": {"priority": 696}}`))
- req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
- req.Header.Set("Content-type", "application/json")
- resp := s.testRequest(req)
- c.Check(resp.StatusCode, check.Equals, http.StatusOK)
- var cr arvados.ContainerRequest
- c.Check(json.NewDecoder(resp.Body).Decode(&cr), check.IsNil)
- c.Check(cr.UUID, check.Equals, arvadostest.QueuedContainerRequestUUID)
- c.Check(cr.Priority, check.Equals, 696)
+ setPri := func(pri int) {
+ req := httptest.NewRequest("PATCH", "/arvados/v1/container_requests/"+arvadostest.QueuedContainerRequestUUID,
+ strings.NewReader(fmt.Sprintf(`{"container_request": {"priority": %d}}`, pri)))
+ req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
+ req.Header.Set("Content-type", "application/json")
+ resp := s.testRequest(req)
+ c.Check(resp.StatusCode, check.Equals, http.StatusOK)
+ var cr arvados.ContainerRequest
+ c.Check(json.NewDecoder(resp.Body).Decode(&cr), check.IsNil)
+ c.Check(cr.UUID, check.Equals, arvadostest.QueuedContainerRequestUUID)
+ c.Check(cr.Priority, check.Equals, pri)
+ }
+ setPri(696)
+ setPri(1) // Reset fixture so side effect doesn't break other tests.
}
func (s *FederationSuite) TestCreateRemoteContainerRequest(c *check.C) {
h.handlerStack = mux
sc := *arvados.DefaultSecureClient
- sc.Timeout = time.Duration(h.Cluster.HTTPRequestTimeout)
sc.CheckRedirect = neverRedirect
h.secureClient = &sc
ic := *arvados.InsecureHTTPClient
- ic.Timeout = time.Duration(h.Cluster.HTTPRequestTimeout)
ic.CheckRedirect = neverRedirect
h.insecureClient = &ic
// error: it wouldn't help to try again, or to leave
// it for a different dispatcher process to attempt.
errorString := err.Error()
- cq.logger.WithField("ContainerUUID", ctr.UUID).Warn("cancel container with no suitable instance type")
+ logger := cq.logger.WithField("ContainerUUID", ctr.UUID)
+ logger.WithError(err).Warn("cancel container with no suitable instance type")
go func() {
+ if ctr.State == arvados.ContainerStateQueued {
+ // Can't set runtime error without
+ // locking first. If Lock() is
+ // successful, it will call addEnt()
+ // again itself, and we'll fall
+ // through to the
+ // setRuntimeError/Cancel code below.
+ err := cq.Lock(ctr.UUID)
+ if err != nil {
+ logger.WithError(err).Warn("lock failed")
+ // ...and try again on the
+ // next Update, if the problem
+ // still exists.
+ }
+ return
+ }
var err error
defer func() {
if err == nil {
if latest.State == arvados.ContainerStateCancelled {
return
}
- cq.logger.WithField("ContainerUUID", ctr.UUID).WithError(err).Warn("error while trying to cancel unsatisfiable container")
+ logger.WithError(err).Warn("error while trying to cancel unsatisfiable container")
}()
- if ctr.State == arvados.ContainerStateQueued {
- err = cq.Lock(ctr.UUID)
- if err != nil {
- return
- }
- }
err = cq.setRuntimeError(ctr.UUID, errorString)
if err != nil {
return
"fmt"
"git.curoverse.com/arvados.git/lib/cloud"
+ "git.curoverse.com/arvados.git/lib/cloud/azure"
"git.curoverse.com/arvados.git/sdk/go/arvados"
"github.com/sirupsen/logrus"
)
var drivers = map[string]cloud.Driver{
- "azure": cloud.DriverFunc(cloud.NewAzureInstanceSet),
+ "azure": azure.Driver,
}
func newInstanceSet(cluster *arvados.Cluster, setID cloud.InstanceSetID, logger logrus.FieldLogger) (cloud.InstanceSet, error) {
import (
"crypto/rand"
+ "encoding/json"
"errors"
"fmt"
"io"
"git.curoverse.com/arvados.git/lib/cloud"
"git.curoverse.com/arvados.git/sdk/go/arvados"
- "github.com/mitchellh/mapstructure"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh"
)
}
// InstanceSet returns a new *StubInstanceSet.
-func (sd *StubDriver) InstanceSet(params map[string]interface{}, id cloud.InstanceSetID, logger logrus.FieldLogger) (cloud.InstanceSet, error) {
+func (sd *StubDriver) InstanceSet(params json.RawMessage, id cloud.InstanceSetID, logger logrus.FieldLogger) (cloud.InstanceSet, error) {
if sd.holdCloudOps == nil {
sd.holdCloudOps = make(chan bool)
}
servers: map[cloud.InstanceID]*StubVM{},
}
sd.instanceSets = append(sd.instanceSets, &sis)
- return &sis, mapstructure.Decode(params, &sis)
+
+ var err error
+ if params != nil {
+ err = json.Unmarshal(params, &sis)
+ }
+ return &sis, err
}
// InstanceSets returns all instances that have been created by the
ImageID string
Driver string
- DriverParameters map[string]interface{}
+ DriverParameters json.RawMessage
}
type InstanceTypeMap map[string]InstanceType
// a number of nanoseconds.
type Duration time.Duration
-// UnmarshalJSON implements json.Unmarshaler
+// UnmarshalJSON implements json.Unmarshaler.
func (d *Duration) UnmarshalJSON(data []byte) error {
if data[0] == '"' {
return d.Set(string(data[1 : len(data)-1]))
return fmt.Errorf("duration must be given as a string like \"600s\" or \"1h30m\"")
}
-// MarshalJSON implements json.Marshaler
+// MarshalJSON implements json.Marshaler.
func (d *Duration) MarshalJSON() ([]byte, error) {
return json.Marshal(d.String())
}
-// String implements fmt.Stringer
+// String implements fmt.Stringer.
func (d Duration) String() string {
return time.Duration(d).String()
}
-// Duration returns a time.Duration
+// Duration returns a time.Duration.
func (d Duration) Duration() time.Duration {
return time.Duration(d)
}
-// Value implements flag.Value
+// Set implements the flag.Value interface and sets the duration value by using time.ParseDuration to parse the string.
func (d *Duration) Set(s string) error {
dur, err := time.ParseDuration(s)
*d = Duration(dur)
# Generated git-commit.version file
/git-commit.version
+
+# Generated when building distribution packages
+/package-build.version
sync_past_versions if syncable_updates.any?
if snapshot
snapshot.attributes = self.syncable_updates
+ snapshot.manifest_text = snapshot.signed_manifest_text
snapshot.save
end
end
transaction do
reload
check_lock_fail
- update_attributes!(state: Locked)
+ update_attributes!(state: Locked, lock_count: self.lock_count+1)
end
end
transaction do
reload(lock: 'FOR UPDATE')
check_unlock_fail
- update_attributes!(state: Queued)
+ if self.lock_count < Rails.configuration.max_container_dispatch_attempts
+ update_attributes!(state: Queued)
+ else
+ update_attributes!(state: Cancelled,
+ runtime_status: {
                           error: "Container exceeded 'max_container_dispatch_attempts' (lock_count=#{self.lock_count})."
+ })
+ end
end
end
case self.state
when Locked
- permitted.push :priority, :runtime_status, :log
+ permitted.push :priority, :runtime_status, :log, :lock_count
when Queued
permitted.push :priority
when Running
permitted.push :finished_at, *progress_attrs
when Queued, Locked
- permitted.push :finished_at, :log
+ permitted.push :finished_at, :log, :runtime_status
end
else
# > 0 = auto-create a new version when older than the specified number of seconds.
preserve_version_if_idle: -1
+  # Maximum number of times a container can be locked for dispatch
+  # before being automatically cancelled on the next unlock.
+ max_container_dispatch_attempts: 5
+
development:
force_ssl: false
cache_classes: false
--- /dev/null
+class AddContainerLockCount < ActiveRecord::Migration
+ def change
+ add_column :containers, :lock_count, :int, :null => false, :default => 0
+ end
+end
runtime_status jsonb DEFAULT '{}'::jsonb,
runtime_user_uuid text,
runtime_auth_scopes jsonb,
- runtime_token text
+ runtime_token text,
+ lock_count integer DEFAULT 0 NOT NULL
);
INSERT INTO schema_migrations (version) VALUES ('20181213183234');
+INSERT INTO schema_migrations (version) VALUES ('20190214214814');
+
version: 42,
current_version_uuid: collections(:collection_owned_by_active).uuid,
manifest_text: manifest_text,
- # portable_data_hash: "d30fe8ae534397864cb96c544f4cf102+47"
}
}
assert_response :success
assert_equal 1, resp['version']
assert_equal resp['uuid'], resp['current_version_uuid']
end
+
+ test "update collection with versioning enabled" do
+ Rails.configuration.collection_versioning = true
+ Rails.configuration.preserve_version_if_idle = 1 # 1 second
+
+ col = collections(:collection_owned_by_active)
+ assert_equal 2, col.version
+ assert col.modified_at < Time.now - 1.second
+
+ token = api_client_authorizations(:active).v2token
+ signed = Blob.sign_locator(
+ 'acbd18db4cc2f85cedef654fccc4a4d8+3',
+ key: Rails.configuration.blob_signing_key,
+ api_token: token)
+ authorize_with_token token
+ put :update, {
+ id: col.uuid,
+ collection: {
+ manifest_text: ". #{signed} 0:3:foo.txt\n",
+ },
+ }
+ assert_response :success
+ assert_equal 3, json_response['version']
+ end
end
assert_operator auth_exp, :<, db_current_time
end
+ test "Exceed maximum lock-unlock cycles" do
+ Rails.configuration.max_container_dispatch_attempts = 3
+
+ set_user_from_auth :active
+ c, cr = minimal_new
+
+ set_user_from_auth :dispatch1
+ assert_equal Container::Queued, c.state
+ assert_equal 0, c.lock_count
+
+ c.lock
+ c.reload
+ assert_equal 1, c.lock_count
+ assert_equal Container::Locked, c.state
+
+ c.unlock
+ c.reload
+ assert_equal 1, c.lock_count
+ assert_equal Container::Queued, c.state
+
+ c.lock
+ c.reload
+ assert_equal 2, c.lock_count
+ assert_equal Container::Locked, c.state
+
+ c.unlock
+ c.reload
+ assert_equal 2, c.lock_count
+ assert_equal Container::Queued, c.state
+
+ c.lock
+ c.reload
+ assert_equal 3, c.lock_count
+ assert_equal Container::Locked, c.state
+
+ c.unlock
+ c.reload
+ assert_equal 3, c.lock_count
+ assert_equal Container::Cancelled, c.state
+
+ assert_raise(ArvadosModel::LockFailedError) do
+ # Cancelled to Locked is not allowed
+ c.lock
+ end
+ end
+
test "Container queued cancel" do
set_user_from_auth :active
c, cr = minimal_new({container_count_max: 1})
"revision": "b8bc1bf767474819792c23f32d8286a45736f1c6",
"revisionTime": "2016-12-03T19:45:07Z"
},
- {
- "checksumSHA1": "ewGq4nGalpCQOHcmBTdAEQx1wW0=",
- "path": "github.com/mitchellh/mapstructure",
- "revision": "bb74f1db0675b241733089d5a1faa5dd8b0ef57b",
- "revisionTime": "2018-05-11T14:21:26Z"
- },
{
"checksumSHA1": "OFNit1Qx2DdWhotfREKodDNUwCM=",
"path": "github.com/opencontainers/go-digest",