.sort.flat_map do |parts|
[parts + [nil]] + dir_to_tree.call(File.join(parts))
end
- # Then extend that list with files in this directory.
- subnodes + tree[File.split(dirname)]
+ # Then extend that list with files in this directory, except the empty dir placeholders (0:0:. files).
+ subnodes + tree[File.split(dirname)].reject { |_, basename, size| (basename == '.') and (size == 0) }
end
dir_to_tree.call('.')
end
(If none given, leave config files alone in source tree.)
services/api_test="TEST=test/functional/arvados/v1/collections_controller_test.rb"
Restrict apiserver tests to the given file
-sdk/python_test="--test-suite test.test_keep_locator"
+sdk/python_test="--test-suite tests.test_keep_locator"
Restrict Python SDK tests to the given class
apps/workbench_test="TEST=test/integration/pipeline_instances_test.rb"
Restrict Workbench tests to the given file
lib/cmd
lib/controller
lib/crunchstat
+lib/cloud
lib/dispatchcloud
lib/dispatchcloud/container
lib/dispatchcloud/scheduler
lib/cmd
lib/controller
lib/crunchstat
+ lib/cloud
lib/dispatchcloud
lib/dispatchcloud/container
lib/dispatchcloud/scheduler
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package cloud
+
+import (
+ "context"
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-06-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-06-01/network"
+ storageacct "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-02-01/storage"
+ "github.com/Azure/azure-sdk-for-go/storage"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/azure/auth"
+ "github.com/Azure/go-autorest/autorest/to"
+ "github.com/jmcvetta/randutil"
+ "github.com/mitchellh/mapstructure"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/crypto/ssh"
+)
+
+// AzureInstanceSetConfig is the driver configuration, decoded from the
+// dispatcher's config map via mapstructure (see NewAzureInstanceSet).
+type AzureInstanceSetConfig struct {
+	SubscriptionID               string  `mapstructure:"subscription_id"`
+	ClientID                     string  `mapstructure:"key"`
+	ClientSecret                 string  `mapstructure:"secret"`
+	TenantID                     string  `mapstructure:"tenant_id"`
+	CloudEnv                     string  `mapstructure:"cloud_environment"`
+	ResourceGroup                string  `mapstructure:"resource_group"`
+	Location                     string  `mapstructure:"region"`
+	Network                      string  `mapstructure:"network"`
+	Subnet                       string  `mapstructure:"subnet"`
+	StorageAccount               string  `mapstructure:"storage_account"`
+	BlobContainer                string  `mapstructure:"blob_container"`
+	Image                        string  `mapstructure:"image"`
+	// Age (in seconds) after which unattached NICs and unleased VM
+	// image blobs are garbage collected.
+	DeleteDanglingResourcesAfter float64 `mapstructure:"delete_dangling_resources_after"`
+}
+
+// VirtualMachinesClientWrapper is the subset of
+// compute.VirtualMachinesClient used by this driver, expressed as an
+// interface so tests can substitute a stub implementation.
+type VirtualMachinesClientWrapper interface {
+	CreateOrUpdate(ctx context.Context,
+		resourceGroupName string,
+		VMName string,
+		parameters compute.VirtualMachine) (result compute.VirtualMachine, err error)
+	Delete(ctx context.Context, resourceGroupName string, VMName string) (result *http.Response, err error)
+	ListComplete(ctx context.Context, resourceGroupName string) (result compute.VirtualMachineListResultIterator, err error)
+}
+
+// VirtualMachinesClientImpl adapts the real Azure SDK client to
+// VirtualMachinesClientWrapper, waiting for long-running operations to
+// finish and wrapping errors via WrapAzureError.
+type VirtualMachinesClientImpl struct {
+	inner compute.VirtualMachinesClient
+}
+
+// CreateOrUpdate starts a VM create/update operation and blocks until
+// it completes, returning the resulting VM description. Errors are
+// wrapped with WrapAzureError so callers can detect throttling/quota.
+func (cl *VirtualMachinesClientImpl) CreateOrUpdate(ctx context.Context,
+	resourceGroupName string,
+	VMName string,
+	parameters compute.VirtualMachine) (result compute.VirtualMachine, err error) {
+
+	future, err := cl.inner.CreateOrUpdate(ctx, resourceGroupName, VMName, parameters)
+	if err != nil {
+		return compute.VirtualMachine{}, WrapAzureError(err)
+	}
+	// Previously the WaitForCompletionRef error was discarded; a
+	// failed/cancelled wait would then surface confusingly from
+	// future.Result. Propagate it explicitly.
+	if err = future.WaitForCompletionRef(ctx, cl.inner.Client); err != nil {
+		return compute.VirtualMachine{}, WrapAzureError(err)
+	}
+	r, err := future.Result(cl.inner)
+	return r, WrapAzureError(err)
+}
+
+// Delete starts a VM delete operation and blocks until it completes.
+func (cl *VirtualMachinesClientImpl) Delete(ctx context.Context, resourceGroupName string, VMName string) (result *http.Response, err error) {
+	future, err := cl.inner.Delete(ctx, resourceGroupName, VMName)
+	if err != nil {
+		return nil, WrapAzureError(err)
+	}
+	err = future.WaitForCompletionRef(ctx, cl.inner.Client)
+	return future.Response(), WrapAzureError(err)
+}
+
+// ListComplete returns an iterator over all VMs in the resource group.
+func (cl *VirtualMachinesClientImpl) ListComplete(ctx context.Context, resourceGroupName string) (result compute.VirtualMachineListResultIterator, err error) {
+	r, err := cl.inner.ListComplete(ctx, resourceGroupName)
+	return r, WrapAzureError(err)
+}
+
+// InterfacesClientWrapper is the subset of network.InterfacesClient
+// used by this driver, expressed as an interface for testability.
+type InterfacesClientWrapper interface {
+	CreateOrUpdate(ctx context.Context,
+		resourceGroupName string,
+		networkInterfaceName string,
+		parameters network.Interface) (result network.Interface, err error)
+	Delete(ctx context.Context, resourceGroupName string, networkInterfaceName string) (result *http.Response, err error)
+	ListComplete(ctx context.Context, resourceGroupName string) (result network.InterfaceListResultIterator, err error)
+}
+
+// InterfacesClientImpl adapts the real Azure SDK network interfaces
+// client to InterfacesClientWrapper.
+type InterfacesClientImpl struct {
+	inner network.InterfacesClient
+}
+
+// Delete starts a NIC delete operation and blocks until it completes.
+// (The second parameter is a NIC name; the VMName identifier is a
+// copy/paste artifact from the VM client.)
+func (cl *InterfacesClientImpl) Delete(ctx context.Context, resourceGroupName string, VMName string) (result *http.Response, err error) {
+	future, err := cl.inner.Delete(ctx, resourceGroupName, VMName)
+	if err != nil {
+		return nil, WrapAzureError(err)
+	}
+	err = future.WaitForCompletionRef(ctx, cl.inner.Client)
+	return future.Response(), WrapAzureError(err)
+}
+
+// CreateOrUpdate starts a NIC create/update operation and blocks until
+// it completes, returning the resulting interface description.
+func (cl *InterfacesClientImpl) CreateOrUpdate(ctx context.Context,
+	resourceGroupName string,
+	networkInterfaceName string,
+	parameters network.Interface) (result network.Interface, err error) {
+
+	future, err := cl.inner.CreateOrUpdate(ctx, resourceGroupName, networkInterfaceName, parameters)
+	if err != nil {
+		return network.Interface{}, WrapAzureError(err)
+	}
+	// Propagate the wait error instead of silently discarding it (the
+	// original ignored WaitForCompletionRef's return value).
+	if err = future.WaitForCompletionRef(ctx, cl.inner.Client); err != nil {
+		return network.Interface{}, WrapAzureError(err)
+	}
+	r, err := future.Result(cl.inner)
+	return r, WrapAzureError(err)
+}
+
+// ListComplete returns an iterator over all network interfaces in the
+// resource group.
+func (cl *InterfacesClientImpl) ListComplete(ctx context.Context, resourceGroupName string) (result network.InterfaceListResultIterator, err error) {
+	r, err := cl.inner.ListComplete(ctx, resourceGroupName)
+	return r, WrapAzureError(err)
+}
+
+// quotaRe matches service-error codes/messages that indicate a quota
+// or capacity limit was hit.
+var quotaRe = regexp.MustCompile(`(?i:exceed|quota|limit)`)
+
+// AzureRateLimitError wraps an Azure RequestError caused by API
+// throttling, and records when the request may be retried.
+type AzureRateLimitError struct {
+	azure.RequestError
+	earliestRetry time.Time
+}
+
+// EarliestRetry reports the earliest time at which retrying makes sense.
+func (ar *AzureRateLimitError) EarliestRetry() time.Time {
+	return ar.earliestRetry
+}
+
+// AzureQuotaError wraps an Azure RequestError caused by exceeding a
+// quota or instance limit.
+type AzureQuotaError struct {
+	azure.RequestError
+}
+
+// IsQuotaError marks this error as a quota error for callers that
+// check the QuotaError interface.
+func (ar *AzureQuotaError) IsQuotaError() bool {
+	return true
+}
+
+// WrapAzureError inspects an error returned by the Azure SDK and, when
+// it indicates API throttling or a quota problem, wraps it in
+// AzureRateLimitError or AzureQuotaError respectively. Any other error
+// (or non-Azure error) is returned unchanged.
+func WrapAzureError(err error) error {
+	de, ok := err.(autorest.DetailedError)
+	if !ok {
+		return err
+	}
+	rq, ok := de.Original.(*azure.RequestError)
+	if !ok {
+		return err
+	}
+	if rq.Response == nil {
+		return err
+	}
+	retryAfter := rq.Response.Header["Retry-After"]
+	if rq.Response.StatusCode == 429 || len(retryAfter) >= 1 {
+		// API throttling. Default retry delay is 20 seconds; note a
+		// 429 can arrive without a Retry-After header, so guard the
+		// header access (the original indexed retryAfter[0]
+		// unconditionally and would panic in that case).
+		earliestRetry := time.Now().Add(20 * time.Second)
+		if len(retryAfter) >= 1 {
+			ra := retryAfter[0]
+			if t, parseErr := http.ParseTime(ra); parseErr == nil {
+				// Header was an HTTP date.
+				earliestRetry = t
+			} else if dur, parseErr := strconv.ParseInt(ra, 10, 64); parseErr == nil {
+				// Header was a number of seconds.
+				earliestRetry = time.Now().Add(time.Duration(dur) * time.Second)
+			}
+		}
+		return &AzureRateLimitError{*rq, earliestRetry}
+	}
+	if rq.ServiceError == nil {
+		return err
+	}
+	if quotaRe.FindString(rq.ServiceError.Code) != "" || quotaRe.FindString(rq.ServiceError.Message) != "" {
+		return &AzureQuotaError{*rq}
+	}
+	return err
+}
+
+// AzureInstanceSet implements the InstanceSet interface on top of the
+// Azure compute/network/storage APIs.
+type AzureInstanceSet struct {
+	azconfig          AzureInstanceSetConfig
+	vmClient          VirtualMachinesClientWrapper
+	netClient         InterfacesClientWrapper
+	storageAcctClient storageacct.AccountsClient
+	azureEnv          azure.Environment
+	interfaces        map[string]network.Interface
+	dispatcherID      string
+	// namePrefix scopes all resources (VMs, NICs, blobs) created by
+	// this dispatcher, so listing and GC only touch our own resources.
+	namePrefix        string
+	ctx               context.Context
+	stopFunc          context.CancelFunc
+	stopWg            sync.WaitGroup
+	// Work queues consumed by background deletion goroutines (see setup).
+	deleteNIC         chan string
+	deleteBlob        chan storage.Blob
+	logger            logrus.FieldLogger
+}
+
+func NewAzureInstanceSet(config map[string]interface{}, dispatcherID InstanceSetID, logger logrus.FieldLogger) (prv InstanceSet, err error) {
+ azcfg := AzureInstanceSetConfig{}
+ if err = mapstructure.Decode(config, &azcfg); err != nil {
+ return nil, err
+ }
+ ap := AzureInstanceSet{logger: logger}
+ err = ap.setup(azcfg, string(dispatcherID))
+ if err != nil {
+ return nil, err
+ }
+ return &ap, nil
+}
+
+// setup initializes the API clients, authorizer and background workers
+// for this instance set: a ticker goroutine that garbage-collects
+// blobs every 5 minutes, plus worker goroutines that drain the
+// deleteNIC/deleteBlob queues. All goroutines exit when Stop is called.
+func (az *AzureInstanceSet) setup(azcfg AzureInstanceSetConfig, dispatcherID string) (err error) {
+	az.azconfig = azcfg
+	vmClient := compute.NewVirtualMachinesClient(az.azconfig.SubscriptionID)
+	netClient := network.NewInterfacesClient(az.azconfig.SubscriptionID)
+	storageAcctClient := storageacct.NewAccountsClient(az.azconfig.SubscriptionID)
+
+	az.azureEnv, err = azure.EnvironmentFromName(az.azconfig.CloudEnv)
+	if err != nil {
+		return err
+	}
+
+	authorizer, err := auth.ClientCredentialsConfig{
+		ClientID:     az.azconfig.ClientID,
+		ClientSecret: az.azconfig.ClientSecret,
+		TenantID:     az.azconfig.TenantID,
+		Resource:     az.azureEnv.ResourceManagerEndpoint,
+		AADEndpoint:  az.azureEnv.ActiveDirectoryEndpoint,
+	}.Authorizer()
+	if err != nil {
+		return err
+	}
+
+	vmClient.Authorizer = authorizer
+	netClient.Authorizer = authorizer
+	storageAcctClient.Authorizer = authorizer
+
+	az.vmClient = &VirtualMachinesClientImpl{vmClient}
+	az.netClient = &InterfacesClientImpl{netClient}
+	az.storageAcctClient = storageAcctClient
+
+	az.dispatcherID = dispatcherID
+	az.namePrefix = fmt.Sprintf("compute-%s-", az.dispatcherID)
+
+	az.ctx, az.stopFunc = context.WithCancel(context.Background())
+	// Add(1) must happen before the goroutine starts: the original
+	// called Add inside the goroutine, so Stop()'s Wait() could return
+	// before the goroutine registered itself, racing with channel close.
+	az.stopWg.Add(1)
+	go func() {
+		defer az.stopWg.Done()
+
+		tk := time.NewTicker(5 * time.Minute)
+		for {
+			select {
+			case <-az.ctx.Done():
+				tk.Stop()
+				return
+			case <-tk.C:
+				az.ManageBlobs()
+			}
+		}
+	}()
+
+	az.deleteNIC = make(chan string)
+	az.deleteBlob = make(chan storage.Blob)
+
+	// Four parallel workers each for NIC and blob deletion; they exit
+	// when Stop closes the channels.
+	for i := 0; i < 4; i++ {
+		go func() {
+			for {
+				nicname, ok := <-az.deleteNIC
+				if !ok {
+					return
+				}
+				_, delerr := az.netClient.Delete(context.Background(), az.azconfig.ResourceGroup, nicname)
+				if delerr != nil {
+					az.logger.WithError(delerr).Warnf("Error deleting %v", nicname)
+				} else {
+					az.logger.Printf("Deleted NIC %v", nicname)
+				}
+			}
+		}()
+		go func() {
+			for {
+				blob, ok := <-az.deleteBlob
+				if !ok {
+					return
+				}
+				err := blob.Delete(nil)
+				if err != nil {
+					az.logger.WithError(err).Warnf("Error deleting %v", blob.Name)
+				} else {
+					az.logger.Printf("Deleted blob %v", blob.Name)
+				}
+			}
+		}()
+	}
+
+	return nil
+}
+
+// Create provisions a new VM: it generates a random name with this
+// dispatcher's prefix, creates a NIC on the configured subnet, then
+// creates the VM with an OS disk copied from imageId into the
+// configured blob container. The caller must supply a "node-token"
+// tag, which is written to /home/crunch/node-token on first boot (via
+// CustomData) and later checked by VerifyHostKey.
+func (az *AzureInstanceSet) Create(
+	instanceType arvados.InstanceType,
+	imageId ImageID,
+	newTags InstanceTags,
+	publicKey ssh.PublicKey) (Instance, error) {
+
+	az.stopWg.Add(1)
+	defer az.stopWg.Done()
+
+	if len(newTags["node-token"]) == 0 {
+		return nil, fmt.Errorf("Must provide tag 'node-token'")
+	}
+
+	name, err := randutil.String(15, "abcdefghijklmnopqrstuvwxyz0123456789")
+	if err != nil {
+		return nil, err
+	}
+
+	name = az.namePrefix + name
+
+	// "created-at" drives dangling-resource GC in ManageNics/ManageBlobs.
+	timestamp := time.Now().Format(time.RFC3339Nano)
+
+	tags := make(map[string]*string)
+	tags["created-at"] = &timestamp
+	for k, v := range newTags {
+		// Copy v so each tag points at its own string, not the loop variable.
+		newstr := v
+		tags["dispatch-"+k] = &newstr
+	}
+
+	tags["dispatch-instance-type"] = &instanceType.Name
+
+	nicParameters := network.Interface{
+		Location: &az.azconfig.Location,
+		Tags:     tags,
+		InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
+			IPConfigurations: &[]network.InterfaceIPConfiguration{
+				network.InterfaceIPConfiguration{
+					Name: to.StringPtr("ip1"),
+					InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
+						Subnet: &network.Subnet{
+							ID: to.StringPtr(fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers"+
+								"/Microsoft.Network/virtualnetworks/%s/subnets/%s",
+								az.azconfig.SubscriptionID,
+								az.azconfig.ResourceGroup,
+								az.azconfig.Network,
+								az.azconfig.Subnet)),
+						},
+						PrivateIPAllocationMethod: network.Dynamic,
+					},
+				},
+			},
+		},
+	}
+	nic, err := az.netClient.CreateOrUpdate(az.ctx, az.azconfig.ResourceGroup, name+"-nic", nicParameters)
+	if err != nil {
+		return nil, WrapAzureError(err)
+	}
+
+	// Destination blob for the VM's OS disk; named after the VM so
+	// ManageBlobs can associate and garbage-collect it.
+	instance_vhd := fmt.Sprintf("https://%s.blob.%s/%s/%s-os.vhd",
+		az.azconfig.StorageAccount,
+		az.azureEnv.StorageEndpointSuffix,
+		az.azconfig.BlobContainer,
+		name)
+
+	// First-boot script writes the shared secret used by VerifyHostKey.
+	customData := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(`#!/bin/sh
+echo '%s-%s' > /home/crunch/node-token`, name, newTags["node-token"])))
+
+	vmParameters := compute.VirtualMachine{
+		Location: &az.azconfig.Location,
+		Tags:     tags,
+		VirtualMachineProperties: &compute.VirtualMachineProperties{
+			HardwareProfile: &compute.HardwareProfile{
+				VMSize: compute.VirtualMachineSizeTypes(instanceType.ProviderType),
+			},
+			StorageProfile: &compute.StorageProfile{
+				OsDisk: &compute.OSDisk{
+					OsType:       compute.Linux,
+					Name:         to.StringPtr(name + "-os"),
+					CreateOption: compute.FromImage,
+					Image: &compute.VirtualHardDisk{
+						URI: to.StringPtr(string(imageId)),
+					},
+					Vhd: &compute.VirtualHardDisk{
+						URI: &instance_vhd,
+					},
+				},
+			},
+			NetworkProfile: &compute.NetworkProfile{
+				NetworkInterfaces: &[]compute.NetworkInterfaceReference{
+					compute.NetworkInterfaceReference{
+						ID: nic.ID,
+						NetworkInterfaceReferenceProperties: &compute.NetworkInterfaceReferenceProperties{
+							Primary: to.BoolPtr(true),
+						},
+					},
+				},
+			},
+			OsProfile: &compute.OSProfile{
+				ComputerName:  &name,
+				AdminUsername: to.StringPtr("crunch"),
+				LinuxConfiguration: &compute.LinuxConfiguration{
+					DisablePasswordAuthentication: to.BoolPtr(true),
+					SSH: &compute.SSHConfiguration{
+						PublicKeys: &[]compute.SSHPublicKey{
+							compute.SSHPublicKey{
+								Path:    to.StringPtr("/home/crunch/.ssh/authorized_keys"),
+								KeyData: to.StringPtr(string(ssh.MarshalAuthorizedKey(publicKey))),
+							},
+						},
+					},
+				},
+				CustomData: &customData,
+			},
+		},
+	}
+
+	vm, err := az.vmClient.CreateOrUpdate(az.ctx, az.azconfig.ResourceGroup, name, vmParameters)
+	if err != nil {
+		// NOTE(review): the NIC created above is not rolled back here;
+		// it is left for ManageNics garbage collection.
+		return nil, WrapAzureError(err)
+	}
+
+	return &AzureInstance{
+		provider: az,
+		nic:      nic,
+		vm:       vm,
+	}, nil
+}
+
+// Instances lists all VMs in the resource group whose name carries
+// this dispatcher's prefix, pairing each with its network interface
+// (refreshing/garbage-collecting NICs via ManageNics first).
+func (az *AzureInstanceSet) Instances(InstanceTags) ([]Instance, error) {
+	az.stopWg.Add(1)
+	defer az.stopWg.Done()
+
+	interfaces, err := az.ManageNics()
+	if err != nil {
+		return nil, err
+	}
+
+	result, err := az.vmClient.ListComplete(az.ctx, az.azconfig.ResourceGroup)
+	if err != nil {
+		return nil, WrapAzureError(err)
+	}
+
+	instances := make([]Instance, 0)
+
+	// result.Next() advances one page at a time; its error is checked
+	// at the top of the following iteration.
+	for ; result.NotDone(); err = result.Next() {
+		if err != nil {
+			return nil, WrapAzureError(err)
+		}
+		if strings.HasPrefix(*result.Value().Name, az.namePrefix) {
+			// Key the NIC lookup by the VM's primary (first) interface ID.
+			instances = append(instances, &AzureInstance{
+				provider: az,
+				vm:       result.Value(),
+				nic:      interfaces[*(*result.Value().NetworkProfile.NetworkInterfaces)[0].ID]})
+		}
+	}
+	return instances, nil
+}
+
+// ManageNics returns the Azure network interface resources that are
+// attached to virtual machines, keyed by interface ID.
+// It also garbage-collects NICs that carry "namePrefix", are not
+// attached to any virtual machine, and whose "created-at" tag is more
+// than DeleteDanglingResourcesAfter seconds in the past (the age check
+// prevents racing against, and deleting, newly created NICs).
+func (az *AzureInstanceSet) ManageNics() (map[string]network.Interface, error) {
+	az.stopWg.Add(1)
+	defer az.stopWg.Done()
+
+	result, err := az.netClient.ListComplete(az.ctx, az.azconfig.ResourceGroup)
+	if err != nil {
+		return nil, WrapAzureError(err)
+	}
+
+	interfaces := make(map[string]network.Interface)
+
+	timestamp := time.Now()
+	for ; result.NotDone(); err = result.Next() {
+		if err != nil {
+			// Listing is best-effort: return what we have so far.
+			az.logger.WithError(err).Warnf("Error listing nics")
+			return interfaces, nil
+		}
+		if strings.HasPrefix(*result.Value().Name, az.namePrefix) {
+			if result.Value().VirtualMachine != nil {
+				interfaces[*result.Value().ID] = result.Value()
+			} else {
+				// Dangling NIC: delete it (via the worker queue) once
+				// it is old enough.
+				if result.Value().Tags["created-at"] != nil {
+					created_at, err := time.Parse(time.RFC3339Nano, *result.Value().Tags["created-at"])
+					if err == nil {
+						if timestamp.Sub(created_at).Seconds() > az.azconfig.DeleteDanglingResourcesAfter {
+							az.logger.Printf("Will delete %v because it is older than %v s", *result.Value().Name, az.azconfig.DeleteDanglingResourcesAfter)
+							az.deleteNIC <- *result.Value().Name
+						}
+					}
+				}
+			}
+		}
+	}
+	return interfaces, nil
+}
+
+// ManageBlobs garbage collects blobs (VM disk images) in the
+// configured storage account container. It deletes blobs which
+// have "namePrefix", are "available" (i.e. not leased to a VM), and
+// haven't been modified for DeleteDanglingResourcesAfter seconds.
+// Errors are logged and swallowed: this runs periodically from the
+// ticker goroutine started in setup.
+func (az *AzureInstanceSet) ManageBlobs() {
+	result, err := az.storageAcctClient.ListKeys(az.ctx, az.azconfig.ResourceGroup, az.azconfig.StorageAccount)
+	if err != nil {
+		az.logger.WithError(err).Warn("Couldn't get account keys")
+		return
+	}
+
+	// Use the first account key to build a blob-storage client.
+	key1 := *(*result.Keys)[0].Value
+	client, err := storage.NewBasicClientOnSovereignCloud(az.azconfig.StorageAccount, key1, az.azureEnv)
+	if err != nil {
+		az.logger.WithError(err).Warn("Couldn't make client")
+		return
+	}
+
+	blobsvc := client.GetBlobService()
+	blobcont := blobsvc.GetContainerReference(az.azconfig.BlobContainer)
+
+	page := storage.ListBlobsParameters{Prefix: az.namePrefix}
+	timestamp := time.Now()
+
+	// Walk the listing page by page via NextMarker.
+	for {
+		response, err := blobcont.ListBlobs(page)
+		if err != nil {
+			az.logger.WithError(err).Warn("Error listing blobs")
+			return
+		}
+		for _, b := range response.Blobs {
+			age := timestamp.Sub(time.Time(b.Properties.LastModified))
+			if b.Properties.BlobType == storage.BlobTypePage &&
+				b.Properties.LeaseState == "available" &&
+				b.Properties.LeaseStatus == "unlocked" &&
+				age.Seconds() > az.azconfig.DeleteDanglingResourcesAfter {
+
+				az.logger.Printf("Blob %v is unlocked and not modified for %v seconds, will delete", b.Name, age.Seconds())
+				az.deleteBlob <- b
+			}
+		}
+		if response.NextMarker != "" {
+			page.Marker = response.NextMarker
+		} else {
+			break
+		}
+	}
+}
+
+// Stop cancels the context, waits for in-flight operations and the
+// ticker goroutine to finish, then closes the deletion queues so the
+// worker goroutines exit.
+func (az *AzureInstanceSet) Stop() {
+	az.stopFunc()
+	az.stopWg.Wait()
+	close(az.deleteNIC)
+	close(az.deleteBlob)
+}
+
+// AzureInstance implements the Instance interface for a single Azure
+// VM, holding the VM description and its primary network interface.
+type AzureInstance struct {
+	provider *AzureInstanceSet
+	nic      network.Interface
+	vm       compute.VirtualMachine
+}
+
+// ID returns the Azure resource ID of the VM.
+func (ai *AzureInstance) ID() InstanceID {
+	return InstanceID(*ai.vm.ID)
+}
+
+// String returns the VM name.
+func (ai *AzureInstance) String() string {
+	return *ai.vm.Name
+}
+
+// ProviderType returns the Azure VM size (e.g. "Standard_D1_v2").
+func (ai *AzureInstance) ProviderType() string {
+	return string(ai.vm.VirtualMachineProperties.HardwareProfile.VMSize)
+}
+
+// SetTags replaces the dispatcher-owned ("dispatch-" prefixed) tags on
+// the VM with newTags, preserving all non-dispatch tags, and updates
+// the cached VM description.
+func (ai *AzureInstance) SetTags(newTags InstanceTags) error {
+	ai.provider.stopWg.Add(1)
+	defer ai.provider.stopWg.Done()
+
+	tags := make(map[string]*string)
+
+	// Keep tags we don't own.
+	for k, v := range ai.vm.Tags {
+		if !strings.HasPrefix(k, "dispatch-") {
+			tags[k] = v
+		}
+	}
+	for k, v := range newTags {
+		// Copy v so each tag gets its own string, not the loop variable.
+		newstr := v
+		tags["dispatch-"+k] = &newstr
+	}
+
+	vmParameters := compute.VirtualMachine{
+		Location: &ai.provider.azconfig.Location,
+		Tags:     tags,
+	}
+	vm, err := ai.provider.vmClient.CreateOrUpdate(ai.provider.ctx, ai.provider.azconfig.ResourceGroup, *ai.vm.Name, vmParameters)
+	if err != nil {
+		return WrapAzureError(err)
+	}
+	ai.vm = vm
+
+	return nil
+}
+
+// Tags returns the dispatcher-owned tags of this instance: the VM tags
+// carrying the "dispatch-" prefix, with that prefix stripped.
+func (ai *AzureInstance) Tags() InstanceTags {
+	result := map[string]string{}
+	for key, val := range ai.vm.Tags {
+		if !strings.HasPrefix(key, "dispatch-") {
+			continue
+		}
+		result[strings.TrimPrefix(key, "dispatch-")] = *val
+	}
+	return result
+}
+
+// Destroy deletes the VM. Its NIC and OS-disk blob are cleaned up
+// later by the ManageNics/ManageBlobs garbage collectors.
+func (ai *AzureInstance) Destroy() error {
+	ai.provider.stopWg.Add(1)
+	defer ai.provider.stopWg.Done()
+
+	_, err := ai.provider.vmClient.Delete(ai.provider.ctx, ai.provider.azconfig.ResourceGroup, *ai.vm.Name)
+	return WrapAzureError(err)
+}
+
+// Address returns the private IP address of the instance's primary
+// (first) IP configuration.
+func (ai *AzureInstance) Address() string {
+	return *(*ai.nic.IPConfigurations)[0].PrivateIPAddress
+}
+
+// VerifyHostKey checks the SSH host key of a newly connected instance.
+// On first contact it verifies the node-token secret (written by the
+// boot script from Create) over the SSH session, reads the host key
+// fingerprint from the instance, and pins it in the
+// "ssh-pubkey-fingerprint" tag; on later contacts it just compares the
+// received key's fingerprint against the pinned tag.
+func (ai *AzureInstance) VerifyHostKey(receivedKey ssh.PublicKey, client *ssh.Client) error {
+	ai.provider.stopWg.Add(1)
+	defer ai.provider.stopWg.Done()
+
+	remoteFingerprint := ssh.FingerprintSHA256(receivedKey)
+
+	tags := ai.Tags()
+
+	// Fast path: fingerprint already pinned.
+	tg := tags["ssh-pubkey-fingerprint"]
+	if tg != "" {
+		if remoteFingerprint == tg {
+			return nil
+		} else {
+			return fmt.Errorf("Key fingerprint did not match, expected %q got %q", tg, remoteFingerprint)
+		}
+	}
+
+	// First contact: authenticate the instance via the node-token
+	// shared secret before trusting its host key.
+	nodetokenTag := tags["node-token"]
+	if nodetokenTag == "" {
+		return fmt.Errorf("Missing node token tag")
+	}
+
+	sess, err := client.NewSession()
+	if err != nil {
+		return err
+	}
+
+	nodetokenbytes, err := sess.Output("cat /home/crunch/node-token")
+	if err != nil {
+		return err
+	}
+
+	nodetoken := strings.TrimSpace(string(nodetokenbytes))
+
+	expectedToken := fmt.Sprintf("%s-%s", *ai.vm.Name, nodetokenTag)
+
+	if strings.TrimSpace(nodetoken) != expectedToken {
+		return fmt.Errorf("Node token did not match, expected %q got %q", expectedToken, nodetoken)
+	}
+
+	// Token matched: read the host key fingerprint from the instance
+	// itself and confirm it matches the key we received.
+	sess, err = client.NewSession()
+	if err != nil {
+		return err
+	}
+
+	keyfingerprintbytes, err := sess.Output("ssh-keygen -E sha256 -l -f /etc/ssh/ssh_host_rsa_key.pub")
+	if err != nil {
+		return err
+	}
+
+	// ssh-keygen -l output is "bits fingerprint comment"; field 1 is
+	// the fingerprint.
+	sp := strings.Split(string(keyfingerprintbytes), " ")
+
+	if remoteFingerprint != sp[1] {
+		return fmt.Errorf("Key fingerprint did not match, expected %q got %q", sp[1], remoteFingerprint)
+	}
+
+	// Pin the fingerprint and retire the one-time node token.
+	tags["ssh-pubkey-fingerprint"] = sp[1]
+	delete(tags, "node-token")
+	ai.SetTags(tags)
+	return nil
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+//
+//
+// How to manually run individual tests against the real cloud
+//
+// $ go test -v git.curoverse.com/arvados.git/lib/cloud -live-azure-cfg azconfig.yml -check.f=TestListInstances
+//
+// Example azconfig.yml:
+//
+// subscription_id: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+// key: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+// region: centralus
+// cloud_environment: AzurePublicCloud
+// secret: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+// tenant_id: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+// resource_group: zzzzz
+// network: zzzzz
+// subnet: zzzzz-subnet-private
+// storage_account: example
+// blob_container: vhds
+// image: "https://example.blob.core.windows.net/system/Microsoft.Compute/Images/images/zzzzz-compute-osDisk.XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX.vhd"
+// delete_dangling_resources_after: 20
+// authorized_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLQS1ExT2+WjA0d/hntEAyAtgeN1W2ik2QX8c2zO6HjlPHWXL92r07W0WMuDib40Pcevpi1BXeBWXA9ZB5KKMJB+ukaAu22KklnQuUmNvk6ZXnPKSkGxuCYvPQb08WhHf3p1VxiKfP3iauedBDM4x9/bkJohlBBQiFXzNUcQ+a6rKiMzmJN2gbL8ncyUzc+XQ5q4JndTwTGtOlzDiGOc9O4z5Dd76wtAVJneOuuNpwfFRVHThpJM6VThpCZOnl8APaceWXKeuwOuCae3COZMz++xQfxOfZ9Z8aIwo+TlQhsRaNfZ4Vjrop6ej8dtfZtgUFKfbXEOYaHrGrWGotFDTD example@example"
+
+package cloud
+
+import (
+ "context"
+ "errors"
+ "flag"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "os"
+ "time"
+
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+ "git.curoverse.com/arvados.git/sdk/go/config"
+ "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-06-01/compute"
+ "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-06-01/network"
+ "github.com/Azure/azure-sdk-for-go/storage"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/to"
+ "github.com/jmcvetta/randutil"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/crypto/ssh"
+ check "gopkg.in/check.v1"
+)
+
+// AzureInstanceSetSuite is the gocheck suite for the Azure driver.
+type AzureInstanceSetSuite struct{}
+
+var _ = check.Suite(&AzureInstanceSetSuite{})
+
+// VirtualMachinesClientStub fakes VirtualMachinesClientWrapper for
+// offline tests.
+type VirtualMachinesClientStub struct{}
+
+// testKey is a throwaway SSH public key used by TestCreate.
+var testKey []byte = []byte(`ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLQS1ExT2+WjA0d/hntEAyAtgeN1W2ik2QX8c2zO6HjlPHWXL92r07W0WMuDib40Pcevpi1BXeBWXA9ZB5KKMJB+ukaAu22KklnQuUmNvk6ZXnPKSkGxuCYvPQb08WhHf3p1VxiKfP3iauedBDM4x9/bkJohlBBQiFXzNUcQ+a6rKiMzmJN2gbL8ncyUzc+XQ5q4JndTwTGtOlzDiGOc9O4z5Dd76wtAVJneOuuNpwfFRVHThpJM6VThpCZOnl8APaceWXKeuwOuCae3COZMz++xQfxOfZ9Z8aIwo+TlQhsRaNfZ4Vjrop6ej8dtfZtgUFKfbXEOYaHrGrWGotFDTD example@example`)
+
+// CreateOrUpdate echoes the parameters back, using the VM name as ID.
+func (*VirtualMachinesClientStub) CreateOrUpdate(ctx context.Context,
+	resourceGroupName string,
+	VMName string,
+	parameters compute.VirtualMachine) (result compute.VirtualMachine, err error) {
+	parameters.ID = &VMName
+	parameters.Name = &VMName
+	return parameters, nil
+}
+
+// Delete is a no-op.
+func (*VirtualMachinesClientStub) Delete(ctx context.Context, resourceGroupName string, VMName string) (result *http.Response, err error) {
+	return nil, nil
+}
+
+// ListComplete returns an empty iterator.
+func (*VirtualMachinesClientStub) ListComplete(ctx context.Context, resourceGroupName string) (result compute.VirtualMachineListResultIterator, err error) {
+	return compute.VirtualMachineListResultIterator{}, nil
+}
+
+// InterfacesClientStub fakes InterfacesClientWrapper for offline tests.
+type InterfacesClientStub struct{}
+
+// CreateOrUpdate echoes the parameters back with a fixed private IP.
+func (*InterfacesClientStub) CreateOrUpdate(ctx context.Context,
+	resourceGroupName string,
+	nicName string,
+	parameters network.Interface) (result network.Interface, err error) {
+	parameters.ID = to.StringPtr(nicName)
+	(*parameters.IPConfigurations)[0].PrivateIPAddress = to.StringPtr("192.168.5.5")
+	return parameters, nil
+}
+
+// Delete is a no-op.
+func (*InterfacesClientStub) Delete(ctx context.Context, resourceGroupName string, VMName string) (result *http.Response, err error) {
+	return nil, nil
+}
+
+// ListComplete returns an empty iterator.
+func (*InterfacesClientStub) ListComplete(ctx context.Context, resourceGroupName string) (result network.InterfaceListResultIterator, err error) {
+	return network.InterfaceListResultIterator{}, nil
+}
+
+// -live-azure-cfg switches the suite from stubbed clients to the real
+// Azure API, configured from the given YAML file (see file header).
+var live = flag.String("live-azure-cfg", "", "Test with real azure API, provide config file")
+
+// GetInstanceSet builds the InstanceSet under test: a real Azure-backed
+// one when -live-azure-cfg is given, otherwise one wired to the stubs.
+func GetInstanceSet() (InstanceSet, ImageID, arvados.Cluster, error) {
+	cluster := arvados.Cluster{
+		InstanceTypes: arvados.InstanceTypeMap(map[string]arvados.InstanceType{
+			"tiny": arvados.InstanceType{
+				Name:         "tiny",
+				ProviderType: "Standard_D1_v2",
+				VCPUs:        1,
+				RAM:          4000000000,
+				Scratch:      10000000000,
+				Price:        .02,
+				Preemptible:  false,
+			},
+		})}
+	if *live != "" {
+		cfg := make(map[string]interface{})
+		err := config.LoadFile(&cfg, *live)
+		if err != nil {
+			return nil, ImageID(""), cluster, err
+		}
+		ap, err := NewAzureInstanceSet(cfg, "test123", logrus.StandardLogger())
+		return ap, ImageID(cfg["image"].(string)), cluster, err
+	} else {
+		// Offline: hand-assemble the instance set with stub clients,
+		// bypassing setup() (no background GC goroutines are started).
+		ap := AzureInstanceSet{
+			azconfig: AzureInstanceSetConfig{
+				BlobContainer: "vhds",
+			},
+			dispatcherID: "test123",
+			namePrefix:   "compute-test123-",
+			logger:       logrus.StandardLogger(),
+			deleteNIC:    make(chan string),
+			deleteBlob:   make(chan storage.Blob),
+		}
+		ap.ctx, ap.stopFunc = context.WithCancel(context.Background())
+		ap.vmClient = &VirtualMachinesClientStub{}
+		ap.netClient = &InterfacesClientStub{}
+		return &ap, ImageID("blob"), cluster, nil
+	}
+}
+
+// TestCreate exercises InstanceSet.Create with a random node token and
+// the canned test SSH key.
+func (*AzureInstanceSetSuite) TestCreate(c *check.C) {
+	ap, img, cluster, err := GetInstanceSet()
+	if err != nil {
+		c.Fatal("Error making provider", err)
+	}
+
+	pk, _, _, _, err := ssh.ParseAuthorizedKey(testKey)
+	c.Assert(err, check.IsNil)
+
+	nodetoken, err := randutil.String(40, "abcdefghijklmnopqrstuvwxyz0123456789")
+	c.Assert(err, check.IsNil)
+
+	inst, err := ap.Create(cluster.InstanceTypes["tiny"],
+		img, map[string]string{
+			"node-token": nodetoken},
+		pk)
+
+	c.Assert(err, check.IsNil)
+
+	tg := inst.Tags()
+	log.Printf("Result %v %v %v", inst.String(), inst.Address(), tg)
+
+}
+
+// TestListInstances lists instances and logs their tags; with stubs
+// the list is empty, against the live API it shows real VMs.
+func (*AzureInstanceSetSuite) TestListInstances(c *check.C) {
+	ap, _, _, err := GetInstanceSet()
+	if err != nil {
+		c.Fatal("Error making provider", err)
+	}
+
+	l, err := ap.Instances(nil)
+
+	c.Assert(err, check.IsNil)
+
+	for _, i := range l {
+		tg := i.Tags()
+		log.Printf("%v %v %v", i.String(), i.Address(), tg)
+	}
+}
+
+// TestManageNics runs the NIC garbage collector once, then shuts down.
+func (*AzureInstanceSetSuite) TestManageNics(c *check.C) {
+	ap, _, _, err := GetInstanceSet()
+	if err != nil {
+		c.Fatal("Error making provider", err)
+	}
+
+	ap.(*AzureInstanceSet).ManageNics()
+	ap.Stop()
+}
+
+// TestManageBlobs runs the blob garbage collector once, then shuts down.
+func (*AzureInstanceSetSuite) TestManageBlobs(c *check.C) {
+	ap, _, _, err := GetInstanceSet()
+	if err != nil {
+		c.Fatal("Error making provider", err)
+	}
+
+	ap.(*AzureInstanceSet).ManageBlobs()
+	ap.Stop()
+}
+
+// TestDestroyInstances destroys every listed instance. NOTE(review):
+// against the live API this deletes all of this dispatcher's VMs.
+func (*AzureInstanceSetSuite) TestDestroyInstances(c *check.C) {
+	ap, _, _, err := GetInstanceSet()
+	if err != nil {
+		c.Fatal("Error making provider", err)
+	}
+
+	l, err := ap.Instances(nil)
+	c.Assert(err, check.IsNil)
+
+	for _, i := range l {
+		c.Check(i.Destroy(), check.IsNil)
+	}
+}
+
+// TestDeleteFake deletes a nonexistent NIC to observe the shape of the
+// Azure error response (it only logs; no assertions on the error).
+func (*AzureInstanceSetSuite) TestDeleteFake(c *check.C) {
+	ap, _, _, err := GetInstanceSet()
+	if err != nil {
+		c.Fatal("Error making provider", err)
+	}
+
+	_, err = ap.(*AzureInstanceSet).netClient.Delete(context.Background(), "fakefakefake", "fakefakefake")
+
+	de, ok := err.(autorest.DetailedError)
+	if ok {
+		rq := de.Original.(*azure.RequestError)
+
+		log.Printf("%v %q %q", rq.Response.StatusCode, rq.ServiceError.Code, rq.ServiceError.Message)
+	}
+}
+
+// TestWrapError checks that WrapAzureError classifies a 429 response
+// as a RateLimitError and a quota-message response as a QuotaError.
+func (*AzureInstanceSetSuite) TestWrapError(c *check.C) {
+	retryError := autorest.DetailedError{
+		Original: &azure.RequestError{
+			DetailedError: autorest.DetailedError{
+				Response: &http.Response{
+					StatusCode: 429,
+					Header:     map[string][]string{"Retry-After": []string{"123"}},
+				},
+			},
+			ServiceError: &azure.ServiceError{},
+		},
+	}
+	wrapped := WrapAzureError(retryError)
+	_, ok := wrapped.(RateLimitError)
+	c.Check(ok, check.Equals, true)
+
+	quotaError := autorest.DetailedError{
+		Original: &azure.RequestError{
+			DetailedError: autorest.DetailedError{
+				Response: &http.Response{
+					StatusCode: 503,
+				},
+			},
+			// Message matches quotaRe ("quota").
+			ServiceError: &azure.ServiceError{
+				Message: "No more quota",
+			},
+		},
+	}
+	wrapped = WrapAzureError(quotaError)
+	_, ok = wrapped.(QuotaError)
+	c.Check(ok, check.Equals, true)
+}
+
+// TestSetTags sets a tag on the first listed instance (if any), then
+// re-lists and logs the tags. Live-API oriented; no-op with stubs.
+func (*AzureInstanceSetSuite) TestSetTags(c *check.C) {
+	ap, _, _, err := GetInstanceSet()
+	if err != nil {
+		c.Fatal("Error making provider", err)
+	}
+	l, err := ap.Instances(nil)
+	c.Assert(err, check.IsNil)
+
+	if len(l) > 0 {
+		err = l[0].SetTags(map[string]string{"foo": "bar"})
+		if err != nil {
+			c.Fatal("Error setting tags", err)
+		}
+	}
+	l, err = ap.Instances(nil)
+	c.Assert(err, check.IsNil)
+
+	if len(l) > 0 {
+		tg := l[0].Tags()
+		log.Printf("tags are %v", tg)
+	}
+}
+
+// TestSSH connects to the first listed instance over SSH (including
+// host-key verification via VerifyHostKey) and reads its node token.
+// Live-API oriented; no-op with stubs.
+func (*AzureInstanceSetSuite) TestSSH(c *check.C) {
+	ap, _, _, err := GetInstanceSet()
+	if err != nil {
+		c.Fatal("Error making provider", err)
+	}
+	l, err := ap.Instances(nil)
+	c.Assert(err, check.IsNil)
+
+	if len(l) > 0 {
+
+		sshclient, err := SetupSSHClient(c, l[0])
+		c.Assert(err, check.IsNil)
+
+		sess, err := sshclient.NewSession()
+		c.Assert(err, check.IsNil)
+
+		out, err := sess.Output("cat /home/crunch/node-token")
+		c.Assert(err, check.IsNil)
+
+		log.Printf("%v", string(out))
+
+		sshclient.Conn.Close()
+	}
+}
+
+// SetupSSHClient dials the instance on port 2222 as user "crunch",
+// authenticating with the private key in ./azconfig_sshkey, captures
+// the host key presented by the server, and has the instance verify it
+// via VerifyHostKey before returning the connected client.
+func SetupSSHClient(c *check.C, inst Instance) (*ssh.Client, error) {
+	// Check the bare address before appending the port: the original
+	// compared `inst.Address() + ":2222"` against "", which could
+	// never be true, so the empty-address guard was dead code.
+	if inst.Address() == "" {
+		return nil, errors.New("instance has no address")
+	}
+	addr := inst.Address() + ":2222"
+
+	f, err := os.Open("azconfig_sshkey")
+	c.Assert(err, check.IsNil)
+
+	keybytes, err := ioutil.ReadAll(f)
+	c.Assert(err, check.IsNil)
+
+	priv, err := ssh.ParsePrivateKey(keybytes)
+	c.Assert(err, check.IsNil)
+
+	// Capture the server's host key for later verification instead of
+	// rejecting or trusting it here.
+	var receivedKey ssh.PublicKey
+	client, err := ssh.Dial("tcp", addr, &ssh.ClientConfig{
+		User: "crunch",
+		Auth: []ssh.AuthMethod{
+			ssh.PublicKeys(priv),
+		},
+		HostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error {
+			receivedKey = key
+			return nil
+		},
+		Timeout: time.Minute,
+	})
+
+	if err != nil {
+		return nil, err
+	} else if receivedKey == nil {
+		return nil, errors.New("BUG: key was never provided to HostKeyCallback")
+	}
+
+	err = inst.VerifyHostKey(receivedKey, client)
+	c.Assert(err, check.IsNil)
+
+	return client, nil
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package cloud
+
+import (
+ "testing"
+
+ check "gopkg.in/check.v1"
+)
+
+// Gocheck boilerplate: hook the gocheck suites into "go test".
+func Test(t *testing.T) {
+	check.TestingT(t)
+}
"time"
"git.curoverse.com/arvados.git/sdk/go/arvados"
+ "github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh"
)
//
// var _ = registerCloudDriver("example", &exampleDriver{})
type Driver interface {
- InstanceSet(config map[string]interface{}, id InstanceSetID) (InstanceSet, error)
+ InstanceSet(config map[string]interface{}, id InstanceSetID, logger logrus.FieldLogger) (InstanceSet, error)
}
// DriverFunc makes a Driver using the provided function as its
// InstanceSet method. This is similar to http.HandlerFunc.
-func DriverFunc(fn func(config map[string]interface{}, id InstanceSetID) (InstanceSet, error)) Driver {
+func DriverFunc(fn func(config map[string]interface{}, id InstanceSetID, logger logrus.FieldLogger) (InstanceSet, error)) Driver {
return driverFunc(fn)
}
-type driverFunc func(config map[string]interface{}, id InstanceSetID) (InstanceSet, error)
+type driverFunc func(config map[string]interface{}, id InstanceSetID, logger logrus.FieldLogger) (InstanceSet, error)
-func (df driverFunc) InstanceSet(config map[string]interface{}, id InstanceSetID) (InstanceSet, error) {
- return df(config, id)
+func (df driverFunc) InstanceSet(config map[string]interface{}, id InstanceSetID, logger logrus.FieldLogger) (InstanceSet, error) {
+ return df(config, id, logger)
}
"git.curoverse.com/arvados.git/sdk/go/arvadostest"
"git.curoverse.com/arvados.git/sdk/go/httpserver"
"git.curoverse.com/arvados.git/sdk/go/keepclient"
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
check "gopkg.in/check.v1"
)
"git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/httpserver"
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
check "gopkg.in/check.v1"
)
"time"
"git.curoverse.com/arvados.git/sdk/go/arvados"
- "github.com/Sirupsen/logrus"
"github.com/prometheus/client_golang/prometheus"
+ "github.com/sirupsen/logrus"
)
type typeChooser func(*arvados.Container) (arvados.InstanceType, error)
"git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/auth"
"git.curoverse.com/arvados.git/sdk/go/httpserver"
- "github.com/Sirupsen/logrus"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
+ "github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh"
)
disp.sshKey = key
}
- instanceSet, err := newInstanceSet(disp.Cluster, disp.InstanceSetID)
+ instanceSet, err := newInstanceSet(disp.Cluster, disp.InstanceSetID, disp.logger)
if err != nil {
disp.logger.Fatalf("error initializing driver: %s", err)
}
"git.curoverse.com/arvados.git/lib/dispatchcloud/test"
"git.curoverse.com/arvados.git/sdk/go/arvados"
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh"
check "gopkg.in/check.v1"
)
"git.curoverse.com/arvados.git/lib/cloud"
"git.curoverse.com/arvados.git/sdk/go/arvados"
+ "github.com/sirupsen/logrus"
)
-var drivers = map[string]cloud.Driver{}
+var drivers = map[string]cloud.Driver{
+ "azure": cloud.DriverFunc(cloud.NewAzureInstanceSet),
+}
-func newInstanceSet(cluster *arvados.Cluster, setID cloud.InstanceSetID) (cloud.InstanceSet, error) {
+func newInstanceSet(cluster *arvados.Cluster, setID cloud.InstanceSetID, logger logrus.FieldLogger) (cloud.InstanceSet, error) {
driver, ok := drivers[cluster.CloudVMs.Driver]
if !ok {
return nil, fmt.Errorf("unsupported cloud driver %q", cluster.CloudVMs.Driver)
}
- return driver.InstanceSet(cluster.CloudVMs.DriverParameters, setID)
+ return driver.InstanceSet(cluster.CloudVMs.DriverParameters, setID, logger)
}
"git.curoverse.com/arvados.git/lib/cloud"
"git.curoverse.com/arvados.git/lib/dispatchcloud/container"
"git.curoverse.com/arvados.git/sdk/go/arvados"
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
)
func (sch *Scheduler) runQueue() {
"git.curoverse.com/arvados.git/lib/dispatchcloud/test"
"git.curoverse.com/arvados.git/lib/dispatchcloud/worker"
"git.curoverse.com/arvados.git/sdk/go/arvados"
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
check "gopkg.in/check.v1"
)
"sync"
"time"
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
)
// A Scheduler maps queued containers onto unallocated workers in
"git.curoverse.com/arvados.git/lib/dispatchcloud/container"
"git.curoverse.com/arvados.git/sdk/go/arvados"
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
)
// sync resolves discrepancies between the queue and the pool:
"git.curoverse.com/arvados.git/lib/cloud"
"git.curoverse.com/arvados.git/sdk/go/arvados"
- "github.com/Sirupsen/logrus"
"github.com/mitchellh/mapstructure"
+ "github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh"
)
}
// InstanceSet returns a new *StubInstanceSet.
-func (sd *StubDriver) InstanceSet(params map[string]interface{}, id cloud.InstanceSetID) (cloud.InstanceSet, error) {
+func (sd *StubDriver) InstanceSet(params map[string]interface{}, id cloud.InstanceSetID,
+ logger logrus.FieldLogger) (cloud.InstanceSet, error) {
+
sis := StubInstanceSet{
driver: sd,
servers: map[cloud.InstanceID]*StubVM{},
"git.curoverse.com/arvados.git/lib/cloud"
"git.curoverse.com/arvados.git/sdk/go/arvados"
- "github.com/Sirupsen/logrus"
"github.com/prometheus/client_golang/prometheus"
+ "github.com/sirupsen/logrus"
)
const (
"git.curoverse.com/arvados.git/lib/cloud"
"git.curoverse.com/arvados.git/lib/dispatchcloud/test"
"git.curoverse.com/arvados.git/sdk/go/arvados"
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
check "gopkg.in/check.v1"
)
"git.curoverse.com/arvados.git/lib/cloud"
"git.curoverse.com/arvados.git/sdk/go/arvados"
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
)
// State indicates whether a worker is available to do work, and (if
"git.curoverse.com/arvados.git/lib/cmd"
"git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/httpserver"
- "github.com/Sirupsen/logrus"
"github.com/coreos/go-systemd/daemon"
+ "github.com/sirupsen/logrus"
)
type Handler interface {
import (
"context"
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
)
var (
"git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
)
const (
"time"
"git.curoverse.com/arvados.git/sdk/go/stats"
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
)
type contextKey struct {
"testing"
"time"
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
check "gopkg.in/check.v1"
)
"git.curoverse.com/arvados.git/sdk/go/auth"
"git.curoverse.com/arvados.git/sdk/go/stats"
- "github.com/Sirupsen/logrus"
"github.com/gogo/protobuf/jsonpb"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
+ "github.com/sirupsen/logrus"
)
type Handler interface {
from __future__ import absolute_import
from . import config
+import re
+
+def escape(path):
+    # Escape a manifest path component per the Keep manifest spec.
+    # The backslash must be encoded FIRST ('\' -> '\134'); otherwise the
+    # backslashes introduced by the second pass would be double-escaped.
+    path = re.sub('\\\\', lambda m: '\\134', path)
+    # Then encode ':' and all control chars / space (\000-\040) as
+    # three-digit octal escapes (e.g. ' ' -> '\040').
+    path = re.sub('[:\000-\040]', lambda m: "\\%03o" % ord(m.group(0)), path)
+    return path
+
def normalize_stream(stream_name, stream):
"""Take manifest stream and return a list of tokens in normalized format.
"""
- stream_name = stream_name.replace(' ', '\\040')
+ stream_name = escape(stream_name)
stream_tokens = [stream_name]
sortedfiles = list(stream.keys())
sortedfiles.sort()
for streamfile in sortedfiles:
# Add in file segments
current_span = None
- fout = streamfile.replace(' ', '\\040')
+ fout = escape(streamfile)
for segment in stream[streamfile]:
# Collapse adjacent segments
streamoffset = blocks[segment.locator] + segment.segment_offset
from .arvfile import split, _FileLikeObjectBase, ArvadosFile, ArvadosFileWriter, ArvadosFileReader, WrappableFile, _BlockManager, synchronized, must_be_writable, NoopLock
from .keep import KeepLocator, KeepClient
from .stream import StreamReader
-from ._normalize_stream import normalize_stream
+from ._normalize_stream import normalize_stream, escape
from ._ranges import Range, LocatorAndRange
from .safeapi import ThreadSafeApiCache
import arvados.config as config
def stream_name(self):
raise NotImplementedError()
+
@synchronized
def has_remote_blocks(self):
"""Recursively check for a +R segment locator signature."""
if stream:
buf.append(" ".join(normalize_stream(stream_name, stream)) + "\n")
for dirname in [s for s in sorted_keys if isinstance(self[s], RichCollectionBase)]:
- buf.append(self[dirname].manifest_text(stream_name=os.path.join(stream_name, dirname), strip=strip, normalize=True, only_committed=only_committed))
+ buf.append(self[dirname].manifest_text(
+ stream_name=os.path.join(stream_name, dirname),
+ strip=strip, normalize=True, only_committed=only_committed))
return "".join(buf)
else:
if strip:
self.name = newname
self.lock = self.parent.root_collection().lock
+    @synchronized
+    def _get_manifest_text(self, stream_name, strip, normalize, only_committed=False):
+        """Encode empty directories by using an empty file named "\\056" (".").
+
+        Returns the manifest line for this subcollection; when it holds no
+        items, emit a placeholder stream with a zero-length file token so the
+        empty directory is preserved in the manifest.
+        """
+        if len(self._items) == 0:
+            return "%s %s 0:0:\\056\n" % (
+                escape(stream_name), config.EMPTY_BLOCK_LOCATOR)
+        return super(Subcollection, self)._get_manifest_text(stream_name,
+                                                             strip, normalize,
+                                                             only_committed)
+
class CollectionReader(Collection):
"""A read-only collection object.
self.assertIs(c.find("./nonexistant.txt"), None)
self.assertIs(c.find("./nonexistantsubdir/nonexistant.txt"), None)
+ def test_escaped_paths_dont_get_unescaped_on_manifest(self):
+ # Dir & file names are literally '\056' (escaped form: \134056)
+ manifest = './\\134056\\040Test d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\134056\n'
+ c = Collection(manifest)
+ self.assertEqual(c.portable_manifest_text(), manifest)
+
+ def test_other_special_chars_on_file_token(self):
+ cases = [
+ ('\\000', '\0'),
+ ('\\011', '\t'),
+ ('\\012', '\n'),
+ ('\\072', ':'),
+ ('\\134400', '\\400'),
+ ]
+ for encoded, decoded in cases:
+ manifest = '. d41d8cd98f00b204e9800998ecf8427e+0 0:0:some%sfile.txt\n' % encoded
+ c = Collection(manifest)
+ self.assertEqual(c.portable_manifest_text(), manifest)
+ self.assertIn('some%sfile.txt' % decoded, c.keys())
+
+ def test_escaped_paths_do_get_unescaped_on_listing(self):
+ # Dir & file names are literally '\056' (escaped form: \134056)
+ manifest = './\\134056\\040Test d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\134056\n'
+ c = Collection(manifest)
+ self.assertIn('\\056 Test', c.keys())
+ self.assertIn('\\056', c['\\056 Test'].keys())
+
+ def test_make_empty_dir_with_escaped_chars(self):
+ c = Collection()
+ c.mkdirs('./Empty\\056Dir')
+ self.assertEqual(c.portable_manifest_text(),
+ './Empty\\134056Dir d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\056\n')
+
+ def test_make_empty_dir_with_spaces(self):
+ c = Collection()
+ c.mkdirs('./foo bar/baz waz')
+ self.assertEqual(c.portable_manifest_text(),
+ './foo\\040bar/baz\\040waz d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\056\n')
+
def test_remove_in_subdir(self):
c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n')
c.remove("foo/count2.txt")
- self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n", c.portable_manifest_text())
+ self.assertEqual(". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\056\n", c.portable_manifest_text())
def test_remove_empty_subdir(self):
c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\n')
end
class Manifest
- STRICT_STREAM_TOKEN_REGEXP = /^(\.)(\/[^\/\s]+)*$/
- STRICT_FILE_TOKEN_REGEXP = /^[[:digit:]]+:[[:digit:]]+:([^\s\/]+(\/[^\s\/]+)*)$/
+ STREAM_TOKEN_REGEXP = /^([^\000-\040\\]|\\[0-3][0-7][0-7])+$/
+ STREAM_NAME_REGEXP = /^(\.)(\/[^\/]+)*$/
+
+  EMPTY_DIR_TOKEN_REGEXP = /^0:0:\.$/ # The only case in which a file may be named '.'
+ FILE_TOKEN_REGEXP = /^[[:digit:]]+:[[:digit:]]+:([^\000-\040\\]|\\[0-3][0-7][0-7])+$/
+ FILE_NAME_REGEXP = /^[[:digit:]]+:[[:digit:]]+:([^\/]+(\/[^\/]+)*)$/
+
+ NON_8BIT_ENCODED_CHAR = /[^\\]\\[4-7][0-7][0-7]/
# Class to parse a manifest text and provide common views of that data.
def initialize(manifest_text)
end
end
- def unescape(s)
+ def self.unescape(s)
+ return nil if s.nil?
+
# Parse backslash escapes in a Keep manifest stream or file name.
s.gsub(/\\(\\|[0-7]{3})/) do |_|
case $1
end
end
+  # Instance-level convenience wrapper around Manifest.unescape.
+  def unescape(s)
+    self.class.unescape(s)
+  end
+
def split_file_token token
start_pos, filesize, filename = token.split(':', 3)
if filename.nil?
elsif in_file_tokens or not Locator.valid? token
in_file_tokens = true
- file_tokens = split_file_token(token)
+ start_pos, file_size, file_name = split_file_token(token)
stream_name_adjuster = ''
- if file_tokens[2].include?('/') # '/' in filename
- parts = file_tokens[2].rpartition('/')
- stream_name_adjuster = parts[1] + parts[0] # /dir_parts
- file_tokens[2] = parts[2]
+ if file_name.include?('/') # '/' in filename
+ dirname, sep, basename = file_name.rpartition('/')
+ stream_name_adjuster = sep + dirname # /dir_parts
+ file_name = basename
end
- yield [stream_name + stream_name_adjuster] + file_tokens
+ yield [stream_name + stream_name_adjuster, start_pos, file_size, file_name]
end
end
end
# files. This can help you avoid parsing the entire manifest if you
# just want to check if a small number of files are specified.
if stop_after.nil? or not @files.nil?
- return files.size
+ # Avoid counting empty dir placeholders
+ return files.reject{|_, name, size| name == '.' and size == 0}.size
end
seen_files = {}
- each_file_spec do |streamname, _, _, filename|
+ each_file_spec do |streamname, _, filesize, filename|
+ # Avoid counting empty dir placeholders
+ next if filename == "." and filesize == 0
seen_files[[streamname, filename]] = true
return stop_after if (seen_files.size >= stop_after)
end
count = 0
word = words.shift
- count += 1 if word =~ STRICT_STREAM_TOKEN_REGEXP and word !~ /\/\.\.?(\/|$)/
+ raise ArgumentError.new "Manifest invalid for stream #{line_count}: >8-bit encoded chars not allowed on stream token #{word.inspect}" if word =~ NON_8BIT_ENCODED_CHAR
+ unescaped_word = unescape(word)
+ count += 1 if word =~ STREAM_TOKEN_REGEXP and unescaped_word =~ STREAM_NAME_REGEXP and unescaped_word !~ /\/\.\.?(\/|$)/
raise ArgumentError.new "Manifest invalid for stream #{line_count}: missing or invalid stream name #{word.inspect if word}" if count != 1
count = 0
raise ArgumentError.new "Manifest invalid for stream #{line_count}: missing or invalid locator #{word.inspect if word}" if count == 0
count = 0
- while word =~ STRICT_FILE_TOKEN_REGEXP and ($~[1].split('/') & ['..','.']).empty?
+ raise ArgumentError.new "Manifest invalid for stream #{line_count}: >8-bit encoded chars not allowed on file token #{word.inspect}" if word =~ NON_8BIT_ENCODED_CHAR
+ while unescape(word) =~ EMPTY_DIR_TOKEN_REGEXP or
+ (word =~ FILE_TOKEN_REGEXP and unescape(word) =~ FILE_NAME_REGEXP and ($~[1].split('/') & ['..', '.']).empty?)
word = words.shift
count += 1
end
assert_equal(0, Keep::Manifest.new("").files_count)
end
+ def test_empty_dir_files_count
+ assert_equal(0,
+ Keep::Manifest.new("./empty_dir d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\056\n").files_count)
+ end
+
def test_empty_files_size
assert_equal(0, Keep::Manifest.new("").files_size)
end
[true, ". 00000000000000000000000000000000+0 0:0:0\n"],
[true, ". 00000000000000000000000000000000+0 0:0:d41d8cd98f00b204e9800998ecf8427e+0+Ad41d8cd98f00b204e9800998ecf8427e00000000@ffffffff\n"],
[true, ". d41d8cd98f00b204e9800998ecf8427e+0+Ad41d8cd98f00b204e9800998ecf8427e00000000@ffffffff 0:0:empty.txt\n"],
+ [true, "./empty_dir d41d8cd98f00b204e9800998ecf8427e+0 0:0:.\n"],
[false, '. d41d8cd98f00b204e9800998ecf8427e 0:0:abc.txt',
"Invalid manifest: does not end with newline"],
[false, "abc d41d8cd98f00b204e9800998ecf8427e 0:0:abc.txt\n",
"invalid stream name \"./abc/..\""],
[false, "./abc/./foo d41d8cd98f00b204e9800998ecf8427e 0:0:abc.txt\n",
"invalid stream name \"./abc/./foo\""],
- [false, ". d41d8cd98f00b204e9800998ecf8427e 0:0:.\n",
- "invalid file token \"0:0:.\""],
+ # non-empty '.'-named file tokens aren't acceptable. Empty ones are used as empty dir placeholders.
+ [false, ". 8cf8463b34caa8ac871a52d5dd7ad1ef+1 0:1:.\n",
+ "invalid file token \"0:1:.\""],
[false, ". d41d8cd98f00b204e9800998ecf8427e 0:0:..\n",
"invalid file token \"0:0:..\""],
[false, ". d41d8cd98f00b204e9800998ecf8427e 0:0:./abc.txt\n",
"Manifest invalid for stream 1: invalid file token \"0:0:foo//bar.txt\""],
[false, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo/\n",
"Manifest invalid for stream 1: invalid file token \"0:0:foo/\""],
+ # escaped chars
+ [true, "./empty_dir d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\056\n"],
+ [false, "./empty_dir d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\056\\056\n",
+ "Manifest invalid for stream 1: invalid file token \"0:0:\\\\056\\\\056\""],
+ [false, "./empty_dir d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\056\\056\\057foo\n",
+ "Manifest invalid for stream 1: invalid file token \"0:0:\\\\056\\\\056\\\\057foo\""],
+ [false, "./empty_dir d41d8cd98f00b204e9800998ecf8427e+0 0\\0720\\072foo\n",
+ "Manifest invalid for stream 1: invalid file token \"0\\\\0720\\\\072foo\""],
+ [false, "./empty_dir d41d8cd98f00b204e9800998ecf8427e+0 \\060:\\060:foo\n",
+ "Manifest invalid for stream 1: invalid file token \"\\\\060:\\\\060:foo\""],
+ [true, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\057bar\n"],
+ [true, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\072\n"],
+ [true, ".\\057Data d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n"],
+ [true, "\\056\\057Data d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n"],
+ [true, "./\\134444 d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n"],
+ [false, "./\\\\444 d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n",
+ "Manifest invalid for stream 1: missing or invalid stream name \"./\\\\\\\\444\""],
+ [true, "./\\011foo d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n"],
+ [false, "./\\011/.. d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n",
+ "Manifest invalid for stream 1: missing or invalid stream name \"./\\\\011/..\""],
+ [false, ".\\056\\057 d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n",
+ "Manifest invalid for stream 1: missing or invalid stream name \".\\\\056\\\\057\""],
+ [false, ".\\057\\056 d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n",
+ "Manifest invalid for stream 1: missing or invalid stream name \".\\\\057\\\\056\""],
+ [false, ".\\057Data d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\444\n",
+ "Manifest invalid for stream 1: >8-bit encoded chars not allowed on file token \"0:0:foo\\\\444\""],
+ [false, "./\\444 d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n",
+ "Manifest invalid for stream 1: >8-bit encoded chars not allowed on stream token \"./\\\\444\""],
+ [false, "./\tfoo d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n",
+ "Manifest invalid for stream 1: missing or invalid stream name \"./\\tfoo\""],
+ [false, "./foo\\ d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n",
+ "Manifest invalid for stream 1: missing or invalid stream name \"./foo\\\\\""],
+ [false, "./foo\\r d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n",
+ "Manifest invalid for stream 1: missing or invalid stream name \"./foo\\\\r\""],
+ [false, "./foo\\444 d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n",
+ "Manifest invalid for stream 1: >8-bit encoded chars not allowed on stream token \"./foo\\\\444\""],
+ [false, "./foo\\888 d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n",
+ "Manifest invalid for stream 1: missing or invalid stream name \"./foo\\\\888\""],
+ [false, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\\n",
+ "Manifest invalid for stream 1: invalid file token \"0:0:foo\\\\\""],
+ [false, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\r\n",
+ "Manifest invalid for stream 1: invalid file token \"0:0:foo\\\\r\""],
+ [false, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\444\n",
+ "Manifest invalid for stream 1: >8-bit encoded chars not allowed on file token \"0:0:foo\\\\444\""],
+ [false, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\888\n",
+ "Manifest invalid for stream 1: invalid file token \"0:0:foo\\\\888\""],
+ [false, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\057/bar\n",
+ "Manifest invalid for stream 1: invalid file token \"0:0:foo\\\\057/bar\""],
+ [false, ".\\057/Data d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n",
+ "Manifest invalid for stream 1: missing or invalid stream name \".\\\\057/Data\""],
+ [true, "./Data\\040Folder d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\n"],
+ [false, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\057foo/bar\n",
+ "Manifest invalid for stream 1: invalid file token \"0:0:\\\\057foo/bar\""],
+ [true, ". d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\134057foo/bar\n"],
+ [false, ". d41d8cd98f00b204e9800998ecf8427e+0 \\040:\\040:foo.txt\n",
+ "Manifest invalid for stream 1: invalid file token \"\\\\040:\\\\040:foo.txt\""],
].each do |ok, manifest, expected_error=nil|
define_method "test_validate manifest #{manifest.inspect}" do
assert_equal ok, Keep::Manifest.valid?(manifest)
"git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
"git.curoverse.com/arvados.git/sdk/go/dispatch"
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
)
var version = "dev"
"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
"git.curoverse.com/arvados.git/sdk/go/arvadostest"
"git.curoverse.com/arvados.git/sdk/go/dispatch"
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
. "gopkg.in/check.v1"
)
"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
"git.curoverse.com/arvados.git/sdk/go/config"
"git.curoverse.com/arvados.git/sdk/go/dispatch"
- "github.com/Sirupsen/logrus"
"github.com/coreos/go-systemd/daemon"
+ "github.com/sirupsen/logrus"
)
type logger interface {
"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
"git.curoverse.com/arvados.git/sdk/go/arvadostest"
"git.curoverse.com/arvados.git/sdk/go/dispatch"
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
. "gopkg.in/check.v1"
)
import (
"time"
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
. "gopkg.in/check.v1"
)
self.assertIn(self.testcollection,
llfuse.listdir(os.path.join(self.mounttmp, 'by_id')))
self.assertIn(self.test_project, mount_ls)
- self.assertIn(self.test_project,
+ self.assertIn(self.test_project,
llfuse.listdir(os.path.join(self.mounttmp, 'by_id')))
with self.assertRaises(OSError):
r'\./testdir 86fb269d190d2c85f6e0468ceca42a20\+12\+A\S+ 0:12:file1\.txt$')
self.pool.apply(fuseRmTestHelperDeleteFile, (self.mounttmp,))
- # Can't have empty directories :-( so manifest will be empty.
+ # Empty directories are represented by an empty file named "."
collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
- self.assertEqual(collection2["manifest_text"], "")
+ self.assertRegexpMatches(collection2["manifest_text"],
+ r'./testdir d41d8cd98f00b204e9800998ecf8427e\+0\+A\S+ 0:0:\\056\n')
self.pool.apply(fuseRmTestHelperRmdir, (self.mounttmp,))
collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()
self.assertRegexpMatches(collection2["manifest_text"],
- r'\. 86fb269d190d2c85f6e0468ceca42a20\+12\+A\S+ 0:12:file1\.txt$')
+ r'\. 86fb269d190d2c85f6e0468ceca42a20\+12\+A\S+ 0:12:file1\.txt\n\./testdir d41d8cd98f00b204e9800998ecf8427e\+0\+A\S+ 0:0:\\056\n')
def fuseRenameTestHelper(mounttmp):
"git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/health"
"git.curoverse.com/arvados.git/sdk/go/httpserver"
- log "github.com/Sirupsen/logrus"
+ log "github.com/sirupsen/logrus"
)
var version = "dev"
"git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/keepclient"
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
)
// Balancer compares the contents of keepstore servers with the
"time"
"git.curoverse.com/arvados.git/sdk/go/arvados"
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
check "gopkg.in/check.v1"
)
"git.curoverse.com/arvados.git/sdk/go/arvadosclient"
"git.curoverse.com/arvados.git/sdk/go/arvadostest"
"git.curoverse.com/arvados.git/sdk/go/keepclient"
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
check "gopkg.in/check.v1"
)
"git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/config"
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
)
var debugf = func(string, ...interface{}) {}
"git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/auth"
"git.curoverse.com/arvados.git/sdk/go/httpserver"
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
)
var version = "dev"
"git.curoverse.com/arvados.git/sdk/go/health"
"git.curoverse.com/arvados.git/sdk/go/httpserver"
"git.curoverse.com/arvados.git/sdk/go/keepclient"
- log "github.com/Sirupsen/logrus"
+ log "github.com/sirupsen/logrus"
"golang.org/x/net/webdav"
)
"git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/config"
- log "github.com/Sirupsen/logrus"
"github.com/coreos/go-systemd/daemon"
+ log "github.com/sirupsen/logrus"
)
var (
"git.curoverse.com/arvados.git/sdk/go/health"
"git.curoverse.com/arvados.git/sdk/go/httpserver"
"git.curoverse.com/arvados.git/sdk/go/keepclient"
- log "github.com/Sirupsen/logrus"
"github.com/coreos/go-systemd/daemon"
"github.com/ghodss/yaml"
"github.com/gorilla/mux"
+ log "github.com/sirupsen/logrus"
)
var version = "dev"
"time"
"git.curoverse.com/arvados.git/sdk/go/arvados"
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
)
type Config struct {
package main
import (
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
)
func init() {
"git.curoverse.com/arvados.git/sdk/go/ctxlog"
"git.curoverse.com/arvados.git/sdk/go/health"
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
"golang.org/x/net/websocket"
)
"time"
"git.curoverse.com/arvados.git/sdk/go/arvados"
- "github.com/Sirupsen/logrus"
+ "github.com/sirupsen/logrus"
)
var (
COMPOSER_ROOT="$ARVBOX_DATA/composer"
fi
+if test -z "$WORKBENCH2_ROOT" ; then
+ WORKBENCH2_ROOT="$ARVBOX_DATA/workbench2"
+fi
+
PG_DATA="$ARVBOX_DATA/postgres"
VAR_DATA="$ARVBOX_DATA/var"
PASSENGER="$ARVBOX_DATA/passenger"
docker logs -f $ARVBOX_CONTAINER > $FF &
LOGPID=$!
while read line ; do
- echo $line
- if echo $line | grep "Workbench is running at" >/dev/null ; then
+ if echo $line | grep "ok: down: ready:" >/dev/null ; then
kill $LOGPID
+ else
+ echo $line
fi
done < $FF
rm $FF
echo $localip > $iptemp
chmod og+r $iptemp
PUBLIC="--volume=$iptemp:/var/run/localip_override
- --publish=80:80
+ --publish=443:443
+ --publish=3001:3001
--publish=8000:8000
--publish=8900:8900
--publish=9001:9001
if ! test -d "$COMPOSER_ROOT" ; then
git clone https://github.com/curoverse/composer.git "$COMPOSER_ROOT"
fi
+ if ! test -d "$WORKBENCH2_ROOT" ; then
+ git clone https://github.com/curoverse/arvados-workbench2.git "$WORKBENCH2_ROOT"
+ fi
if test "$CONFIG" = test ; then
"--volume=$ARVADOS_ROOT:/usr/src/arvados:rw" \
"--volume=$SSO_ROOT:/usr/src/sso:rw" \
"--volume=$COMPOSER_ROOT:/usr/src/composer:rw" \
+ "--volume=$WORKBENCH2_ROOT:/usr/src/workbench2:rw" \
"--volume=$PG_DATA:/var/lib/postgresql:rw" \
"--volume=$VAR_DATA:/var/lib/arvados:rw" \
"--volume=$PASSENGER:/var/lib/passenger:rw" \
"--volume=$ARVADOS_ROOT:/usr/src/arvados:rw" \
"--volume=$SSO_ROOT:/usr/src/sso:rw" \
"--volume=$COMPOSER_ROOT:/usr/src/composer:rw" \
+ "--volume=$WORKBENCH2_ROOT:/usr/src/workbench2:rw" \
"--volume=$PG_DATA:/var/lib/postgresql:rw" \
"--volume=$VAR_DATA:/var/lib/arvados:rw" \
"--volume=$PASSENGER:/var/lib/passenger:rw" \
updateconf
wait_for_arvbox
echo "The Arvados source code is checked out at: $ARVADOS_ROOT"
+ echo "The Arvados testing root certificate is $VAR_DATA/root-cert.pem"
else
echo "Unknown configuration '$CONFIG'"
fi
RUN curl -L -f https://nodejs.org/dist/${NODEVERSION}/node-${NODEVERSION}-linux-x64.tar.xz | tar -C /usr/local -xJf - && \
ln -s ../node-${NODEVERSION}-linux-x64/bin/node ../node-${NODEVERSION}-linux-x64/bin/npm /usr/local/bin
+# Set UTF-8 locale
RUN echo en_US.UTF-8 UTF-8 > /etc/locale.gen && locale-gen
+ENV LANG en_US.UTF-8
+ENV LANGUAGE en_US:en
+ENV LC_ALL en_US.UTF-8
ARG arvados_version
RUN echo arvados_version is git commit $arvados_version
ARG arvados_version
ARG sso_version=master
ARG composer_version=master
+ARG workbench2_version=master
RUN cd /usr/src && \
git clone --no-checkout https://github.com/curoverse/arvados.git && \
git clone --no-checkout https://github.com/curoverse/sso-devise-omniauth-provider.git sso && \
git -C sso checkout ${sso_version} && \
git clone --no-checkout https://github.com/curoverse/composer.git && \
- git -C composer checkout ${composer_version}
+ git -C composer checkout ${composer_version} && \
+ git clone --no-checkout https://github.com/curoverse/arvados-workbench2.git workbench2 && \
+ git -C workbench2 checkout ${workbench2_version}
ADD service/ /var/lib/arvbox/service
RUN ln -sf /var/lib/arvbox/service /etc
RUN chown -R 1000:1000 /usr/src && /usr/local/lib/arvbox/createusers.sh
RUN sudo -u arvbox /var/lib/arvbox/service/composer/run-service --only-deps
+RUN sudo -u arvbox /var/lib/arvbox/service/workbench2/run-service --only-deps
RUN sudo -u arvbox /var/lib/arvbox/service/keep-web/run-service --only-deps
RUN sudo -u arvbox /var/lib/arvbox/service/sso/run-service --only-deps
RUN sudo -u arvbox /var/lib/arvbox/service/api/run-service --only-deps
fi
management_token=$(cat /var/lib/arvados/management_token)
-# self signed key will be created by SSO server script.
-test -s /var/lib/arvados/self-signed.key
-
sso_app_secret=$(cat /var/lib/arvados/sso_app_secret)
if test -s /var/lib/arvados/vm-uuid ; then
sso_app_secret: $sso_app_secret
sso_app_id: arvados-server
sso_provider_url: "https://$localip:${services[sso]}"
- sso_insecure: true
- workbench_address: "http://$localip/"
- websocket_address: "ws://$localip:${services[websockets]}/websocket"
+ sso_insecure: false
+ workbench_address: "https://$localip/"
+ websocket_address: "wss://$localip:${services[websockets-ssl]}/websocket"
git_repo_ssh_base: "git@$localip:"
git_repo_https_base: "http://$localip:${services[arv-git-httpd]}/"
new_users_are_active: true
auto_setup_new_users_with_repository: true
default_collection_replication: 1
docker_image_formats: ["v2"]
- keep_web_service_url: http://$localip:${services[keep-web]}/
+ keep_web_service_url: https://$localip:${services[keep-web-ssl]}/
ManagementToken: $management_token
EOF
declare -A services
services=(
- [workbench]=80
+ [workbench]=443
+ [workbench2]=3000
+ [workbench2-ssl]=3001
[api]=8004
[controller]=8003
[controller-ssl]=8000
[sso]=8900
[composer]=4200
[arv-git-httpd]=9001
- [keep-web]=9002
+ [keep-web]=9003
+ [keep-web-ssl]=9002
[keepproxy]=25100
[keepstore0]=25107
[keepstore1]=25108
[ssh]=22
[doc]=8001
- [websockets]=8002
+ [websockets]=8005
+ [websockets-ssl]=8002
)
if test "$(id arvbox -u 2>/dev/null)" = 0 ; then
--- /dev/null
+/usr/local/lib/arvbox/logger
\ No newline at end of file
--- /dev/null
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+set -ex -o pipefail
+
+. /usr/local/lib/arvbox/common.sh
+
+if test ! -s /var/lib/arvados/root-cert.pem ; then
+ # req signing request sub-command
+ # -new new certificate request
+ # -nodes "no des" don't encrypt key
+  #  -sha256   use SHA-256 as the signing message digest
+ # -x509 generate self-signed certificate
+ # -subj certificate subject
+  #  -extensions  apply the x509_ext section (CA basicConstraints/keyUsage) to the certificate
+  #  -config   certificate generation configuration plus the x509_ext CA extensions
+ # -out certificate output
+ # -keyout private key output
+ # -days certificate lifetime
+ openssl req \
+ -new \
+ -nodes \
+ -sha256 \
+ -x509 \
+ -subj "/C=US/ST=MA/O=Arvados testing/OU=arvbox/CN=arvbox testing root CA for ${uuid_prefix}" \
+ -extensions x509_ext \
+ -config <(cat /etc/ssl/openssl.cnf \
+ <(printf "\n[x509_ext]\nbasicConstraints=critical,CA:true,pathlen:0\nkeyUsage=critical,keyCertSign,cRLSign")) \
+ -out /var/lib/arvados/root-cert.pem \
+ -keyout /var/lib/arvados/root-cert.key \
+ -days 365
+ chown arvbox:arvbox /var/lib/arvados/root-cert.*
+fi
+
+if test ! -s /var/lib/arvados/server-cert-${localip}.pem ; then
+ # req signing request sub-command
+ # -new new certificate request
+ # -nodes "no des" don't encrypt key
+  #  -sha256   use SHA-256 as the signing message digest
+ # -subj certificate subject
+ # -reqexts certificate request extension for subjectAltName
+ # -extensions certificate request extension for subjectAltName
+ # -config certificate generation configuration plus subjectAltName
+ # -out certificate output
+ # -keyout private key output
+ # -days certificate lifetime
+ openssl req \
+ -new \
+ -nodes \
+ -sha256 \
+ -subj "/C=US/ST=MA/O=Arvados testing for ${uuid_prefix}/OU=arvbox/CN=localhost" \
+ -reqexts x509_ext \
+ -extensions x509_ext \
+ -config <(cat /etc/ssl/openssl.cnf \
+ <(printf "\n[x509_ext]\nkeyUsage=critical,digitalSignature,keyEncipherment\nsubjectAltName=DNS:localhost,IP:$localip")) \
+ -out /var/lib/arvados/server-cert-${localip}.csr \
+ -keyout /var/lib/arvados/server-cert-${localip}.key \
+ -days 365
+
+ openssl x509 \
+ -req \
+ -in /var/lib/arvados/server-cert-${localip}.csr \
+ -CA /var/lib/arvados/root-cert.pem \
+ -CAkey /var/lib/arvados/root-cert.key \
+ -out /var/lib/arvados/server-cert-${localip}.pem \
+ -set_serial $RANDOM$RANDOM \
+ -extfile <(cat /etc/ssl/openssl.cnf \
+ <(printf "\n[x509_ext]\nkeyUsage=critical,digitalSignature,keyEncipherment\nsubjectAltName=DNS:localhost,IP:$localip")) \
+ -extensions x509_ext
+
+ chown arvbox:arvbox /var/lib/arvados/server-cert-${localip}.*
+fi
+
+cp /var/lib/arvados/root-cert.pem /usr/local/share/ca-certificates/arvados-testing-cert.crt
+update-ca-certificates
+
+sv stop certificate
\ No newline at end of file
set -e
-/usr/local/lib/arvbox/runsu.sh $0-service $1
+exec /usr/local/lib/arvbox/runsu.sh $0-service $1
gitolite_tmp: /var/lib/arvados/git
arvados_api_host: $localip:${services[controller-ssl]}
arvados_api_token: "$ARVADOS_API_TOKEN"
- arvados_api_host_insecure: true
+ arvados_api_host_insecure: false
gitolite_arvados_git_user_key: "$git_user_key"
EOF
server {
listen *:${services[controller-ssl]} ssl default_server;
server_name controller;
- ssl_certificate "/var/lib/arvados/self-signed.pem";
- ssl_certificate_key "/var/lib/arvados/self-signed.key";
+ ssl_certificate "/var/lib/arvados/server-cert-${localip}.pem";
+ ssl_certificate_key "/var/lib/arvados/server-cert-${localip}.key";
location / {
proxy_pass http://controller;
proxy_set_header Host \$http_host;
proxy_redirect off;
}
}
+
+upstream arvados-ws {
+ server localhost:${services[websockets]};
+}
+server {
+ listen *:${services[websockets-ssl]} ssl default_server;
+ server_name websockets;
+
+ proxy_connect_timeout 90s;
+ proxy_read_timeout 300s;
+
+ ssl on;
+ ssl_certificate "/var/lib/arvados/server-cert-${localip}.pem";
+ ssl_certificate_key "/var/lib/arvados/server-cert-${localip}.key";
+
+ location / {
+ proxy_pass http://arvados-ws;
+ proxy_set_header Upgrade \$http_upgrade;
+ proxy_set_header Connection "upgrade";
+ proxy_set_header Host \$http_host;
+ proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
+ }
+}
+
+ upstream workbench2 {
+ server localhost:${services[workbench2]};
+ }
+ server {
+ listen *:${services[workbench2-ssl]} ssl default_server;
+ server_name workbench2;
+ ssl_certificate "/var/lib/arvados/server-cert-${localip}.pem";
+ ssl_certificate_key "/var/lib/arvados/server-cert-${localip}.key";
+ location / {
+ proxy_pass http://workbench2;
+ proxy_set_header Host \$http_host;
+ proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto https;
+ proxy_redirect off;
+ }
+ location /sockjs-node {
+ proxy_pass http://workbench2;
+ proxy_set_header Upgrade \$http_upgrade;
+ proxy_set_header Connection "upgrade";
+ proxy_set_header Host \$http_host;
+ proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
+ }
+ }
+
+ upstream keep-web {
+ server localhost:${services[keep-web]};
+ }
+ server {
+ listen *:${services[keep-web-ssl]} ssl default_server;
+ server_name keep-web;
+ ssl_certificate "/var/lib/arvados/server-cert-${localip}.pem";
+ ssl_certificate_key "/var/lib/arvados/server-cert-${localip}.key";
+ location / {
+ proxy_pass http://keep-web;
+ proxy_set_header Host \$http_host;
+ proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto https;
+ proxy_redirect off;
+ }
+ }
+
}
EOF
echo
echo "Your Arvados-in-a-box is ready!"
-echo "Workbench is running at http://$localip"
+echo "Workbench is running at https://$localip"
+echo "Workbench2 is running at https://$localip:${services[workbench2-ssl]}"
rm -r /tmp/arvbox-ready
mkdir /var/run/sshd
chmod 0755 /var/run/sshd
fi
-/usr/sbin/sshd -D
+exec /usr/sbin/sshd -D
fi
secret_token=$(cat /var/lib/arvados/sso_secret_token)
-if ! test -s /var/lib/arvados/self-signed.key ; then
- openssl req -new -x509 -nodes -out /var/lib/arvados/self-signed.pem -keyout /var/lib/arvados/self-signed.key -days 365 -subj '/CN=localhost'
-fi
+test -s /var/lib/arvados/server-cert-${localip}.pem
cat >config/application.yml <<EOF
$RAILS_ENV:
fi
exec bundle exec passenger start --port=${services[sso]} \
- --ssl --ssl-certificate=/var/lib/arvados/self-signed.pem \
- --ssl-certificate-key=/var/lib/arvados/self-signed.key
+ --ssl --ssl-certificate=/var/lib/arvados/server-cert-${localip}.pem \
+ --ssl-certificate-key=/var/lib/arvados/server-cert-${localip}.key
cat >/var/lib/arvados/arvados-ws.yml <<EOF
Client:
APIHost: $localip:${services[controller-ssl]}
- Insecure: true
+ Insecure: false
Postgres:
dbname: arvados_$RAILS_ENV
user: arvados
password: $database_pw
host: localhost
-Listen: :8002
+Listen: localhost:${services[websockets]}
EOF
exec /usr/local/bin/arvados-ws -config /var/lib/arvados/arvados-ws.yml
if test "$1" != "--only-deps" ; then
exec bundle exec passenger start --port=${services[workbench]} \
+ --ssl --ssl-certificate=/var/lib/arvados/server-cert-${localip}.pem \
+ --ssl-certificate-key=/var/lib/arvados/server-cert-${localip}.key \
--user arvbox
fi
fi
secret_token=$(cat /var/lib/arvados/workbench_secret_token)
-if ! test -s self-signed.key ; then
- openssl req -new -x509 -nodes -out self-signed.pem -keyout self-signed.key -days 365 -subj '/CN=localhost'
-fi
-
cat >config/application.yml <<EOF
$RAILS_ENV:
secret_token: $secret_token
arvados_login_base: https://$localip:${services[controller-ssl]}/login
arvados_v1_base: https://$localip:${services[controller-ssl]}/arvados/v1
- arvados_insecure_https: true
- keep_web_download_url: http://$localip:${services[keep-web]}/c=%{uuid_or_pdh}
- keep_web_url: http://$localip:${services[keep-web]}/c=%{uuid_or_pdh}
+ arvados_insecure_https: false
+ keep_web_download_url: https://$localip:${services[keep-web-ssl]}/c=%{uuid_or_pdh}
+ keep_web_url: https://$localip:${services[keep-web-ssl]}/c=%{uuid_or_pdh}
arvados_docsite: http://$localip:${services[doc]}/
force_ssl: false
composer_url: http://$localip:${services[composer]}
--- /dev/null
+/usr/local/lib/arvbox/logger
\ No newline at end of file
--- /dev/null
+#!/bin/sh
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+set -e
+
+exec /usr/local/lib/arvbox/runsu.sh $0-service $1
--- /dev/null
+#!/bin/bash
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: AGPL-3.0
+
+exec 2>&1
+set -ex -o pipefail
+
+. /usr/local/lib/arvbox/common.sh
+
+cd /usr/src/workbench2
+
+npm -d install --prefix /usr/local --global yarn
+
+yarn install
+
+if test "$1" = "--only-deps" ; then
+ exit
+fi
+
+cat <<EOF > /usr/src/workbench2/public/config.json
+{
+ "API_HOST": "${localip}:${services[controller-ssl]}",
+ "VOCABULARY_URL": "vocabulary-example.json",
+ "FILE_VIEWERS_CONFIG_URL": "file-viewers-example.json"
+}
+EOF
+
+export HTTPS=false
+# Can't use "yarn start", need to run the dev server script
+# directly so that the TERM signal from "sv restart" gets to the
+# right process.
+exec node node_modules/react-scripts-ts/scripts/start.js
"revision": "888b4804f2653cd35ebcc95f046079e63b5b2799",
"revisionTime": "2017-07-27T13:52:37Z"
},
+ {
+ "checksumSHA1": "KF4DsRUpZ+h+qRQ/umRAQZfVvw0=",
+ "path": "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-06-01/compute",
+ "revision": "4e8cbbfb1aeab140cd0fa97fd16b64ee18c3ca6a",
+ "revisionTime": "2018-07-27T22:05:59Z"
+ },
+ {
+ "checksumSHA1": "IZNzp1cYx+xYHd4gzosKpG6Jr/k=",
+ "path": "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-06-01/network",
+ "revision": "4e8cbbfb1aeab140cd0fa97fd16b64ee18c3ca6a",
+ "revisionTime": "2018-07-27T22:05:59Z"
+ },
+ {
+ "checksumSHA1": "W4c2uTDJlwhfryWg9esshmJANo0=",
+ "path": "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-02-01/storage",
+ "revision": "4e8cbbfb1aeab140cd0fa97fd16b64ee18c3ca6a",
+ "revisionTime": "2018-07-27T22:05:59Z"
+ },
{
"checksumSHA1": "xHZe/h/tyrqmS9qiR03bLfRv5FI=",
"path": "github.com/Azure/azure-sdk-for-go/storage",
"revisionTime": "2018-02-14T01:17:07Z"
},
{
- "checksumSHA1": "LQWU/2M2E4L/hVzT9BVW1SkLrpA=",
+ "checksumSHA1": "1Y2+bSzYrdPHQqRjR1OrBMHAvxY=",
"path": "github.com/Azure/go-autorest/autorest",
- "revision": "a91c94d19d5efcb398b3aab64b8766e724aa7442",
- "revisionTime": "2017-11-30T17:00:06Z"
+ "revision": "39013ecb48eaf6ced3f4e3e1d95515140ce6b3cf",
+ "revisionTime": "2018-08-09T20:19:59Z"
},
{
- "checksumSHA1": "nBQ7cdhoeYUur6G6HG97uueoDmE=",
+ "checksumSHA1": "GxL0HHpZDj2milPhR3SPV6MWLPc=",
"path": "github.com/Azure/go-autorest/autorest/adal",
- "revision": "a91c94d19d5efcb398b3aab64b8766e724aa7442",
- "revisionTime": "2017-11-30T17:00:06Z"
+ "revision": "39013ecb48eaf6ced3f4e3e1d95515140ce6b3cf",
+ "revisionTime": "2018-08-09T20:19:59Z"
},
{
- "checksumSHA1": "zXyLmDVpkYkIsL0yinNLoW82IZc=",
+ "checksumSHA1": "ZNgwJOdHZmm4k/HJIbT1L5giO6M=",
"path": "github.com/Azure/go-autorest/autorest/azure",
- "revision": "a91c94d19d5efcb398b3aab64b8766e724aa7442",
- "revisionTime": "2017-11-30T17:00:06Z"
+ "revision": "39013ecb48eaf6ced3f4e3e1d95515140ce6b3cf",
+ "revisionTime": "2018-08-09T20:19:59Z"
+ },
+ {
+ "checksumSHA1": "6i7kwcXGTn55WqfubQs21swgr34=",
+ "path": "github.com/Azure/go-autorest/autorest/azure/auth",
+ "revision": "39013ecb48eaf6ced3f4e3e1d95515140ce6b3cf",
+ "revisionTime": "2018-08-09T20:19:59Z"
},
{
"checksumSHA1": "9nXCi9qQsYjxCeajJKWttxgEt0I=",
"path": "github.com/Azure/go-autorest/autorest/date",
- "revision": "a91c94d19d5efcb398b3aab64b8766e724aa7442",
- "revisionTime": "2017-11-30T17:00:06Z"
+ "revision": "39013ecb48eaf6ced3f4e3e1d95515140ce6b3cf",
+ "revisionTime": "2018-08-09T20:19:59Z"
+ },
+ {
+ "checksumSHA1": "SbBb2GcJNm5GjuPKGL2777QywR4=",
+ "path": "github.com/Azure/go-autorest/autorest/to",
+ "revision": "39013ecb48eaf6ced3f4e3e1d95515140ce6b3cf",
+ "revisionTime": "2018-08-09T20:19:59Z"
+ },
+ {
+ "checksumSHA1": "HjdLfAF3oA2In8F3FKh/Y+BPyXk=",
+ "path": "github.com/Azure/go-autorest/autorest/validation",
+ "revision": "39013ecb48eaf6ced3f4e3e1d95515140ce6b3cf",
+ "revisionTime": "2018-08-09T20:19:59Z"
+ },
+ {
+ "checksumSHA1": "b2lrPJRxf+MEfmMafN40wepi5WM=",
+ "path": "github.com/Azure/go-autorest/logger",
+ "revision": "39013ecb48eaf6ced3f4e3e1d95515140ce6b3cf",
+ "revisionTime": "2018-08-09T20:19:59Z"
+ },
+ {
+ "checksumSHA1": "UtAIMAsMWLBJ6yO1qZ0soFnb0sI=",
+ "path": "github.com/Azure/go-autorest/version",
+ "revision": "39013ecb48eaf6ced3f4e3e1d95515140ce6b3cf",
+ "revisionTime": "2018-08-09T20:19:59Z"
},
{
"checksumSHA1": "o/3cn04KAiwC7NqNVvmfVTD+hgA=",
"revision": "78439966b38d69bf38227fbf57ac8a6fee70f69a",
"revisionTime": "2017-08-04T20:09:54Z"
},
- {
- "checksumSHA1": "CWLxwFSj7MNed2MzAOSm0Cg9p+o=",
- "path": "github.com/Sirupsen/logrus",
- "revision": "d682213848ed68c0a260ca37d6dd5ace8423f5ba",
- "revisionTime": "2017-12-05T20:32:29Z"
- },
{
"checksumSHA1": "spyv5/YFBjYyZLZa1U2LBfDR8PM=",
"path": "github.com/beorn7/perks/quantile",
"revision": "dbeaa9332f19a944acb5736b4456cfcc02140e29",
"revisionTime": "2017-10-19T21:57:19Z"
},
+ {
+ "checksumSHA1": "7EjxkAUND/QY/sN+2fNKJ52v1Rc=",
+ "path": "github.com/dimchansky/utfbom",
+ "revision": "5448fe645cb1964ba70ac8f9f2ffe975e61a536c",
+ "revisionTime": "2018-07-13T13:37:17Z"
+ },
{
"checksumSHA1": "Gj+xR1VgFKKmFXYOJMnAczC3Znk=",
"path": "github.com/docker/distribution/digestset",
"revision": "1744e2970ca51c86172c8190fadad617561ed6e7",
"revisionTime": "2017-11-10T11:01:46Z"
},
+ {
+ "checksumSHA1": "ySaT8G3I3y4MmnoXOYAAX0rC+p8=",
+ "path": "github.com/sirupsen/logrus",
+ "revision": "d682213848ed68c0a260ca37d6dd5ace8423f5ba",
+ "revisionTime": "2017-12-05T20:32:29Z"
+ },
{
"checksumSHA1": "8QeSG127zQqbA+YfkO1WkKx/iUI=",
"path": "github.com/src-d/gcfg",
"revision": "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8",
"revisionTime": "2017-11-25T19:00:56Z"
},
+ {
+ "checksumSHA1": "PJY7uCr3UnX4/Mf/RoWnbieSZ8o=",
+ "path": "golang.org/x/crypto/pkcs12",
+ "revision": "614d502a4dac94afa3a6ce146bd1736da82514c6",
+ "revisionTime": "2018-07-28T08:01:47Z"
+ },
+ {
+ "checksumSHA1": "p0GC51McIdA7JygoP223twJ1s0E=",
+ "path": "golang.org/x/crypto/pkcs12/internal/rc2",
+ "revision": "614d502a4dac94afa3a6ce146bd1736da82514c6",
+ "revisionTime": "2018-07-28T08:01:47Z"
+ },
{
"checksumSHA1": "NHjGg73p5iGZ+7tflJ4cVABNmKE=",
"path": "golang.org/x/crypto/ssh",