--upload
If the build and test steps are successful, upload the packages
to a remote apt repository (default: false)
+--rc
+ Optional parameter to build a Release Candidate (default: false)
--build-version <version>
Version to build (default:
\$ARVADOS_BUILDING_VERSION-\$ARVADOS_BUILDING_ITERATION or
fi
PARSEDOPTS=$(getopt --name "$0" --longoptions \
- help,upload,target:,build-version: \
+ help,upload,rc,target:,build-version: \
-- "" "$@")
if [ $? -ne 0 ]; then
exit 1
TARGET=debian8
UPLOAD=0
+RC=0
declare -a build_args=()
--upload)
UPLOAD=1
;;
+ --rc)
+ RC=1
+ ;;
--build-version)
build_args+=("$1" "$2")
shift
timer_reset
if [ ${#failures[@]} -eq 0 ]; then
- echo "/usr/local/arvados-dev/jenkins/run_upload_packages.py -H jenkinsapt@apt.arvados.org -o Port=2222 --workspace $WORKSPACE $TARGET"
- /usr/local/arvados-dev/jenkins/run_upload_packages.py -H jenkinsapt@apt.arvados.org -o Port=2222 --workspace $WORKSPACE $TARGET
+ if [[ "$RC" != 0 ]]; then
+ echo "/usr/local/arvados-dev/jenkins/run_upload_packages_testing.py -H jenkinsapt@apt.arvados.org -o Port=2222 --workspace $WORKSPACE $TARGET"
+ /usr/local/arvados-dev/jenkins/run_upload_packages_testing.py -H jenkinsapt@apt.arvados.org -o Port=2222 --workspace $WORKSPACE $TARGET
+ else
+ echo "/usr/local/arvados-dev/jenkins/run_upload_packages.py -H jenkinsapt@apt.arvados.org -o Port=2222 --workspace $WORKSPACE $TARGET"
+ /usr/local/arvados-dev/jenkins/run_upload_packages.py -H jenkinsapt@apt.arvados.org -o Port=2222 --workspace $WORKSPACE $TARGET
+ fi
else
echo "Skipping package upload, there were errors building and/or testing the packages"
fi
title "End of upload packages (`timer`)"
fi
-exit_cleanly
+exit_cleanly
\ No newline at end of file
lib/crunchstat
lib/cloud
lib/cloud/azure
+ lib/cloud/ec2
lib/dispatchcloud
lib/dispatchcloud/container
lib/dispatchcloud/scheduler
h3. current master branch
+h4. Stricter collection manifest validation on the API server
+
+As a consequence of "#14482":https://dev.arvados.org/issues/14482, the Ruby SDK does a more rigorous collection manifest validation. Collections created after 2015-05 are unlikely to be invalid; however, you may check for invalid manifests using the script below.
+
+You could set up a new rvm gemset and install the specific arvados gem for testing, like so:
+
+<notextile>
+<pre><code>~$ <span class="userinput">rvm gemset create rubysdk-test</span>
+~$ <span class="userinput">rvm gemset use rubysdk-test</span>
+~$ <span class="userinput">gem install arvados -v 1.3.1.20190301212059</span>
+</code></pre>
+</notextile>
+
+Next, you can run the following script using admin credentials; it will scan the whole collection database and report any collection that didn't pass the check:
+
+{% codeblock as ruby %}
+require 'arvados'
+require 'arvados/keep'
+
+api = Arvados.new
+offset = 0
+batch_size = 100
+invalid = []
+
+while true
+ begin
+ req = api.collection.index(
+ :select => [:uuid, :created_at, :manifest_text],
+ :include_trash => true, :include_old_versions => true,
+ :limit => batch_size, :offset => offset)
+ rescue
+ invalid.each {|c| puts "#{c[:uuid]} (Created at #{c[:created_at]}): #{c[:error]}" }
+ raise
+ end
+
+ req[:items].each do |col|
+ begin
+ Keep::Manifest.validate! col[:manifest_text]
+ rescue Exception => e
+ puts "Collection #{col[:uuid]} manifest not valid"
+ invalid << {uuid: col[:uuid], error: e, created_at: col[:created_at]}
+ end
+ end
+ puts "Checked #{offset} / #{req[:items_available]} - Invalid: #{invalid.size}"
+ offset += req[:limit]
+ break if offset > req[:items_available]
+end
+
+if invalid.empty?
+ puts "No invalid collection manifests found"
+else
+ invalid.each {|c| puts "#{c[:uuid]} (Created at #{c[:created_at]}): #{c[:error]}" }
+end
+{% endcodeblock %}
+
+The script will return a final report enumerating any invalid collection by UUID, with its creation date and error message so you can take the proper correction measures, if needed.
+
h4. Python packaging change
As part of story "#9945":https://dev.arvados.org/issues/9945, the distribution packaging (deb/rpm) of our Python packages has changed. These packages now include a built-in virtualenv to reduce dependencies on system packages. We have also stopped packaging and publishing backports for all the Python dependencies of our packages, as they are no longer needed.
return nil, err
}
- ap := azureInstanceSet{logger: logger}
- err = ap.setup(azcfg, string(dispatcherID))
+ az := azureInstanceSet{logger: logger}
+ az.ctx, az.stopFunc = context.WithCancel(context.Background())
+ err = az.setup(azcfg, string(dispatcherID))
if err != nil {
+ az.stopFunc()
return nil, err
}
- return &ap, nil
+ return &az, nil
}
func (az *azureInstanceSet) setup(azcfg azureInstanceSetConfig, dispatcherID string) (err error) {
az.dispatcherID = dispatcherID
az.namePrefix = fmt.Sprintf("compute-%s-", az.dispatcherID)
- az.ctx, az.stopFunc = context.WithCancel(context.Background())
go func() {
az.stopWg.Add(1)
defer az.stopWg.Done()
"time"
"git.curoverse.com/arvados.git/lib/cloud"
+ "git.curoverse.com/arvados.git/lib/dispatchcloud/test"
"git.curoverse.com/arvados.git/sdk/go/arvados"
"git.curoverse.com/arvados.git/sdk/go/config"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-06-01/compute"
type VirtualMachinesClientStub struct{}
-var testKey = []byte(`ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLQS1ExT2+WjA0d/hntEAyAtgeN1W2ik2QX8c2zO6HjlPHWXL92r07W0WMuDib40Pcevpi1BXeBWXA9ZB5KKMJB+ukaAu22KklnQuUmNvk6ZXnPKSkGxuCYvPQb08WhHf3p1VxiKfP3iauedBDM4x9/bkJohlBBQiFXzNUcQ+a6rKiMzmJN2gbL8ncyUzc+XQ5q4JndTwTGtOlzDiGOc9O4z5Dd76wtAVJneOuuNpwfFRVHThpJM6VThpCZOnl8APaceWXKeuwOuCae3COZMz++xQfxOfZ9Z8aIwo+TlQhsRaNfZ4Vjrop6ej8dtfZtgUFKfbXEOYaHrGrWGotFDTD example@example`)
-
func (*VirtualMachinesClientStub) createOrUpdate(ctx context.Context,
resourceGroupName string,
VMName string,
c.Fatal("Error making provider", err)
}
- pk, _, _, _, err := ssh.ParseAuthorizedKey(testKey)
+ pk, _ := test.LoadTestKey(c, "../../dispatchcloud/test/sshkey_dispatch")
c.Assert(err, check.IsNil)
inst, err := ap.Create(cluster.InstanceTypes["tiny"],
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+
+package ec2
+
+import (
+ "crypto/md5"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "math/big"
+ "strings"
+ "sync"
+
+ "git.curoverse.com/arvados.git/lib/cloud"
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/ec2"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/crypto/ssh"
+)
+
+const arvadosDispatchID = "arvados-dispatch-id"
+const tagPrefix = "arvados-dispatch-tag-"
+
+// Driver is the ec2 implementation of the cloud.Driver interface.
+var Driver = cloud.DriverFunc(newEC2InstanceSet)
+
+type ec2InstanceSetConfig struct {
+ AccessKeyID string
+ SecretAccessKey string
+ Region string
+ SecurityGroupIDs []string
+ SubnetID string
+ AdminUsername string
+ EBSVolumeType string
+}
+
+type ec2Interface interface {
+ DescribeKeyPairs(input *ec2.DescribeKeyPairsInput) (*ec2.DescribeKeyPairsOutput, error)
+ ImportKeyPair(input *ec2.ImportKeyPairInput) (*ec2.ImportKeyPairOutput, error)
+ RunInstances(input *ec2.RunInstancesInput) (*ec2.Reservation, error)
+ DescribeInstances(input *ec2.DescribeInstancesInput) (*ec2.DescribeInstancesOutput, error)
+ CreateTags(input *ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error)
+ TerminateInstances(input *ec2.TerminateInstancesInput) (*ec2.TerminateInstancesOutput, error)
+}
+
+type ec2InstanceSet struct {
+ ec2config ec2InstanceSetConfig
+ dispatcherID cloud.InstanceSetID
+ logger logrus.FieldLogger
+ client ec2Interface
+ keysMtx sync.Mutex
+ keys map[string]string
+}
+
+func newEC2InstanceSet(config json.RawMessage, dispatcherID cloud.InstanceSetID, logger logrus.FieldLogger) (prv cloud.InstanceSet, err error) {
+ instanceSet := &ec2InstanceSet{
+ dispatcherID: dispatcherID,
+ logger: logger,
+ }
+ err = json.Unmarshal(config, &instanceSet.ec2config)
+ if err != nil {
+ return nil, err
+ }
+ awsConfig := aws.NewConfig().
+ WithCredentials(credentials.NewStaticCredentials(
+ instanceSet.ec2config.AccessKeyID,
+ instanceSet.ec2config.SecretAccessKey,
+ "")).
+ WithRegion(instanceSet.ec2config.Region)
+ instanceSet.client = ec2.New(session.Must(session.NewSession(awsConfig)))
+ instanceSet.keys = make(map[string]string)
+ if instanceSet.ec2config.EBSVolumeType == "" {
+ instanceSet.ec2config.EBSVolumeType = "gp2"
+ }
+ return instanceSet, nil
+}
+
+// awsKeyFingerprint returns the AWS-style MD5 and SHA-1 fingerprints
+// of the given RSA public key.
+//
+// AWS key fingerprints don't use the usual key fingerprint
+// you get from ssh-keygen or ssh.FingerprintLegacyMD5()
+// (you can get that from md5.Sum(pk.Marshal())).
+//
+// AWS uses the md5 or sha1 of the PKIX DER encoding of the
+// public key, so calculate those fingerprints here.
+func awsKeyFingerprint(pk ssh.PublicKey) (md5fp string, sha1fp string, err error) {
+	// ssh wire format of an RSA public key: format name,
+	// exponent, modulus.
+	var rsaPub struct {
+		Name string
+		E    *big.Int
+		N    *big.Int
+	}
+	if err := ssh.Unmarshal(pk.Marshal(), &rsaPub); err != nil {
+		return "", "", fmt.Errorf("agent: Unmarshal failed to parse public key: %v", err)
+	}
+	rsaPk := rsa.PublicKey{
+		E: int(rsaPub.E.Int64()),
+		N: rsaPub.N,
+	}
+	// Was ignored before; a marshal failure would have produced
+	// fingerprints of empty input.
+	pkix, err := x509.MarshalPKIXPublicKey(&rsaPk)
+	if err != nil {
+		return "", "", fmt.Errorf("could not marshal public key to PKIX: %v", err)
+	}
+	md5pkix := md5.Sum(pkix)
+	sha1pkix := sha1.Sum(pkix)
+	// Render as colon-separated hex pairs, e.g. "ab:cd:...".
+	for i := 0; i < len(md5pkix); i++ {
+		md5fp += fmt.Sprintf(":%02x", md5pkix[i])
+	}
+	for i := 0; i < len(sha1pkix); i++ {
+		sha1fp += fmt.Sprintf(":%02x", sha1pkix[i])
+	}
+	return md5fp[1:], sha1fp[1:], nil
+}
+
+// Create launches a new EC2 instance of the given type from the given
+// image, applies the dispatcher and caller tags, and passes
+// initCommand to the instance as a "#!/bin/sh" user-data script. The
+// public key is imported as an AWS keypair first if AWS doesn't
+// already have it.
+func (instanceSet *ec2InstanceSet) Create(
+	instanceType arvados.InstanceType,
+	imageID cloud.ImageID,
+	newTags cloud.InstanceTags,
+	initCommand cloud.InitCommand,
+	publicKey ssh.PublicKey) (cloud.Instance, error) {
+
+	keyname, err := instanceSet.getKeyName(publicKey)
+	if err != nil {
+		return nil, err
+	}
+
+	ec2tags := []*ec2.Tag{
+		&ec2.Tag{
+			Key:   aws.String(arvadosDispatchID),
+			Value: aws.String(string(instanceSet.dispatcherID)),
+		},
+		&ec2.Tag{
+			Key:   aws.String("arvados-class"),
+			Value: aws.String("dynamic-compute"),
+		},
+	}
+	for k, v := range newTags {
+		ec2tags = append(ec2tags, &ec2.Tag{
+			Key:   aws.String(tagPrefix + k),
+			Value: aws.String(v),
+		})
+	}
+
+	rii := ec2.RunInstancesInput{
+		ImageId:      aws.String(string(imageID)),
+		InstanceType: &instanceType.ProviderType,
+		MaxCount:     aws.Int64(1),
+		MinCount:     aws.Int64(1),
+		KeyName:      &keyname,
+
+		NetworkInterfaces: []*ec2.InstanceNetworkInterfaceSpecification{
+			&ec2.InstanceNetworkInterfaceSpecification{
+				AssociatePublicIpAddress: aws.Bool(false),
+				DeleteOnTermination:      aws.Bool(true),
+				DeviceIndex:              aws.Int64(0),
+				Groups:                   aws.StringSlice(instanceSet.ec2config.SecurityGroupIDs),
+				SubnetId:                 &instanceSet.ec2config.SubnetID,
+			}},
+		DisableApiTermination:             aws.Bool(false),
+		InstanceInitiatedShutdownBehavior: aws.String("terminate"),
+		UserData: aws.String(base64.StdEncoding.EncodeToString([]byte("#!/bin/sh\n" + initCommand + "\n"))),
+		TagSpecifications: []*ec2.TagSpecification{
+			&ec2.TagSpecification{
+				ResourceType: aws.String("instance"),
+				Tags:         ec2tags,
+			}},
+	}
+
+	if instanceType.AddedScratch > 0 {
+		// Round the requested scratch space up to whole GiB,
+		// the unit EBS volume sizes are expressed in.
+		rii.BlockDeviceMappings = []*ec2.BlockDeviceMapping{&ec2.BlockDeviceMapping{
+			DeviceName: aws.String("/dev/xvdt"),
+			Ebs: &ec2.EbsBlockDevice{
+				DeleteOnTermination: aws.Bool(true),
+				VolumeSize:          aws.Int64((int64(instanceType.AddedScratch) + (1<<30 - 1)) >> 30),
+				VolumeType:          &instanceSet.ec2config.EBSVolumeType,
+			}}}
+	}
+
+	if instanceType.Preemptible {
+		// Request a spot instance, capped at the configured
+		// on-demand price, terminated on interruption.
+		rii.InstanceMarketOptions = &ec2.InstanceMarketOptionsRequest{
+			MarketType: aws.String("spot"),
+			SpotOptions: &ec2.SpotMarketOptions{
+				InstanceInterruptionBehavior: aws.String("terminate"),
+				MaxPrice:                     aws.String(fmt.Sprintf("%v", instanceType.Price)),
+			}}
+	}
+
+	rsv, err := instanceSet.client.RunInstances(&rii)
+	if err != nil {
+		return nil, err
+	}
+
+	return &ec2Instance{
+		provider: instanceSet,
+		instance: rsv.Instances[0],
+	}, nil
+}
+
+// getKeyName returns the name of an AWS keypair matching publicKey,
+// importing the key first if AWS doesn't already have it. Results are
+// cached in instanceSet.keys, keyed by md5 fingerprint.
+//
+// The previous inline version returned on the DescribeKeyPairs /
+// ImportKeyPair error paths while still holding keysMtx, deadlocking
+// every subsequent Create call; defer guarantees the unlock here.
+func (instanceSet *ec2InstanceSet) getKeyName(publicKey ssh.PublicKey) (string, error) {
+	md5keyFingerprint, sha1keyFingerprint, err := awsKeyFingerprint(publicKey)
+	if err != nil {
+		return "", fmt.Errorf("Could not make key fingerprint: %v", err)
+	}
+	instanceSet.keysMtx.Lock()
+	defer instanceSet.keysMtx.Unlock()
+	if keyname, ok := instanceSet.keys[md5keyFingerprint]; ok {
+		return keyname, nil
+	}
+	// Match against both fingerprint forms computed by
+	// awsKeyFingerprint.
+	keyout, err := instanceSet.client.DescribeKeyPairs(&ec2.DescribeKeyPairsInput{
+		Filters: []*ec2.Filter{&ec2.Filter{
+			Name:   aws.String("fingerprint"),
+			Values: []*string{&md5keyFingerprint, &sha1keyFingerprint},
+		}},
+	})
+	if err != nil {
+		return "", fmt.Errorf("Could not search for keypair: %v", err)
+	}
+	var keyname string
+	if len(keyout.KeyPairs) > 0 {
+		keyname = *(keyout.KeyPairs[0].KeyName)
+	} else {
+		keyname = "arvados-dispatch-keypair-" + md5keyFingerprint
+		_, err := instanceSet.client.ImportKeyPair(&ec2.ImportKeyPairInput{
+			KeyName:           &keyname,
+			PublicKeyMaterial: ssh.MarshalAuthorizedKey(publicKey),
+		})
+		if err != nil {
+			return "", fmt.Errorf("Could not import keypair: %v", err)
+		}
+	}
+	instanceSet.keys[md5keyFingerprint] = keyname
+	return keyname, nil
+}
+
+// Instances returns every instance tagged with this instance set's
+// dispatcher ID, following DescribeInstances pagination until the
+// last page. Instances in "shutting-down" or "terminated" state are
+// omitted.
+func (instanceSet *ec2InstanceSet) Instances(cloud.InstanceTags) (instances []cloud.Instance, err error) {
+	dii := &ec2.DescribeInstancesInput{
+		Filters: []*ec2.Filter{&ec2.Filter{
+			Name:   aws.String("tag:" + arvadosDispatchID),
+			Values: []*string{aws.String(string(instanceSet.dispatcherID))},
+		}}}
+
+	for {
+		dio, err := instanceSet.client.DescribeInstances(dii)
+		if err != nil {
+			return nil, err
+		}
+
+		for _, rsv := range dio.Reservations {
+			for _, inst := range rsv.Instances {
+				if *inst.State.Name != "shutting-down" && *inst.State.Name != "terminated" {
+					instances = append(instances, &ec2Instance{instanceSet, inst})
+				}
+			}
+		}
+		// A nil NextToken marks the last page of results.
+		if dio.NextToken == nil {
+			return instances, err
+		}
+		dii.NextToken = dio.NextToken
+	}
+}
+
+// Stop is a no-op: unlike the Azure driver, the EC2 driver starts no
+// background goroutines, so there is nothing to shut down. (Receiver
+// renamed from the copy/pasted "az" for consistency with the other
+// ec2InstanceSet methods.)
+func (instanceSet *ec2InstanceSet) Stop() {
+}
+
+// ec2Instance wraps an *ec2.Instance and implements cloud.Instance.
+type ec2Instance struct {
+	provider *ec2InstanceSet
+	instance *ec2.Instance
+}
+
+// ID returns the AWS instance ID.
+func (inst *ec2Instance) ID() cloud.InstanceID {
+	return cloud.InstanceID(*inst.instance.InstanceId)
+}
+
+// String returns the AWS instance ID.
+func (inst *ec2Instance) String() string {
+	return *inst.instance.InstanceId
+}
+
+// ProviderType returns the instance's EC2 instance type.
+func (inst *ec2Instance) ProviderType() string {
+	return *inst.instance.InstanceType
+}
+
+// SetTags applies newTags to the instance via CreateTags, prefixing
+// each key with tagPrefix and always (re)setting the dispatcher-ID
+// tag that Instances filters on.
+func (inst *ec2Instance) SetTags(newTags cloud.InstanceTags) error {
+	ec2tags := []*ec2.Tag{
+		&ec2.Tag{
+			Key:   aws.String(arvadosDispatchID),
+			Value: aws.String(string(inst.provider.dispatcherID)),
+		},
+	}
+	for k, v := range newTags {
+		ec2tags = append(ec2tags, &ec2.Tag{
+			Key:   aws.String(tagPrefix + k),
+			Value: aws.String(v),
+		})
+	}
+
+	_, err := inst.provider.client.CreateTags(&ec2.CreateTagsInput{
+		Resources: []*string{inst.instance.InstanceId},
+		Tags:      ec2tags,
+	})
+
+	return err
+}
+
+// Tags returns the instance's driver-managed tags (those carrying
+// tagPrefix), with the prefix stripped from the keys.
+func (inst *ec2Instance) Tags() cloud.InstanceTags {
+	tags := make(map[string]string)
+
+	for _, t := range inst.instance.Tags {
+		if strings.HasPrefix(*t.Key, tagPrefix) {
+			tags[(*t.Key)[len(tagPrefix):]] = *t.Value
+		}
+	}
+
+	return tags
+}
+
+// Destroy terminates the instance.
+func (inst *ec2Instance) Destroy() error {
+	_, err := inst.provider.client.TerminateInstances(&ec2.TerminateInstancesInput{
+		InstanceIds: []*string{inst.instance.InstanceId},
+	})
+	return err
+}
+
+// Address returns the instance's private IP address, or "" if it has
+// not (yet) been assigned.
+func (inst *ec2Instance) Address() string {
+	if inst.instance.PrivateIpAddress != nil {
+		return *inst.instance.PrivateIpAddress
+	}
+	// golint: no redundant else after a terminating return.
+	return ""
+}
+
+// RemoteUser returns the configured AdminUsername, the account the
+// dispatcher should use for SSH connections to the instance.
+func (inst *ec2Instance) RemoteUser() string {
+	return inst.provider.ec2config.AdminUsername
+}
+
+// VerifyHostKey always returns cloud.ErrNotImplemented; the EC2
+// driver does not implement host key verification.
+func (inst *ec2Instance) VerifyHostKey(ssh.PublicKey, *ssh.Client) error {
+	return cloud.ErrNotImplemented
+}
--- /dev/null
+// Copyright (C) The Arvados Authors. All rights reserved.
+//
+// SPDX-License-Identifier: AGPL-3.0
+//
+//
+// How to manually run individual tests against the real cloud:
+//
+// $ go test -v git.curoverse.com/arvados.git/lib/cloud/ec2 -live-ec2-cfg ec2config.yml -check.f=TestCreate
+//
+// Tests should be run individually and in the order they are listed in the file:
+//
+// Example ec2config.yml:
+//
+// ImageIDForTestSuite: ami-xxxxxxxxxxxxxxxxx
+// DriverParameters:
+// AccessKeyID: XXXXXXXXXXXXXX
+// SecretAccessKey: xxxxxxxxxxxxxxxxxxxx
+// Region: us-east-1
+// SecurityGroupIDs: [sg-xxxxxxxx]
+// SubnetID: subnet-xxxxxxxx
+// AdminUsername: crunch
+
+package ec2
+
+import (
+ "encoding/json"
+ "flag"
+ "testing"
+
+ "git.curoverse.com/arvados.git/lib/cloud"
+ "git.curoverse.com/arvados.git/lib/dispatchcloud/test"
+ "git.curoverse.com/arvados.git/sdk/go/arvados"
+ "git.curoverse.com/arvados.git/sdk/go/config"
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/ec2"
+ "github.com/sirupsen/logrus"
+ check "gopkg.in/check.v1"
+)
+
+var live = flag.String("live-ec2-cfg", "", "Test with real EC2 API, provide config file")
+
+// Gocheck boilerplate
+func Test(t *testing.T) {
+ check.TestingT(t)
+}
+
+type EC2InstanceSetSuite struct{}
+
+var _ = check.Suite(&EC2InstanceSetSuite{})
+
+type testConfig struct {
+ ImageIDForTestSuite string
+ DriverParameters json.RawMessage
+}
+
+// ec2stub is a minimal in-memory implementation of ec2Interface, used
+// when the tests run without -live-ec2-cfg.
+type ec2stub struct {
+}
+
+// ImportKeyPair is a no-op stub.
+func (e *ec2stub) ImportKeyPair(input *ec2.ImportKeyPairInput) (*ec2.ImportKeyPairOutput, error) {
+	return nil, nil
+}
+
+// DescribeKeyPairs returns an empty result, so Create always takes
+// the ImportKeyPair path.
+func (e *ec2stub) DescribeKeyPairs(input *ec2.DescribeKeyPairsInput) (*ec2.DescribeKeyPairsOutput, error) {
+	return &ec2.DescribeKeyPairsOutput{}, nil
+}
+
+// RunInstances returns a single fake instance echoing back the
+// requested tags.
+func (e *ec2stub) RunInstances(input *ec2.RunInstancesInput) (*ec2.Reservation, error) {
+	return &ec2.Reservation{Instances: []*ec2.Instance{&ec2.Instance{
+		InstanceId: aws.String("i-123"),
+		Tags:       input.TagSpecifications[0].Tags,
+	}}}, nil
+}
+
+// DescribeInstances returns no reservations, so Instances() yields an
+// empty list against the stub.
+func (e *ec2stub) DescribeInstances(input *ec2.DescribeInstancesInput) (*ec2.DescribeInstancesOutput, error) {
+	return &ec2.DescribeInstancesOutput{}, nil
+}
+
+// CreateTags is a no-op stub.
+func (e *ec2stub) CreateTags(input *ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error) {
+	return nil, nil
+}
+
+// TerminateInstances is a no-op stub.
+func (e *ec2stub) TerminateInstances(input *ec2.TerminateInstancesInput) (*ec2.TerminateInstancesOutput, error) {
+	return nil, nil
+}
+
+// GetInstanceSet returns an instance set for testing: a real EC2
+// driver configured from the file named by -live-ec2-cfg when that
+// flag is given, otherwise one backed by the in-memory ec2stub
+// client. The returned cluster defines the instance types the tests
+// exercise.
+func GetInstanceSet() (cloud.InstanceSet, cloud.ImageID, arvados.Cluster, error) {
+	cluster := arvados.Cluster{
+		InstanceTypes: arvados.InstanceTypeMap(map[string]arvados.InstanceType{
+			"tiny": arvados.InstanceType{
+				Name:         "tiny",
+				ProviderType: "t2.micro",
+				VCPUs:        1,
+				RAM:          4000000000,
+				Scratch:      10000000000,
+				Price:        .02,
+				Preemptible:  false,
+			},
+			// Exercises the BlockDeviceMappings path in Create.
+			"tiny-with-extra-scratch": arvados.InstanceType{
+				Name:         "tiny",
+				ProviderType: "t2.micro",
+				VCPUs:        1,
+				RAM:          4000000000,
+				Price:        .02,
+				Preemptible:  false,
+				AddedScratch: 20000000000,
+			},
+			// Exercises the spot-instance path in Create.
+			"tiny-preemptible": arvados.InstanceType{
+				Name:         "tiny",
+				ProviderType: "t2.micro",
+				VCPUs:        1,
+				RAM:          4000000000,
+				Scratch:      10000000000,
+				Price:        .02,
+				Preemptible:  true,
+			},
+		})}
+	if *live != "" {
+		var exampleCfg testConfig
+		err := config.LoadFile(&exampleCfg, *live)
+		if err != nil {
+			return nil, cloud.ImageID(""), cluster, err
+		}
+
+		ap, err := newEC2InstanceSet(exampleCfg.DriverParameters, "test123", logrus.StandardLogger())
+		return ap, cloud.ImageID(exampleCfg.ImageIDForTestSuite), cluster, err
+	}
+	ap := ec2InstanceSet{
+		ec2config:    ec2InstanceSetConfig{},
+		dispatcherID: "test123",
+		logger:       logrus.StandardLogger(),
+		client:       &ec2stub{},
+		keys:         make(map[string]string),
+	}
+	return &ap, cloud.ImageID("blob"), cluster, nil
+}
+
+// TestCreate creates a "tiny" instance (stubbed unless -live-ec2-cfg
+// is given) and checks that the request tags round-trip. The stale
+// "c.Assert(err, check.IsNil)" after LoadTestKey is removed: it
+// re-checked GetInstanceSet's err (already handled above), not
+// LoadTestKey's, which reports its own failures via c.
+func (*EC2InstanceSetSuite) TestCreate(c *check.C) {
+	ap, img, cluster, err := GetInstanceSet()
+	if err != nil {
+		c.Fatal("Error making provider", err)
+	}
+
+	pk, _ := test.LoadTestKey(c, "../../dispatchcloud/test/sshkey_dispatch")
+
+	inst, err := ap.Create(cluster.InstanceTypes["tiny"],
+		img, map[string]string{
+			"TestTagName": "test tag value",
+		}, "umask 0600; echo -n test-file-data >/var/run/test-file", pk)
+
+	c.Assert(err, check.IsNil)
+
+	tags := inst.Tags()
+	c.Check(tags["TestTagName"], check.Equals, "test tag value")
+	c.Logf("inst.String()=%v Address()=%v Tags()=%v", inst.String(), inst.Address(), tags)
+}
+
+// TestCreateWithExtraScratch creates an instance type with
+// AddedScratch, exercising the EBS block-device path in Create. The
+// stale err re-assert after LoadTestKey is removed (same rationale as
+// TestCreate).
+func (*EC2InstanceSetSuite) TestCreateWithExtraScratch(c *check.C) {
+	ap, img, cluster, err := GetInstanceSet()
+	if err != nil {
+		c.Fatal("Error making provider", err)
+	}
+
+	pk, _ := test.LoadTestKey(c, "../../dispatchcloud/test/sshkey_dispatch")
+
+	inst, err := ap.Create(cluster.InstanceTypes["tiny-with-extra-scratch"],
+		img, map[string]string{
+			"TestTagName": "test tag value",
+		}, "umask 0600; echo -n test-file-data >/var/run/test-file", pk)
+
+	c.Assert(err, check.IsNil)
+
+	tags := inst.Tags()
+	c.Check(tags["TestTagName"], check.Equals, "test tag value")
+	c.Logf("inst.String()=%v Address()=%v Tags()=%v", inst.String(), inst.Address(), tags)
+}
+
+// TestCreatePreemptible creates a preemptible instance type,
+// exercising the spot-instance path in Create. The stale err
+// re-assert after LoadTestKey is removed (same rationale as
+// TestCreate).
+func (*EC2InstanceSetSuite) TestCreatePreemptible(c *check.C) {
+	ap, img, cluster, err := GetInstanceSet()
+	if err != nil {
+		c.Fatal("Error making provider", err)
+	}
+
+	pk, _ := test.LoadTestKey(c, "../../dispatchcloud/test/sshkey_dispatch")
+
+	inst, err := ap.Create(cluster.InstanceTypes["tiny-preemptible"],
+		img, map[string]string{
+			"TestTagName": "test tag value",
+		}, "umask 0600; echo -n test-file-data >/var/run/test-file", pk)
+
+	c.Assert(err, check.IsNil)
+
+	tags := inst.Tags()
+	c.Check(tags["TestTagName"], check.Equals, "test tag value")
+	c.Logf("inst.String()=%v Address()=%v Tags()=%v", inst.String(), inst.Address(), tags)
+}
+
+// TestTagInstances adds a tag to every listed instance. Against the
+// ec2stub client Instances returns nothing, so the loop body only
+// runs in -live-ec2-cfg mode.
+func (*EC2InstanceSetSuite) TestTagInstances(c *check.C) {
+	ap, _, _, err := GetInstanceSet()
+	if err != nil {
+		c.Fatal("Error making provider", err)
+	}
+
+	l, err := ap.Instances(nil)
+	c.Assert(err, check.IsNil)
+
+	for _, i := range l {
+		tg := i.Tags()
+		tg["TestTag2"] = "123 test tag 2"
+		c.Check(i.SetTags(tg), check.IsNil)
+	}
+}
+
+// TestListInstances logs every listed instance. Against the ec2stub
+// client Instances returns nothing, so the loop body only runs in
+// -live-ec2-cfg mode.
+func (*EC2InstanceSetSuite) TestListInstances(c *check.C) {
+	ap, _, _, err := GetInstanceSet()
+	if err != nil {
+		c.Fatal("Error making provider: ", err)
+	}
+
+	l, err := ap.Instances(nil)
+
+	c.Assert(err, check.IsNil)
+
+	for _, i := range l {
+		tg := i.Tags()
+		c.Logf("%v %v %v", i.String(), i.Address(), tg)
+	}
+}
+
+// TestDestroyInstances terminates every listed instance; intended to
+// run last in -live-ec2-cfg mode so live tests clean up after
+// themselves. Against the ec2stub client the list is empty.
+func (*EC2InstanceSetSuite) TestDestroyInstances(c *check.C) {
+	ap, _, _, err := GetInstanceSet()
+	if err != nil {
+		c.Fatal("Error making provider", err)
+	}
+
+	l, err := ap.Instances(nil)
+	c.Assert(err, check.IsNil)
+
+	for _, i := range l {
+		c.Check(i.Destroy(), check.IsNil)
+	}
+}
"git.curoverse.com/arvados.git/lib/cloud"
"git.curoverse.com/arvados.git/lib/cloud/azure"
+ "git.curoverse.com/arvados.git/lib/cloud/ec2"
"git.curoverse.com/arvados.git/sdk/go/arvados"
"github.com/sirupsen/logrus"
)
var drivers = map[string]cloud.Driver{
"azure": azure.Driver,
+ "ec2": ec2.Driver,
}
func newInstanceSet(cluster *arvados.Cluster, setID cloud.InstanceSetID, logger logrus.FieldLogger) (cloud.InstanceSet, error) {
// private state
subscribers map[<-chan struct{}]chan<- struct{}
- creating map[arvados.InstanceType][]time.Time // start times of unfinished (InstanceSet)Create calls
+ creating map[string]createCall // unfinished (cloud.InstanceSet)Create calls (key is instance secret)
workers map[cloud.InstanceID]*worker
loaded bool // loaded list of instances from InstanceSet at least once
exited map[string]time.Time // containers whose crunch-run proc has exited, but KillContainer has not been called
mMemory *prometheus.GaugeVec
}
+type createCall struct {
+ time time.Time
+ instanceType arvados.InstanceType
+}
+
// Subscribe returns a buffered channel that becomes ready after any
// change to the pool's state that could have scheduling implications:
// a worker's state changes, a new worker appears, the cloud
defer wp.mtx.RUnlock()
unalloc := map[arvados.InstanceType]int{}
creating := map[arvados.InstanceType]int{}
- for it, times := range wp.creating {
- creating[it] = len(times)
+ oldestCreate := map[arvados.InstanceType]time.Time{}
+ for _, cc := range wp.creating {
+ it := cc.instanceType
+ creating[it]++
+ if t, ok := oldestCreate[it]; !ok || t.After(cc.time) {
+ oldestCreate[it] = cc.time
+ }
}
for _, wkr := range wp.workers {
// Skip workers that are not expected to become
}
it := wkr.instType
unalloc[it]++
- if wkr.state == StateUnknown && creating[it] > 0 && wkr.appeared.After(wp.creating[it][0]) {
+ if wkr.state == StateUnknown && creating[it] > 0 && wkr.appeared.After(oldestCreate[it]) {
// If up to N new workers appear in
// Instances() while we are waiting for N
// Create() calls to complete, we assume we're
return false
}
now := time.Now()
- wp.creating[it] = append(wp.creating[it], now)
+ secret := randomHex(instanceSecretLength)
+ wp.creating[secret] = createCall{time: now, instanceType: it}
go func() {
defer wp.notify()
- secret := randomHex(instanceSecretLength)
tags := cloud.InstanceTags{
tagKeyInstanceType: it.Name,
tagKeyIdleBehavior: string(IdleBehaviorRun),
inst, err := wp.instanceSet.Create(it, wp.imageID, tags, initCmd, wp.installPublicKey)
wp.mtx.Lock()
defer wp.mtx.Unlock()
- // Remove our timestamp marker from wp.creating
- for i, t := range wp.creating[it] {
- if t == now {
- copy(wp.creating[it][i:], wp.creating[it][i+1:])
- wp.creating[it] = wp.creating[it][:len(wp.creating[it])-1]
- break
- }
- }
+ // delete() is deferred so the updateWorker() call
+ // below knows to use StateBooting when adding a new
+ // worker.
+ defer delete(wp.creating, secret)
if err != nil {
if err, ok := err.(cloud.QuotaError); ok && err.IsQuotaError() {
wp.atQuotaErr = err
wp.instanceSet.throttleCreate.CheckRateLimitError(err, wp.logger, "create instance", wp.notify)
return
}
- wp.updateWorker(inst, it, StateBooting)
+ wp.updateWorker(inst, it)
}()
return true
}
return nil
}
-// Add or update worker attached to the given instance. Use
-// initialState if a new worker is created.
+// Add or update worker attached to the given instance.
//
// The second return value is true if a new worker is created.
//
+// A newly added instance has state=StateBooting if its tags match an
+// entry in wp.creating, otherwise StateUnknown.
+//
// Caller must have lock.
-func (wp *Pool) updateWorker(inst cloud.Instance, it arvados.InstanceType, initialState State) (*worker, bool) {
+func (wp *Pool) updateWorker(inst cloud.Instance, it arvados.InstanceType) (*worker, bool) {
inst = tagVerifier{inst}
id := inst.ID()
if wkr := wp.workers[id]; wkr != nil {
wkr.executor.SetTarget(inst)
wkr.instance = inst
wkr.updated = time.Now()
- if initialState == StateBooting && wkr.state == StateUnknown {
- wkr.state = StateBooting
- }
wkr.saveTags()
return wkr, false
}
+ state := StateUnknown
+ if _, ok := wp.creating[inst.Tags()[tagKeyInstanceSecret]]; ok {
+ state = StateBooting
+ }
+
// If an instance has a valid IdleBehavior tag when it first
// appears, initialize the new worker accordingly (this is how
// we restore IdleBehavior that was set by a prior dispatch
"Address": inst.Address(),
})
logger.WithFields(logrus.Fields{
- "State": initialState,
+ "State": state,
"IdleBehavior": idleBehavior,
}).Infof("instance appeared in cloud")
now := time.Now()
wp: wp,
logger: logger,
executor: wp.newExecutor(inst),
- state: initialState,
+ state: state,
idleBehavior: idleBehavior,
instance: inst,
instType: it,
func (wp *Pool) runMetrics() {
ch := wp.Subscribe()
defer wp.Unsubscribe(ch)
+ wp.updateMetrics()
for range ch {
wp.updateMetrics()
}
}
func (wp *Pool) setup() {
- wp.creating = map[arvados.InstanceType][]time.Time{}
+ wp.creating = map[string]createCall{}
wp.exited = map[string]time.Time{}
wp.workers = map[cloud.InstanceID]*worker{}
wp.subscribers = map[<-chan struct{}]chan<- struct{}{}
wp.logger.WithField("Instance", inst).Errorf("unknown InstanceType tag %q --- ignoring", itTag)
continue
}
- if wkr, isNew := wp.updateWorker(inst, it, StateUnknown); isNew {
+ if wkr, isNew := wp.updateWorker(inst, it); isNew {
notify = true
} else if wkr.state == StateShutdown && time.Since(wkr.destroyed) > wp.timeoutShutdown {
wp.logger.WithField("Instance", inst).Info("worker still listed after shutdown; retrying")
logger = logger.WithField("Instance", wkr.instance.ID())
logger.Debug("starting container")
wkr.starting[ctr.UUID] = struct{}{}
- wkr.state = StateRunning
+ if wkr.state != StateRunning {
+ wkr.state = StateRunning
+ go wkr.wp.notify()
+ }
go func() {
env := map[string]string{
"ARVADOS_API_HOST": wkr.wp.arvClient.APIHost,
if record["output_uuid"]:
if self.arvrunner.trash_intermediate or self.arvrunner.intermediate_output_ttl:
# Compute the trash time to avoid requesting the collection record.
- trash_at = ciso8601.parse_datetime(record["modified_at"]) + datetime.timedelta(0, self.arvrunner.intermediate_output_ttl)
+ trash_at = ciso8601.parse_datetime_as_naive(record["modified_at"]) + datetime.timedelta(0, self.arvrunner.intermediate_output_ttl)
aftertime = " at %s" % trash_at.strftime("%Y-%m-%d %H:%M:%S UTC") if self.arvrunner.intermediate_output_ttl else ""
orpart = ", or" if self.arvrunner.trash_intermediate and self.arvrunner.intermediate_output_ttl else ""
oncomplete = " upon successful completion of the workflow" if self.arvrunner.trash_intermediate else ""
def __init__(self, runtime_status_update_func):
super(RuntimeStatusLoggingHandler, self).__init__()
self.runtime_status_update = runtime_status_update_func
+ self.updatingRuntimeStatus = False
def emit(self, record):
kind = None
kind = 'error'
elif record.levelno >= logging.WARNING:
kind = 'warning'
- if kind is not None:
- log_msg = record.getMessage()
- if '\n' in log_msg:
- # If the logged message is multi-line, use its first line as status
- # and the rest as detail.
- status, detail = log_msg.split('\n', 1)
- self.runtime_status_update(
- kind,
- "%s: %s" % (record.name, status),
- detail
- )
- else:
- self.runtime_status_update(
- kind,
- "%s: %s" % (record.name, record.getMessage())
- )
+ if kind is not None and self.updatingRuntimeStatus is not True:
+ self.updatingRuntimeStatus = True
+ try:
+ log_msg = record.getMessage()
+ if '\n' in log_msg:
+ # If the logged message is multi-line, use its first line as status
+ # and the rest as detail.
+ status, detail = log_msg.split('\n', 1)
+ self.runtime_status_update(
+ kind,
+ "%s: %s" % (record.name, status),
+ detail
+ )
+ else:
+ self.runtime_status_update(
+ kind,
+ "%s: %s" % (record.name, record.getMessage())
+ )
+ finally:
+ self.updatingRuntimeStatus = False
+
class ArvCwlExecutor(object):
"""Execute a CWL tool or workflow, submit work (using either jobs or
return loadingContext, runtimeContext
+ # Helper function to set up the ArvCwlExecutor to use the containers api
+ # and test that the RuntimeStatusLoggingHandler is set up correctly
+ def setup_and_test_container_executor_and_logging(self, gcc_mock) :
+ api = mock.MagicMock()
+ api._rootDesc = copy.deepcopy(get_rootDesc())
+ del api._rootDesc.get('resources')['jobs']['methods']['create']
+
+ # Make sure ArvCwlExecutor thinks it's running inside a container so it
+ # adds the logging handler that will call runtime_status_update() mock
+ self.assertFalse(gcc_mock.called)
+ runner = arvados_cwl.ArvCwlExecutor(api)
+ self.assertEqual(runner.work_api, 'containers')
+ root_logger = logging.getLogger('')
+ handlerClasses = [h.__class__ for h in root_logger.handlers]
+ self.assertTrue(arvados_cwl.RuntimeStatusLoggingHandler in handlerClasses)
+ return runner
+
# The test passes no builder.resources
# Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}
@mock.patch("arvados.commands.keepdocker.list_images_in_arv")
arvjob.output_callback.assert_called_with({"out": "stuff"}, "success")
runner.add_intermediate_output.assert_called_with("zzzzz-4zz18-zzzzzzzzzzzzzz2")
+ # Test to make sure we dont call runtime_status_update if we already did
+ # some where higher up in the call stack
@mock.patch("arvados_cwl.util.get_current_container")
- @mock.patch("arvados.collection.CollectionReader")
- @mock.patch("arvados.collection.Collection")
- def test_child_failure(self, col, reader, gcc_mock):
- api = mock.MagicMock()
- api._rootDesc = copy.deepcopy(get_rootDesc())
- del api._rootDesc.get('resources')['jobs']['methods']['create']
+ def test_recursive_runtime_status_update(self, gcc_mock):
+ self.setup_and_test_container_executor_and_logging(gcc_mock)
+ root_logger = logging.getLogger('')
- # Set up runner with mocked runtime_status_update()
- self.assertFalse(gcc_mock.called)
- runtime_status_update = mock.MagicMock()
- arvados_cwl.ArvCwlExecutor.runtime_status_update = runtime_status_update
- runner = arvados_cwl.ArvCwlExecutor(api)
- self.assertEqual(runner.work_api, 'containers')
+ # get_current_container is invoked when we call runtime_status_update
+ # so try to log again!
+ gcc_mock.side_effect = lambda *args: root_logger.error("Second Error")
+ try:
+ root_logger.error("First Error")
+ except RuntimeError:
+ self.fail("RuntimeStatusLoggingHandler should not be called recursively")
- # Make sure ArvCwlExecutor thinks it's running inside a container so it
- # adds the logging handler that will call runtime_status_update() mock
+ @mock.patch("arvados_cwl.ArvCwlExecutor.runtime_status_update")
+ @mock.patch("arvados_cwl.util.get_current_container")
+ @mock.patch("arvados.collection.CollectionReader")
+ @mock.patch("arvados.collection.Collection")
+ def test_child_failure(self, col, reader, gcc_mock, rts_mock):
+ runner = self.setup_and_test_container_executor_and_logging(gcc_mock)
+
gcc_mock.return_value = {"uuid" : "zzzzz-dz642-zzzzzzzzzzzzzzz"}
self.assertTrue(gcc_mock.called)
- root_logger = logging.getLogger('')
- handlerClasses = [h.__class__ for h in root_logger.handlers]
- self.assertTrue(arvados_cwl.RuntimeStatusLoggingHandler in handlerClasses)
runner.num_retries = 0
runner.ignore_docker_for_reuse = False
"modified_at": "2017-05-26T12:01:22Z"
})
- runtime_status_update.assert_called_with(
+ rts_mock.assert_called_with(
'error',
'arvados.cwl-runner: [container testjob] (zzzzz-xvhdp-zzzzzzzzzzzzzzz) error log:',
' ** log is empty **'
}
type InstanceType struct {
- Name string
- ProviderType string
- VCPUs int
- RAM ByteSize
- Scratch ByteSize
- Price float64
- Preemptible bool
+ Name string
+ ProviderType string
+ VCPUs int
+ RAM ByteSize
+ Scratch ByteSize
+ IncludedScratch ByteSize
+ AddedScratch ByteSize
+ Price float64
+ Preemptible bool
}
type Dispatch struct {
if t.ProviderType == "" {
t.ProviderType = t.Name
}
+ if t.Scratch == 0 {
+ t.Scratch = t.IncludedScratch + t.AddedScratch
+ } else if t.AddedScratch == 0 {
+ t.AddedScratch = t.Scratch - t.IncludedScratch
+ } else if t.IncludedScratch == 0 {
+ t.IncludedScratch = t.Scratch - t.AddedScratch
+ }
+
+ if t.Scratch != (t.IncludedScratch + t.AddedScratch) {
+ return fmt.Errorf("%v: Scratch != (IncludedScratch + AddedScratch)", t.Name)
+ }
(*it)[t.Name] = t
}
return nil
Docker metadata links to sort them from least to most preferred.
"""
try:
- image_timestamp = ciso8601.parse_datetime(
+ image_timestamp = ciso8601.parse_datetime_as_naive(
link['properties']['image_timestamp'])
except (KeyError, ValueError):
image_timestamp = EARLIEST_DATETIME
try:
- created_timestamp = ciso8601.parse_datetime(link['created_at'])
+ created_timestamp = ciso8601.parse_datetime_as_naive(link['created_at'])
except ValueError:
created_timestamp = None
return (image_timestamp, created_timestamp)
gem 'themes_for_rails', git: 'https://github.com/curoverse/themes_for_rails'
-gem 'arvados', '>= 0.1.20150615153458'
+gem 'arvados', '>= 1.3.1.20190301212059'
gem 'httpclient'
gem 'sshkey'
activemodel (>= 3.0.0)
activesupport (>= 3.0.0)
rack (>= 1.1.0)
- addressable (2.5.2)
+ addressable (2.6.0)
public_suffix (>= 2.0.2, < 4.0)
andand (1.3.3)
arel (6.0.4)
- arvados (0.1.20180302192246)
+ arvados (1.3.1.20190301212059)
activesupport (>= 3)
andand (~> 1.3, >= 1.3.3)
- google-api-client (>= 0.7, < 0.8.9)
+ cure-google-api-client (>= 0.7, < 0.8.9)
i18n (~> 0)
json (>= 1.7.7, < 3)
jwt (>= 0.1.5, < 2)
- arvados-cli (1.1.4.20180723133344)
+ arvados-cli (1.3.1.20190211211047)
activesupport (>= 3.2.13, < 5)
andand (~> 1.3, >= 1.3.3)
- arvados (~> 0.1, >= 0.1.20150128223554)
+ arvados (~> 1.3.0, >= 1.3.0)
curb (~> 0.8)
- google-api-client (~> 0.6, >= 0.6.3, < 0.8.9)
+ cure-google-api-client (~> 0.6, >= 0.6.3, < 0.8.9)
json (>= 1.7.7, < 3)
oj (~> 3.0)
- trollop (~> 2.0)
+ optimist (~> 3.0)
autoparse (0.3.3)
addressable (>= 2.3.1)
extlib (>= 0.9.15)
coffee-script-source (1.12.2)
concurrent-ruby (1.1.4)
crass (1.0.4)
- curb (0.9.6)
+ curb (0.9.8)
+ cure-google-api-client (0.8.7.1)
+ activesupport (>= 3.2, < 5.0)
+ addressable (~> 2.3)
+ autoparse (~> 0.3)
+ extlib (~> 0.9)
+ faraday (~> 0.9)
+ googleauth (~> 0.3)
+ launchy (~> 2.4)
+ multi_json (~> 1.10)
+ retriable (~> 1.4)
+ signet (~> 0.6)
database_cleaner (1.7.0)
erubis (2.7.0)
eventmachine (1.2.6)
websocket-driver (>= 0.5.1)
globalid (0.4.1)
activesupport (>= 4.2.0)
- google-api-client (0.8.7)
- activesupport (>= 3.2, < 5.0)
- addressable (~> 2.3)
- autoparse (~> 0.3)
- extlib (~> 0.9)
- faraday (~> 0.9)
- googleauth (~> 0.3)
- launchy (~> 2.4)
- multi_json (~> 1.10)
- retriable (~> 1.4)
- signet (~> 0.6)
- googleauth (0.6.2)
+ googleauth (0.8.0)
faraday (~> 0.12)
jwt (>= 1.4, < 3.0)
- logging (~> 2.0)
- memoist (~> 0.12)
+ memoist (~> 0.16)
multi_json (~> 1.11)
- os (~> 0.9)
+ os (>= 0.9, < 2.0)
signet (~> 0.7)
hashie (3.5.7)
highline (1.7.10)
rails-dom-testing (>= 1, < 3)
railties (>= 4.2.0)
thor (>= 0.14, < 2.0)
- json (2.1.0)
+ json (2.2.0)
jwt (1.5.6)
launchy (2.4.3)
addressable (~> 2.3)
libv8 (3.16.14.19)
- little-plugger (1.1.4)
- logging (2.2.2)
- little-plugger (~> 1.1)
- multi_json (~> 1.10)
lograge (0.10.0)
actionpack (>= 4)
activesupport (>= 4)
multi_json (~> 1.3)
multi_xml (~> 0.5)
rack (>= 1.2, < 3)
- oj (3.6.4)
+ oj (3.7.9)
omniauth (1.4.3)
hashie (>= 1.2, < 4)
rack (>= 1.6.2, < 3)
omniauth-oauth2 (1.5.0)
oauth2 (~> 1.1)
omniauth (~> 1.2)
- os (0.9.6)
+ optimist (3.0.0)
+ os (1.0.0)
passenger (5.3.0)
rack
rake (>= 0.8.1)
power_assert (1.1.1)
protected_attributes (1.1.4)
activemodel (>= 4.0.1, < 5.0)
- public_suffix (3.0.2)
+ public_suffix (3.0.3)
rack (1.6.11)
rack-test (0.6.3)
rack (>= 1.0)
sass (~> 3.2.2)
sprockets (~> 2.8, < 3.0)
sprockets-rails (~> 2.0)
- signet (0.8.1)
+ signet (0.11.0)
addressable (~> 2.3)
faraday (~> 0.9)
jwt (>= 1.5, < 3.0)
thor (0.20.3)
thread_safe (0.3.6)
tilt (1.4.1)
- trollop (2.1.2)
+ trollop (2.9.9)
tzinfo (1.2.5)
thread_safe (~> 0.1)
uglifier (2.7.2)
activerecord-deprecated_finders
acts_as_api
andand
- arvados (>= 0.1.20150615153458)
+ arvados (>= 1.3.1.20190301212059)
arvados-cli
coffee-rails (~> 4.0)
database_cleaner
# SPDX-License-Identifier: AGPL-3.0
require 'whitelist_update'
+require 'arvados/collection'
class ContainerRequest < ArvadosModel
include ArvadosModelUpdates
coll = Collection.new(
owner_uuid: self.owner_uuid,
name: coll_name,
+ manifest_text: "",
properties: {
'type' => out_type,
'container_request' => uuid,
})
end
+
+ if out_type == "log"
+ src = Arv::Collection.new(manifest)
+ dst = Arv::Collection.new(coll.manifest_text)
+ dst.cp_r("./", ".", src)
+ dst.cp_r("./", "log for container #{container.uuid}", src)
+ manifest = dst.manifest_text
+ end
+
coll.assign_attributes(
- portable_data_hash: pdh,
+ portable_data_hash: Digest::MD5.hexdigest(manifest) + '+' + manifest.bytesize.to_s,
manifest_text: manifest,
trash_at: trash_at,
delete_at: trash_at)
return false
else
self.container_count += 1
+ if self.container_uuid_was
+ old_container = Container.find_by_uuid(self.container_uuid_was)
+ old_logs = Collection.where(portable_data_hash: old_container.log).first
+ if old_logs
+ log_coll = self.log_uuid.nil? ? nil : Collection.where(uuid: self.log_uuid).first
+ if self.log_uuid.nil?
+ log_coll = Collection.new(
+ owner_uuid: self.owner_uuid,
+ name: coll_name = "Container log for request #{uuid}",
+ manifest_text: "")
+ end
+
+ # copy logs from old container into CR's log collection
+ src = Arv::Collection.new(old_logs.manifest_text)
+ dst = Arv::Collection.new(log_coll.manifest_text)
+ dst.cp_r("./", "log for container #{old_container.uuid}", src)
+ manifest = dst.manifest_text
+
+ log_coll.assign_attributes(
+ portable_data_hash: Digest::MD5.hexdigest(manifest) + '+' + manifest.bytesize.to_s,
+ manifest_text: manifest)
+ log_coll.save_with_unique_name!
+ self.log_uuid = log_coll.uuid
+ end
+ end
end
end
end
require 'test_helper'
require 'helpers/container_test_helper'
require 'helpers/docker_migration_helper'
+require 'arvados/collection'
class ContainerRequestTest < ActiveSupport::TestCase
include DockerMigrationHelper
cr.reload
assert_equal "Final", cr.state
assert_equal users(:active).uuid, cr.modified_by_user_uuid
- ['output', 'log'].each do |out_type|
- pdh = Container.find_by_uuid(cr.container_uuid).send(out_type)
- assert_equal(1, Collection.where(portable_data_hash: pdh,
- owner_uuid: project.uuid).count,
- "Container #{out_type} should be copied to #{project.uuid}")
- end
+
assert_not_nil cr.output_uuid
assert_not_nil cr.log_uuid
output = Collection.find_by_uuid cr.output_uuid
assert_equal output_pdh, output.portable_data_hash
+ assert_equal output.owner_uuid, project.uuid, "Container output should be copied to #{project.uuid}"
+
log = Collection.find_by_uuid cr.log_uuid
- assert_equal log_pdh, log.portable_data_hash
+ assert_equal log.manifest_text, ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar
+./log\\040for\\040container\\040#{cr.container_uuid} 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n"
+
+ assert_equal log.owner_uuid, project.uuid, "Container log should be copied to #{project.uuid}"
end
test "Container makes container request, then is cancelled" do
cr.reload
assert_equal "Final", cr.state
assert_equal prev_container_uuid, cr.container_uuid
+ end
+
+
+ test "Retry saves logs from previous attempts" do
+ set_user_from_auth :active
+ cr = create_minimal_req!(priority: 1, state: "Committed", container_count_max: 3)
+
+ c = act_as_system_user do
+ c = Container.find_by_uuid(cr.container_uuid)
+ c.update_attributes!(state: Container::Locked)
+ c.update_attributes!(state: Container::Running)
+ c
+ end
+
+ container_uuids = []
+
+ [0, 1, 2].each do
+ cr.reload
+ assert_equal "Committed", cr.state
+ container_uuids << cr.container_uuid
+
+ c = act_as_system_user do
+ logc = Collection.new(manifest_text: ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n")
+ logc.save!
+ c = Container.find_by_uuid(cr.container_uuid)
+ c.update_attributes!(state: Container::Cancelled, log: logc.portable_data_hash)
+ c
+ end
+ end
+
+ container_uuids.sort!
+
+ cr.reload
+ assert_equal "Final", cr.state
+ assert_equal 3, cr.container_count
+ assert_equal ". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar
+./log\\040for\\040container\\040#{container_uuids[0]} 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar
+./log\\040for\\040container\\040#{container_uuids[1]} 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar
+./log\\040for\\040container\\040#{container_uuids[2]} 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar
+" , Collection.find_by_uuid(cr.log_uuid).manifest_text
end
cr2.reload
assert_equal cr1log_uuid, cr1.log_uuid
assert_equal cr2log_uuid, cr2.log_uuid
- assert_equal [logpdh_time2], Collection.where(uuid: [cr1log_uuid, cr2log_uuid]).to_a.collect(&:portable_data_hash).uniq
+ assert_equal 1, Collection.where(uuid: [cr1log_uuid, cr2log_uuid]).to_a.collect(&:portable_data_hash).uniq.length
+ assert_equal ". acbd18db4cc2f85cedef654fccc4a4d8+3 cdd549ae79fe6640fa3d5c6261d8303c+195 0:3:foo.txt 3:195:zzzzz-8i9sb-0vsrcqi7whchuil.log.txt
+./log\\040for\\040container\\040#{cr1.container_uuid} acbd18db4cc2f85cedef654fccc4a4d8+3 cdd549ae79fe6640fa3d5c6261d8303c+195 0:3:foo.txt 3:195:zzzzz-8i9sb-0vsrcqi7whchuil.log.txt
+", Collection.find_by_uuid(cr1log_uuid).manifest_text
end
["auth_uuid", "runtime_token"].each do |tok|
if not t:
return 0
try:
- return calendar.timegm(ciso8601.parse_datetime(t).timetuple())
+ return calendar.timegm(ciso8601.parse_datetime_as_naive(t).timetuple())
except (TypeError, ValueError):
return 0
cw.write("data 8")
cw.start_new_stream('edgecases')
- for f in ":/.../-/*/\x01\\/ ".split("/"):
+ for f in ":/.../-/*/ ".split("/"):
cw.start_new_file(f)
cw.write('x')
- for f in ":/.../-/*/\x01\\/ ".split("/"):
+ for f in ":/.../-/*/ ".split("/"):
cw.start_new_stream('edgecases/dirs/' + f)
cw.start_new_file('x/x')
cw.write('x')
self.assertDirContents('dir2', ['thing5.txt', 'thing6.txt', 'dir3'])
self.assertDirContents('dir2/dir3', ['thing7.txt', 'thing8.txt'])
self.assertDirContents('edgecases',
- "dirs/:/.../-/*/\x01\\/ ".split("/"))
+ "dirs/:/.../-/*/ ".split("/"))
self.assertDirContents('edgecases/dirs',
- ":/.../-/*/\x01\\/ ".split("/"))
+ ":/.../-/*/ ".split("/"))
files = {'thing1.txt': 'data 1',
'thing2.txt': 'data 2',
"revision": "78439966b38d69bf38227fbf57ac8a6fee70f69a",
"revisionTime": "2017-08-04T20:09:54Z"
},
+ {
+ "checksumSHA1": "k59wLJfyqGB04o238WhKSAzSz9M=",
+ "path": "github.com/aws/aws-sdk-go/aws",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
+ {
+ "checksumSHA1": "Y9W+4GimK4Fuxq+vyIskVYFRnX4=",
+ "path": "github.com/aws/aws-sdk-go/aws/awserr",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
+ {
+ "checksumSHA1": "PEDqMAEPxlh9Y8/dIbHlE6A7LEA=",
+ "path": "github.com/aws/aws-sdk-go/aws/awsutil",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
+ {
+ "checksumSHA1": "KpW2B6W3J1yB/7QJWjjtsKz1Xbc=",
+ "path": "github.com/aws/aws-sdk-go/aws/client",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
+ {
+ "checksumSHA1": "uEJU4I6dTKaraQKvrljlYKUZwoc=",
+ "path": "github.com/aws/aws-sdk-go/aws/client/metadata",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
+ {
+ "checksumSHA1": "GvmthjOyNZGOKmXK4XVrbT5+K9I=",
+ "path": "github.com/aws/aws-sdk-go/aws/corehandlers",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
+ {
+ "checksumSHA1": "QHizt8XKUpuslIZv6EH6ENiGpGA=",
+ "path": "github.com/aws/aws-sdk-go/aws/credentials",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
+ {
+ "checksumSHA1": "JTilCBYWVAfhbKSnrxCNhE8IFns=",
+ "path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
+ {
+ "checksumSHA1": "1pENtl2K9hG7qoB7R6J7dAHa82g=",
+ "path": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
+ {
+ "checksumSHA1": "sPtOSV32SZr2xN7vZlF4FXo43/o=",
+ "path": "github.com/aws/aws-sdk-go/aws/credentials/processcreds",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
+ {
+ "checksumSHA1": "JEYqmF83O5n5bHkupAzA6STm0no=",
+ "path": "github.com/aws/aws-sdk-go/aws/credentials/stscreds",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
+ {
+ "checksumSHA1": "3pJft1H34eTYK6s6p3ijj3mGtc4=",
+ "path": "github.com/aws/aws-sdk-go/aws/csm",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
+ {
+ "checksumSHA1": "7AmyyJXVkMdmy8dphC3Nalx5XkI=",
+ "path": "github.com/aws/aws-sdk-go/aws/defaults",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
+ {
+ "checksumSHA1": "47hnR1KYqZDBT3xmHuS7cNtqHP8=",
+ "path": "github.com/aws/aws-sdk-go/aws/ec2metadata",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
+ {
+ "checksumSHA1": "pcWH1AkR7sUs84cN/XTD9Jexf2Q=",
+ "path": "github.com/aws/aws-sdk-go/aws/endpoints",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
+ {
+ "checksumSHA1": "nhavXPspOdqm5iAvIGgmZmXk4aI=",
+ "path": "github.com/aws/aws-sdk-go/aws/request",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
+ {
+ "checksumSHA1": "w4tSwNFNJ4cGgjYEdAgsDnikqec=",
+ "path": "github.com/aws/aws-sdk-go/aws/session",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
+ {
+ "checksumSHA1": "C9uAu9gsLIpJGIX6/5P+n3s9wQo=",
+ "path": "github.com/aws/aws-sdk-go/aws/signer/v4",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
+ {
+ "checksumSHA1": "Fe2TPw9X2UvlkRaOS7LPJlpkuTo=",
+ "path": "github.com/aws/aws-sdk-go/internal/ini",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
+ {
+ "checksumSHA1": "wjxQlU1PYxrDRFoL1Vek8Wch7jk=",
+ "path": "github.com/aws/aws-sdk-go/internal/sdkio",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
+ {
+ "checksumSHA1": "MYLldFRnsZh21TfCkgkXCT3maPU=",
+ "path": "github.com/aws/aws-sdk-go/internal/sdkrand",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
+ {
+ "checksumSHA1": "tQVg7Sz2zv+KkhbiXxPH0mh9spg=",
+ "path": "github.com/aws/aws-sdk-go/internal/sdkuri",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
+ {
+ "checksumSHA1": "sXiZ5x6j2FvlIO57pboVnRTm7QA=",
+ "path": "github.com/aws/aws-sdk-go/internal/shareddefaults",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
+ {
+ "checksumSHA1": "NtXXi501Kou3laVAsJfcbKSkNI8=",
+ "path": "github.com/aws/aws-sdk-go/private/protocol",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
+ {
+ "checksumSHA1": "0cZnOaE1EcFUuiu4bdHV2k7slQg=",
+ "path": "github.com/aws/aws-sdk-go/private/protocol/ec2query",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
+ {
+ "checksumSHA1": "lj56XJFI2OSp+hEOrFZ+eiEi/yM=",
+ "path": "github.com/aws/aws-sdk-go/private/protocol/query",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
+ {
+ "checksumSHA1": "+O6A945eTP9plLpkEMZB0lwBAcg=",
+ "path": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
+ {
+ "checksumSHA1": "RDOk9se2S83/HAYmWnpoW3bgQfQ=",
+ "path": "github.com/aws/aws-sdk-go/private/protocol/rest",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
+ {
+ "checksumSHA1": "B8unEuOlpQfnig4cMyZtXLZVVOs=",
+ "path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
+ {
+ "checksumSHA1": "uvEbLM/ZodhtEUVTEoC+Lbc9PHg=",
+ "path": "github.com/aws/aws-sdk-go/service/ec2",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
+ {
+ "checksumSHA1": "HMY+b4YBLVvWoKm5vB+H7tpKiTI=",
+ "path": "github.com/aws/aws-sdk-go/service/sts",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
{
"checksumSHA1": "spyv5/YFBjYyZLZa1U2LBfDR8PM=",
"path": "github.com/beorn7/perks/quantile",
"revision": "2bb1b664bcff821e02b2a0644cd29c7e824d54f8",
"revisionTime": "2015-08-17T12:26:01Z"
},
+ {
+ "checksumSHA1": "blwbl9vPvRLtL5QlZgfpLvsFiZ4=",
+ "origin": "github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath",
+ "path": "github.com/jmespath/go-jmespath",
+ "revision": "d496c5aab9b8ba36936e457a488e971b4f9fd891",
+ "revisionTime": "2019-03-06T20:18:39Z"
+ },
{
"checksumSHA1": "X7g98YfLr+zM7aN76AZvAfpZyfk=",
"path": "github.com/julienschmidt/httprouter",