X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/fd5a11b21af83870c4122afdf94844dd4cf63cc6..b026643583d835a15d3baf6edef2df16ce678307:/sdk/cli/bin/crunch-job

diff --git a/sdk/cli/bin/crunch-job b/sdk/cli/bin/crunch-job
index 5eb2f902f9..ca6c47bfce 100755
--- a/sdk/cli/bin/crunch-job
+++ b/sdk/cli/bin/crunch-job
@@ -126,6 +126,7 @@ my $jobspec;
 my $job_api_token;
 my $no_clear_tmp;
 my $resume_stash;
+my $cgroup_root = "/sys/fs/cgroup";
 my $docker_bin = "docker.io";
 my $docker_run_args = "";
 GetOptions('force-unlock' => \$force_unlock,
@@ -134,6 +135,7 @@ GetOptions('force-unlock' => \$force_unlock,
            'job-api-token=s' => \$job_api_token,
            'no-clear-tmp' => \$no_clear_tmp,
            'resume-stash=s' => \$resume_stash,
+           'cgroup-root=s' => \$cgroup_root,
            'docker-bin=s' => \$docker_bin,
            'docker-run-args=s' => \$docker_run_args,
     );
@@ -415,11 +417,13 @@ if (!defined $no_clear_tmp) {
 # If this job requires a Docker image, install that.
 my ($docker_locator, $docker_stream, $docker_hash, $docker_limitmem, $dockeruserarg);
 if ($docker_locator = $Job->{docker_image_locator}) {
+  Log (undef, "Install docker image $docker_locator");
   ($docker_stream, $docker_hash) = find_docker_image($docker_locator);
   if (!$docker_hash)
   {
     croak("No Docker image hash found from locator $docker_locator");
   }
+  Log (undef, "docker image hash is $docker_hash");
   $docker_stream =~ s/^\.//;
   my $docker_install_script = qq{
 if ! $docker_bin images -q --no-trunc --all | grep -qxF \Q$docker_hash\E; then
@@ -430,7 +434,7 @@ fi
   if ($docker_pid == 0)
   {
     srun (["srun", "--nodelist=" . join(',', @node)],
-          ["/bin/sh", "-ec", $docker_install_script]);
+          ["/bin/bash", "-o", "pipefail", "-ec", $docker_install_script]);
     exit ($?);
   }
   while (1)
@@ -441,8 +445,8 @@ fi
   }
   if ($? != 0)
   {
-    croak("Installing Docker image from $docker_locator exited "
-          .exit_status_s($?));
+    Log(undef, "Installing Docker image from $docker_locator exited " . exit_status_s($?));
+    exit(EX_RETRY_UNLOCKED);
   }
 
 # Determine whether this version of Docker supports memory+swap limits.
@@ -913,7 +917,7 @@ for (my $todo_ptr = 0; $todo_ptr <= $#jobstep_todo; $todo_ptr ++)
     {
       my $containername = "$Jobstep->{arvados_task}->{uuid}-$Jobstep->{failures}";
       my $cidfile = "$ENV{CRUNCH_TMP}/$containername.cid";
-      $command .= "crunchstat -cgroup-root=/sys/fs/cgroup -cgroup-parent=docker -cgroup-cid=$cidfile -poll=10000 ";
+      $command .= "crunchstat -cgroup-root=\Q$cgroup_root\E -cgroup-parent=docker -cgroup-cid=$cidfile -poll=10000 ";
       $command .= "$docker_bin run $docker_run_args --name=$containername --attach=stdout --attach=stderr --attach=stdin -i \Q$dockeruserarg\E --cidfile=$cidfile --sig-proxy ";
       # We only set memory limits if Docker lets us limit both memory and swap.
       # Memory limits alone have been supported longer, but subprocesses tend
@@ -993,7 +997,7 @@ for (my $todo_ptr = 0; $todo_ptr <= $#jobstep_todo; $todo_ptr ++)
       }
     } else {
       # Non-docker run
-      $command .= "crunchstat -cgroup-root=/sys/fs/cgroup -poll=10000 ";
+      $command .= "crunchstat -cgroup-root=\Q$cgroup_root\E -poll=10000 ";
       $command .= $stdbuf;
       $command .= "perl - $ENV{CRUNCH_SRC}/crunch_scripts/" . $Job->{"script"};
     }
@@ -1057,12 +1061,14 @@ for (my $todo_ptr = 0; $todo_ptr <= $#jobstep_todo; $todo_ptr ++)
       check_refresh_wanted();
       check_squeue();
       update_progress_stats();
-      select (undef, undef, undef, 0.1);
     }
     elsif (time - $progress_stats_updated >= 30 || $progress_is_dirty)
    {
       update_progress_stats();
     }
+    if (!$gotsome) {
+      select (undef, undef, undef, 0.1);
+    }
     $working_slot_count = scalar(grep { $_->{node}->{fail_count} == 0 &&
                                         $_->{node}->{hold_count} < 4 } @slot);
     if (($thisround_failed_multiple >= 8 && $thisround_succeeded == 0) ||
@@ -1340,8 +1346,9 @@ sub check_squeue
   # squeue check interval (15s) this should make the squeue check an
   # infrequent event.
   my $silent_procs = 0;
-  for my $jobstep (values %proc)
+  for my $procinfo (values %proc)
   {
+    my $jobstep = $jobstep[$procinfo->{jobstep}];
     if ($jobstep->{stderr_at} < $last_squeue_check)
     {
       $silent_procs++;
@@ -1350,17 +1357,18 @@ sub check_squeue
   return if $silent_procs == 0;
   # use killem() on procs whose killtime is reached
-  while (my ($pid, $jobstep) = each %proc)
+  while (my ($pid, $procinfo) = each %proc)
   {
-    if (exists $jobstep->{killtime}
-        && $jobstep->{killtime} <= time
+    my $jobstep = $jobstep[$procinfo->{jobstep}];
+    if (exists $procinfo->{killtime}
+        && $procinfo->{killtime} <= time
         && $jobstep->{stderr_at} < $last_squeue_check)
     {
       my $sincewhen = "";
       if ($jobstep->{stderr_at})
       {
         $sincewhen = " in last " . (time - $jobstep->{stderr_at}) . "s";
       }
-      Log($jobstep->{jobstep}, "killing orphaned srun process $pid (task not in slurm queue, no stderr received$sincewhen)");
+      Log($procinfo->{jobstep}, "killing orphaned srun process $pid (task not in slurm queue, no stderr received$sincewhen)");
       killem ($pid);
     }
   }
@@ -1395,12 +1403,12 @@ sub check_squeue
   }
 
   # Check for child procs >60s old and not mentioned by squeue.
-  while (my ($pid, $jobstep) = each %proc)
+  while (my ($pid, $procinfo) = each %proc)
   {
-    if ($jobstep->{time} < time - 60
-        && $jobstep->{jobstepname}
-        && !exists $ok{$jobstep->{jobstepname}}
-        && !exists $jobstep->{killtime})
+    if ($procinfo->{time} < time - 60
+        && $procinfo->{jobstepname}
+        && !exists $ok{$procinfo->{jobstepname}}
+        && !exists $procinfo->{killtime})
     {
       # According to slurm, this task has ended (successfully or not)
       # -- but our srun child hasn't exited. First we must wait (30
@@ -1409,8 +1417,8 @@ sub check_squeue
       # terminated, we'll conclude some slurm communication
       # error/delay has caused the task to die without notifying srun,
       # and we'll kill srun ourselves.
-      $jobstep->{killtime} = time + 30;
-      Log($jobstep->{jobstep}, "notice: task is not in slurm queue but srun process $pid has not exited");
+      $procinfo->{killtime} = time + 30;
+      Log($procinfo->{jobstep}, "notice: task is not in slurm queue but srun process $pid has not exited");
     }
   }
 }
@@ -1432,15 +1440,21 @@ sub readfrompipes
   foreach my $job (keys %reader)
   {
     my $buf;
-    while (0 < sysread ($reader{$job}, $buf, 8192))
+    if (0 < sysread ($reader{$job}, $buf, 65536))
     {
       print STDERR $buf if $ENV{CRUNCH_DEBUG};
       $jobstep[$job]->{stderr_at} = time;
       $jobstep[$job]->{stderr} .= $buf;
+
+      # Consume everything up to the last \n
       preprocess_stderr ($job);
+
       if (length ($jobstep[$job]->{stderr}) > 16384)
       {
-        substr ($jobstep[$job]->{stderr}, 0, 8192) = "";
+        # If we get a lot of stderr without a newline, chop off the
+        # front to avoid letting our buffer grow indefinitely.
+        substr ($jobstep[$job]->{stderr},
+                0, length($jobstep[$job]->{stderr}) - 8192) = "";
       }
       $gotsome = 1;
     }
@@ -1461,7 +1475,7 @@ sub preprocess_stderr
       # whoa.
       $main::please_freeze = 1;
     }
-    elsif ($line =~ /srun: error: (Node failure on|Aborting, io error)/) {
+    elsif ($line =~ /srun: error: (Node failure on|Aborting, .*\bio error\b)/) {
       my $job_slot_index = $jobstep[$job]->{slotindex};
       $slot[$job_slot_index]->{node}->{fail_count}++;
       $jobstep[$job]->{tempfail} = 1;
@@ -1471,7 +1485,7 @@ sub preprocess_stderr
       $jobstep[$job]->{tempfail} = 1;
       ban_node_by_slot($jobstep[$job]->{slotindex});
     }
-    elsif ($line =~ /arvados\.errors\.Keep/) {
+    elsif ($line =~ /\bKeep(Read|Write|Request)Error:/) {
       $jobstep[$job]->{tempfail} = 1;
     }
   }