X-Git-Url: https://git.arvados.org/arvados.git/blobdiff_plain/395d48a60ed557318833a5272ab00dce270cec40..b54478ea1b7c8aaeaf565d591f32769bcdc09b8f:/sdk/cli/bin/crunch-job?ds=sidebyside

diff --git a/sdk/cli/bin/crunch-job b/sdk/cli/bin/crunch-job
index 14dac6a143..e0aff312cc 100755
--- a/sdk/cli/bin/crunch-job
+++ b/sdk/cli/bin/crunch-job
@@ -126,6 +126,7 @@ my $jobspec;
 my $job_api_token;
 my $no_clear_tmp;
 my $resume_stash;
+my $cgroup_root = "/sys/fs/cgroup";
 my $docker_bin = "docker.io";
 my $docker_run_args = "";
 GetOptions('force-unlock' => \$force_unlock,
@@ -134,6 +135,7 @@ GetOptions('force-unlock' => \$force_unlock,
            'job-api-token=s' => \$job_api_token,
            'no-clear-tmp' => \$no_clear_tmp,
            'resume-stash=s' => \$resume_stash,
+           'cgroup-root=s' => \$cgroup_root,
            'docker-bin=s' => \$docker_bin,
            'docker-run-args=s' => \$docker_run_args,
     );
@@ -353,6 +355,7 @@ my @jobstep_done = ();
 
 my @jobstep_tomerge = ();
 my $jobstep_tomerge_level = 0;
 my $squeue_checked = 0;
+my $sinfo_checked = 0;
 my $latest_refresh = scalar time;
 
@@ -414,8 +417,17 @@ if ($docker_locator = $Job->{docker_image_locator}) {
   Log (undef, "docker image hash is $docker_hash");
   $docker_stream =~ s/^\.//;
   my $docker_install_script = qq{
-if ! $docker_bin images -q --no-trunc --all | grep -qxF \Q$docker_hash\E; then
-    arv-get \Q$docker_locator$docker_stream/$docker_hash.tar\E | $docker_bin load
+if $docker_bin images -q --no-trunc --all | grep -xF \Q$docker_hash\E >/dev/null; then
+    exit 0
+fi
+declare -a exit_codes=("\${PIPESTATUS[@]}")
+if [ 0 != "\${exit_codes[0]}" ]; then
+  exit "\${exit_codes[0]}"  # `docker images` failed
+elif [ 1 != "\${exit_codes[1]}" ]; then
+  exit "\${exit_codes[1]}"  # `grep` encountered an error
+else
+  # Everything worked fine, but grep didn't find the image on this host.
+  arv-get \Q$docker_locator$docker_stream/$docker_hash.tar\E | $docker_bin load
 fi
 };
 
@@ -430,7 +442,7 @@ fi
 
   # Determine whether this version of Docker supports memory+swap limits.
   ($exited, $stdout, $stderr) = srun_sync(
-    ["srun", "--nodelist=" . $node[0]],
+    ["srun", "--nodes=1"],
     [$docker_bin, 'run', '--help'],
     {label => "check --memory-swap feature"});
   $docker_limitmem = ($stdout =~ /--memory-swap/);
@@ -453,7 +465,7 @@ fi
       $try_user_arg = "--user=$try_user";
     }
     my ($exited, $stdout, $stderr) = srun_sync(
-      ["srun", "--nodelist=" . $node[0]],
+      ["srun", "--nodes=1"],
       ["/bin/sh", "-ec",
        "$docker_bin run $docker_run_args $try_user_arg $docker_hash id --user"],
       {label => $label});
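
The rewritten install script above distinguishes three outcomes of the `docker images | grep` pipeline using bash's PIPESTATUS array: the image is already present (exit 0), docker itself failed (propagate its exit code), or grep hit a real error rather than merely not matching. A standalone sketch of the same pattern, runnable outside crunch-job (the image hash is a placeholder, and plain `docker` stands in for $docker_bin):

    #!/usr/bin/perl
    # Sketch only: the PIPESTATUS pattern from $docker_install_script, made
    # standalone.  The image hash is a placeholder; crunch-job itself
    # interpolates $docker_bin and the real $docker_hash.
    use strict;
    use warnings;

    my $docker_hash = "0" x 64;   # hypothetical image hash
    my $script = qq{
    if docker images -q --no-trunc --all | grep -xF \Q$docker_hash\E >/dev/null; then
      exit 0
    fi
    declare -a exit_codes=("\${PIPESTATUS[@]}")
    if [ 0 != "\${exit_codes[0]}" ]; then
      exit "\${exit_codes[0]}"  # docker itself failed
    elif [ 1 != "\${exit_codes[1]}" ]; then
      exit "\${exit_codes[1]}"  # grep hit a real error
    fi
    exit 9  # pipeline ran, image simply absent
    };
    system("bash", "-c", $script);
    printf "image check exited %d\n", $? >> 8;

Exit code 9 here is arbitrary; the real script instead falls through to `arv-get | docker load` when the image is simply absent on the host.
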
@@ -850,7 +862,11 @@ for (my $todo_ptr = 0; $todo_ptr <= $#jobstep_todo; $todo_ptr ++)
         .q{&& MEM=$(awk '($1 == "MemTotal:"){print $2}' </proc/meminfo) }
         .q{&& SWAP=$(awk '($1 == "SwapTotal:"){print $2}' </proc/meminfo) }
         ."&& MEMLIMIT=\$(( (\$MEM * 95) / ($ENV{CRUNCH_NODE_SLOTS} * 100) )) "
-        ."&& let SWAPLIMIT=\$MEMLIMIT+\$SWAP ";
+        ."&& let SWAPLIMIT=\$MEMLIMIT+\$SWAP "
+        .q{&& declare -a VOLUMES=() }
+        .q{&& if which crunchrunner >/dev/null ; then VOLUMES+=("--volume=$(which crunchrunner):/usr/local/bin/crunchrunner") ; fi }
+        .q{&& if test -f /etc/ssl/certs/ca-certificates.crt ; then VOLUMES+=("--volume=/etc/ssl/certs/ca-certificates.crt:/etc/arvados/ca-certificates.crt") ; }
+        .q{elif test -f /etc/pki/tls/certs/ca-bundle.crt ; then VOLUMES+=("--volume=/etc/pki/tls/certs/ca-bundle.crt:/etc/arvados/ca-certificates.crt") ; fi };
 
     $command .= "&& exec arv-mount --read-write --mount-by-pdh=by_pdh --mount-tmp=tmp --crunchstat-interval=10 --allow-other $arv_file_cache \Q$keep_mnt\E --exec ";
     $ENV{TASK_KEEPMOUNT} = "$keep_mnt/by_pdh";
@@ -860,7 +876,7 @@ for (my $todo_ptr = 0; $todo_ptr <= $#jobstep_todo; $todo_ptr ++)
   {
     my $containername = "$Jobstep->{arvados_task}->{uuid}-$Jobstep->{failures}";
     my $cidfile = "$ENV{CRUNCH_TMP}/$containername.cid";
-    $command .= "crunchstat -cgroup-root=/sys/fs/cgroup -cgroup-parent=docker -cgroup-cid=$cidfile -poll=10000 ";
+    $command .= "crunchstat -cgroup-root=\Q$cgroup_root\E -cgroup-parent=docker -cgroup-cid=$cidfile -poll=10000 ";
     $command .= "$docker_bin run $docker_run_args --name=$containername --attach=stdout --attach=stderr --attach=stdin -i \Q$dockeruserarg\E --cidfile=$cidfile --sig-proxy ";
     # We only set memory limits if Docker lets us limit both memory and swap.
     # Memory limits alone have been supported longer, but subprocesses tend
@@ -915,6 +931,10 @@ for (my $todo_ptr = 0; $todo_ptr <= $#jobstep_todo; $todo_ptr ++)
       # For now, use the same approach as TASK_WORK above.
       $ENV{"JOB_WORK"} = "/tmp/crunch-job-work";
 
+      # Bind mount the crunchrunner binary and host TLS certificates file into
+      # the container.
+      $command .= '"${VOLUMES[@]}" ';
+
       while (my ($env_key, $env_val) = each %ENV)
       {
         if ($env_key =~ /^(ARVADOS|CRUNCH|JOB|TASK)_/) {
@@ -940,7 +960,7 @@ for (my $todo_ptr = 0; $todo_ptr <= $#jobstep_todo; $todo_ptr ++)
       }
     } else {
       # Non-docker run
-      $command .= "crunchstat -cgroup-root=/sys/fs/cgroup -poll=10000 ";
+      $command .= "crunchstat -cgroup-root=\Q$cgroup_root\E -poll=10000 ";
       $command .= $stdbuf;
       $command .= "perl - $ENV{CRUNCH_SRC}/crunch_scripts/" . $Job->{"script"};
     }
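
Both crunchstat invocations above pass the new $cgroup_root through Perl's `\Q...\E`, which backslash-escapes shell metacharacters at string-build time so an unusual --cgroup-root value cannot corrupt the generated command line. A minimal runnable sketch of just that quoting step (the cid file path is hypothetical, not real job state):

    #!/usr/bin/perl
    # Sketch only: what \Q...\E does to a --cgroup-root value before it is
    # spliced into the shell command.  Paths here are examples.
    use strict;
    use warnings;

    my $cgroup_root = "/sys/fs/cgroup";          # default from GetOptions
    my $cidfile = "/tmp/example-container.cid";  # hypothetical cid file
    my $command = "crunchstat -cgroup-root=\Q$cgroup_root\E"
                . " -cgroup-parent=docker -cgroup-cid=$cidfile -poll=10000 ";
    # Prints: crunchstat -cgroup-root=\/sys\/fs\/cgroup -cgroup-parent=docker ...
    print "$command\n";
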
@@ -1132,7 +1152,9 @@ sub update_progress_stats
 
 sub reapchildren
 {
   my $children_reaped = 0;
-  while ((my $pid = waitpid (-1, WNOHANG)) > 0)
+  my @successful_task_uuids = ();
+
+  while((my $pid = waitpid (-1, WNOHANG)) > 0)
   {
     my $childstatus = $?;
@@ -1141,13 +1163,6 @@ sub reapchildren
                     . $slot[$proc{$pid}->{slot}]->{cpu});
     my $jobstepidx = $proc{$pid}->{jobstepidx};
 
-    if (!WIFEXITED($childstatus))
-    {
-      # child did not exit (may be temporarily stopped)
-      Log ($jobstepidx, "child $pid did not actually exit in reapchildren, ignoring for now.");
-      next;
-    }
-
     $children_reaped++;
     my $elapsed = time - $proc{$pid}->{time};
     my $Jobstep = $jobstep[$jobstepidx];
@@ -1205,8 +1220,9 @@ sub reapchildren
       push @jobstep_todo, $jobstepidx;
       $Job->{'tasks_summary'}->{'failed'}++;
     }
-    else
+    else   # task_success
     {
+      push @successful_task_uuids, $Jobstep->{'arvados_task'}->{uuid};
       ++$thisround_succeeded;
       $slot[$proc{$pid}->{slot}]->{node}->{losing_streak} = 0;
       $slot[$proc{$pid}->{slot}]->{node}->{hold_until} = 0;
@@ -1229,34 +1245,36 @@ sub reapchildren
     push @freeslot, $proc{$pid}->{slot};
     delete $proc{$pid};
 
-    if ($task_success) {
-      # Load new tasks
-      my $newtask_list = [];
-      my $newtask_results;
-      do {
-        $newtask_results = api_call(
-          "job_tasks/list",
-          'where' => {
-            'created_by_job_task_uuid' => $Jobstep->{'arvados_task'}->{uuid}
-          },
-          'order' => 'qsequence',
-          'offset' => scalar(@$newtask_list),
-        );
-        push(@$newtask_list, @{$newtask_results->{items}});
-      } while (@{$newtask_results->{items}});
-      foreach my $arvados_task (@$newtask_list) {
-        my $jobstep = {
-          'level' => $arvados_task->{'sequence'},
-          'failures' => 0,
-          'arvados_task' => $arvados_task
-        };
-        push @jobstep, $jobstep;
-        push @jobstep_todo, $#jobstep;
-      }
-    }
 
     $progress_is_dirty = 1;
   }
 
+  if (scalar(@successful_task_uuids) > 0)
+  {
+    Log (undef, sprintf("%d tasks exited (%d succeeded), checking for new tasks from API server.", $children_reaped, scalar(@successful_task_uuids)));
+    # Load new tasks
+    my $newtask_list = [];
+    my $newtask_results;
+    do {
+      $newtask_results = api_call(
+        "job_tasks/list",
+        'filters' => [["created_by_job_task_uuid","in",\@successful_task_uuids]],
+        'order' => 'qsequence',
+        'offset' => scalar(@$newtask_list),
+      );
+      push(@$newtask_list, @{$newtask_results->{items}});
+    } while (@{$newtask_results->{items}});
+    Log (undef, sprintf("Got %d new tasks from API server.", scalar(@$newtask_list)));
+    foreach my $arvados_task (@$newtask_list) {
+      my $jobstep = {
+        'level' => $arvados_task->{'sequence'},
+        'failures' => 0,
+        'arvados_task' => $arvados_task
+      };
+      push @jobstep, $jobstep;
+      push @jobstep_todo, $#jobstep;
+    }
+  }
+
   return $children_reaped;
 }
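
The reworked task loading above pages through `job_tasks/list`, growing `offset` until the API returns an empty page, and now filters on all successful task UUIDs in one query instead of one query per finished task. A self-contained mock of the pagination loop (api_call() is stubbed with canned pages here, not the real Arvados client):

    #!/usr/bin/perl
    # Sketch only: the do/while pagination idiom from reapchildren, with
    # api_call() replaced by a stub that serves canned pages.
    use strict;
    use warnings;

    my @pages = ([{uuid => "task-1"}, {uuid => "task-2"}], []);
    sub api_call {
      my ($method, %params) = @_;   # real code passes filters/order/offset
      return {items => shift(@pages) // []};
    }

    my $newtask_list = [];
    my $newtask_results;
    do {
      $newtask_results = api_call("job_tasks/list",
                                  'offset' => scalar(@$newtask_list));
      push(@$newtask_list, @{$newtask_results->{items}});
    } while (@{$newtask_results->{items}});
    printf "fetched %d new tasks\n", scalar(@$newtask_list);   # fetched 2
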
@@ -1384,6 +1402,37 @@ sub check_squeue
   }
 }
 
+sub check_sinfo
+{
+  # If a node fails in a multi-node "srun" call during job setup, the call
+  # may hang instead of exiting with a nonzero code.  This function checks
+  # "sinfo" for the health of the nodes that were allocated and ensures that
+  # they are all still in the "alloc" state.  If a node that is allocated to
+  # this job is not in "alloc" state, then set please_freeze.
+  #
+  # This is only called from srun_sync() for node configuration.  If a
+  # node fails doing actual work, there are other recovery mechanisms.
+
+  # Do not call `sinfo` more than once every 15 seconds.
+  return if $sinfo_checked > time - 15;
+  $sinfo_checked = time;
+
+  # The output format "%t" means output node states.
+  my @sinfo = `sinfo --nodes=\Q$ENV{SLURM_NODELIST}\E --noheader -o "%t"`;
+  if ($? != 0)
+  {
+    Log(undef, "warning: sinfo exit status $? ($!)");
+    return;
+  }
+  chop @sinfo;
+
+  foreach (@sinfo)
+  {
+    if ($_ ne "alloc" && $_ ne "alloc*") {
+      $main::please_freeze = 1;
+    }
+  }
+}
 
 sub release_allocation
 {
@@ -1452,6 +1501,9 @@ sub readfrompipes
 sub preprocess_stderr
 {
   my $jobstepidx = shift;
+  # slotindex is only defined for children running Arvados job tasks.
+  # Be prepared to handle the undef case (for setup srun calls, etc.).
+  my $job_slot_index = $jobstep[$jobstepidx]->{slotindex};
 
   while ($jobstep[$jobstepidx]->{stderr} =~ /^(.*?)\n/) {
     my $line = $1;
@@ -1461,21 +1513,18 @@ sub preprocess_stderr
       # whoa.
       $main::please_freeze = 1;
     }
-    elsif (!exists $jobstep[$jobstepidx]->{slotindex}) {
-      # Skip the following tempfail checks if this srun proc isn't
-      # attached to a particular worker slot.
-    }
     elsif ($line =~ /srun: error: (Node failure on|Aborting, .*\bio error\b)/) {
-      my $job_slot_index = $jobstep[$jobstepidx]->{slotindex};
-      $slot[$job_slot_index]->{node}->{fail_count}++;
       $jobstep[$jobstepidx]->{tempfail} = 1;
-      ban_node_by_slot($job_slot_index);
+      if (defined($job_slot_index)) {
+        $slot[$job_slot_index]->{node}->{fail_count}++;
+        ban_node_by_slot($job_slot_index);
+      }
     }
     elsif ($line =~ /srun: error: (Unable to create job step|.*: Communication connection failure)/) {
       $jobstep[$jobstepidx]->{tempfail} = 1;
-      ban_node_by_slot($jobstep[$jobstepidx]->{slotindex});
+      ban_node_by_slot($job_slot_index) if (defined($job_slot_index));
     }
-    elsif ($line =~ /arvados\.errors\.Keep/) {
+    elsif ($line =~ /\bKeep(Read|Write|Request)Error:/) {
       $jobstep[$jobstepidx]->{tempfail} = 1;
     }
   }
@@ -1889,7 +1938,6 @@ sub freezeunquote
   return $s;
 }
 
-
 sub srun_sync
 {
   my $srunargs = shift;
@@ -1944,6 +1992,7 @@ sub srun_sync
     if (!$busy || ($latest_refresh + 2 < scalar time)) {
      check_refresh_wanted();
      check_squeue();
+     check_sinfo();
    }
    if (!$busy) {
      select(undef, undef, undef, 0.1);
    }
@@ -1963,6 +2012,11 @@ sub srun_sync
 
   delete $reader{$jobstepidx};
   my $j = pop @jobstep;
+  # If the srun showed signs of tempfail, ensure the caller treats that as a
+  # failure case.
+  if ($main::please_freeze || $j->{tempfail}) {
+    $exited ||= 255;
+  }
   return ($exited, $j->{stdout_captured}, $j->{stderr_captured});
 }
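
The node-state test in check_sinfo can be exercised without a live SLURM cluster. A sketch with canned `sinfo -o "%t"` output (the node states are invented); note that the comparison must use the string operator `ne`, since a numeric `!=` would coerce every state name to 0 and never fire:

    #!/usr/bin/perl
    # Sketch only: the check_sinfo state test against canned output instead
    # of shelling out to sinfo.  "drain" stands in for any non-alloc state.
    use strict;
    use warnings;

    my @sinfo = ("alloc", "alloc*", "drain");   # as if from: sinfo ... -o "%t"
    my $please_freeze = 0;
    foreach my $state (@sinfo) {
      # Anything other than "alloc" (possibly flagged "alloc*") means one of
      # our allocated nodes left the alloc state, so stop scheduling new work.
      $please_freeze = 1 if ($state ne "alloc" && $state ne "alloc*");
    }
    print "please_freeze=$please_freeze\n";   # prints 1, because of "drain"
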