#!/usr/bin/env expect
############################################################################
# Purpose: Test of Slurm functionality
#          Confirm that a job executes with the specified nodes
#          (--nodelist and --exclude options).
############################################################################
# Copyright (C) 2002-2008 The Regents of the University of California.
# Copyright (C) 2008 Lawrence Livermore National Security.
# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
# Written by Morris Jette
# CODE-OCEC-09-009. All rights reserved.
#
# This file is part of Slurm, a resource management program.
# For details, see <https://slurm.schedmd.com/>.
# Please also read the included file: DISCLAIMER.
#
# Slurm is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along
# with Slurm; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
############################################################################
source ./globals

set exit_code 0

#
# Submit a 3+ node job
#
set host_0   ""
set host_1   ""
set host_2   ""
set host_3   ""
set job_id   0
set node_cnt 3

set available [available_nodes]
if {$available < $node_cnt} {
    skip "Not enough nodes currently available ($available avail, $node_cnt needed)"
}

set timeout $max_job_delay
set salloc_pid [spawn $salloc -N$node_cnt -t1 $srun -l $bin_printenv SLURMD_NODENAME]
expect {
    -re "Granted job allocation ($number)" {
        set job_id $expect_out(1,string)
        send "$bin_echo MY_ID=\$SLURM_JOB_ID \r"
        exp_continue
    }
    -re "More processors requested than permitted" {
        skip "Can't test salloc task distribution"
    }
    -re "Node count specification invalid" {
        skip "Can't test salloc task distribution"
    }
    -re "configuration not available" {
        skip "Partition too small for test"
    }
    -re "($number): *($re_word_str)" {
        if {$expect_out(1,string) == 0} {
            set host_0 $expect_out(2,string)
        }
        if {$expect_out(1,string) == 1} {
            set host_1 $expect_out(2,string)
        }
        if {$expect_out(1,string) == 2} {
            set host_2 $expect_out(2,string)
        }
        if {$expect_out(1,string) == 3} {
            set host_3 $expect_out(2,string)
        }
        exp_continue
    }
    timeout {
        if {$job_id != 0} {
            cancel_job $job_id
        }
        slow_kill [expr 0 - $salloc_pid]
        fail "salloc not responding"
    }
    eof {
        wait
    }
}

#
# Verify node count
#
if {[string compare $host_0 ""] == 0} {
    log_error "Did not get hostname of task 0"
    set exit_code 1
}
if {[string compare $host_1 ""] == 0} {
    log_error "Did not get hostname of task 1"
    set exit_code 1
}
if {[string compare $host_2 ""] == 0} {
    log_error "Did not get hostname of task 2"
    set exit_code 1
}
if {[test_front_end] != 0} {
    skip "Additional testing is incompatible with front-end systems"
}
if {[string compare $host_3 ""] != 0} {
    log_error "Started more than three tasks"
    set exit_code 1
}

# Fail if task 0 ran on the same node as task 1 or task 2
set dup_hostname 0
if {[string compare $host_0 $host_1] == 0} {
    incr dup_hostname
}
if {[string compare $host_0 $host_2] == 0} {
    incr dup_hostname
}
if {$dup_hostname > 0} {
    log_error "Re-used a node in the allocation"
    set exit_code 1
}
set exclude_node $host_0
set include_node $host_2

#
# Submit a job explicitly excluding a node
#
set host_0 ""
set host_1 ""
set host_2 ""
set job_id 0
set timeout $max_job_delay
set salloc_pid [spawn $salloc -N2 -t1 --exclude=$exclude_node $srun -l $bin_printenv SLURMD_NODENAME]
expect {
    -re "Granted job allocation ($number)" {
        set job_id $expect_out(1,string)
        exp_continue
    }
    -re "Invalid node name specified" {
        skip "Appears you are using multiple slurmd testing. This test won't work in that fashion"
    }
    -re "($number): *($re_word_str)" {
        if {$expect_out(1,string) == 0} {
            set host_0 $expect_out(2,string)
        }
        if {$expect_out(1,string) == 1} {
            set host_1 $expect_out(2,string)
        }
        if {$expect_out(1,string) == 2} {
            set host_2 $expect_out(2,string)
        }
        exp_continue
    }
    timeout {
        if {$job_id != 0} {
            cancel_job $job_id
        }
        slow_kill [expr 0 - $salloc_pid]
        fail "salloc not responding"
    }
    eof {
        wait
    }
}

#
# Verify node count and names
#
if {[string compare $host_0 ""] == 0} {
    log_error "Did not get hostname of task 0"
    set exit_code 1
}
if {[string compare $host_1 ""] == 0} {
    log_error "Did not get hostname of task 1"
    set exit_code 1
}
if {[string compare $host_2 ""] != 0} {
    log_error "Started more than two tasks"
    set exit_code 1
}
set dup_hostname 0
if {[string compare $host_0 $exclude_node] == 0} {
    set dup_hostname 1
}
if {[string compare $host_1 $exclude_node] == 0} {
    set dup_hostname 1
}
if {$dup_hostname == 1} {
    log_error "Allocated an excluded node"
    set exit_code 1
}

#
# Submit a job explicitly including a node
#
set host_0 ""
set host_1 ""
set job_id 0
set timeout $max_job_delay
set salloc_pid [spawn $salloc -N1 -t1 --nodelist=$include_node $srun -l $bin_printenv SLURMD_NODENAME]
expect {
    -re "Granted job allocation ($number)" {
        set job_id $expect_out(1,string)
        exp_continue
    }
    -re "($number): *($re_word_str)" {
        if {$expect_out(1,string) == 0} {
            set host_0 $expect_out(2,string)
        }
        if {$expect_out(1,string) == 1} {
            set host_1 $expect_out(2,string)
        }
        exp_continue
    }
    timeout {
        log_error "salloc not responding"
        if {$job_id != 0} {
            cancel_job $job_id
        }
        slow_kill [expr 0 - $salloc_pid]
        set exit_code 1
    }
    eof {
        wait
    }
}

#
# Verify node count and names
#
if {[string compare $host_0 ""] == 0} {
    fail "Did not get hostname of task 0"
}
if {[string compare $host_1 ""] != 0} {
    fail "Started more than one task"
}
set dup_hostname 0
if {[string compare $host_0 $include_node] == 0} {
    set dup_hostname 1
}
if {$dup_hostname == 0} {
    fail "Allocation lacked an included node"
}

if {$exit_code != 0} {
    fail "Test failed due to previous errors (\$exit_code = $exit_code)"
}