#!/bin/bash
#
#   Usage: qrun [options] Program_to_Execute
########################################################################
#
# Larry Solheim Jan,2014
#

# Resolve the absolute path to this script and its basename (used by usage()
# to print the embedded help text, and in error-message prefixes).
# NOTE(review): this assumes `type $0` prints "name is /path/name" so that
# awk field 3 is the path; an alias, function, or "hashed" entry would break
# this — confirm qrun is always invoked by path or via PATH lookup.
FULLPATH=`type $0|awk '{print $3}'`
Runame=`basename $FULLPATH`
usage() {
  # Print an optional error message plus the help text embedded in the
  # comment header of this script (lines 3 up to the '###' divider).
  #   -e : exit with status 1 (error exit) instead of 0
  # Remaining arguments are echoed to stderr as an error message.
  #
  # Use local getopts state: usage is called from within the main option
  # loop (e.g. on an invalid option), where the global OPTIND is already
  # advanced.  Without resetting a local OPTIND, getopts here would start
  # past our own arguments and the -e flag would be silently ignored,
  # making error exits return status 0.
  local opt err_exit=0 OPTIND=1
  while getopts e opt
  do
    case $opt in
      e) err_exit=1 ;;
    esac
  done
  shift $((OPTIND - 1))

  # Print any user supplied message, prefixed with machine and script name
  [ -n "$1" ] && echo >&2 "${this_mach}:${Runame}:" "$@"
  echo >&2 " "
  # Emit the leading comment block of this script as the usage text:
  # stop at the first '###' line, pad blank comment lines, strip '# ' prefix
  sed >&2 -n '/^###/q; s/^#$/# /; s/^ *$/# /; 3,$s/^# //p;' "$FULLPATH"
  if [ $err_exit -eq 0 ]; then
    exit
  else
    exit 1
  fi
}

# Enable extended pattern matching
# (required by the +( ), *( ), @( ) glob patterns used in the geometry
# validation and whitespace-stripping expansions below)
shopt -s extglob

bail(){
  # Report a fatal error (tagged with this script's name) and stop.
  printf '%s\n' "${Runame}: *** ERROR *** $1"
  exit 1
}

# echo without trailing newline: emulate -n when echo does not honour it.
# If `echo -n` literally prints "-n", this echo treats -n as text, so fall
# back to the '\c' escape; otherwise -n is supported and used directly.
case "X$(echo -n)" in
  X-n) echo_n() { echo ${1+"$@"}'\c'; } ;;
    *) echo_n() { echo -n ${1+"$@"}; } ;;
esac

this_fqhn() {
  # usage: this_fqhn unqualified_machine_name
  # purpose: Determine a fully qualified hostname given a short name or alias
  # Sets (globals): FQHN  - fully qualified host name of the argument
  #                 FQHIP - the IP address for FQHN
  # Returns 0 on success; calls bail (exits) on any failure.
  local mach=$1
  # These variables are set by this function
  FQHN=''   # Fully qualified host name of mach
  FQHIP=''  # The IP address for FQHN
  local tmp_arr
  [ -z "$mach" ] && bail "this_fqhn: Requires an argument."
  # Test for an embedded dot with a glob, NOT a quoted =~ pattern: the
  # meaning of a quoted right-hand side of =~ changed in bash 3.2 (literal
  # string) vs earlier/compat31 (regex, where '.' matches ANY character and
  # every non-empty name would wrongly look qualified).
  if [[ "$mach" == *.* ]]; then
    # If the user supplied machine name contains a '.' then assume it is
    # already the fully qualified name
    FQHN=$mach
  else
    # The following assumes resolv.conf is properly configured
    # Note: We cannot use "local RESOLV_LINE=($(grep -E '^ *(search|domain)' ..."
    # because the assignment will always return a zero exit status in this case
    # and any error in the subshell will not get trapped
    local RESOLV_LINE
    RESOLV_LINE=($(grep -E '^ *(search|domain)' /etc/resolv.conf)) ||
        bail "this_fqhn: Unable to determine default domain from resolv.conf."
    # Assume that the first word after the keyword is our domain
    # Strip any "int." prefix from this name (CMC quirk; needs extglob)
    FQHN="${mach}.${RESOLV_LINE[1]#@(int.)}"
  fi
  # Validate the resulting hostname (host's exit status is the check)
  tmp_arr=($(host $FQHN)) ||
      bail "this_fqhn: Unable to determine fully qualified host name for ${mach}."
  # Determine the IP address: last field of the first line of host output
  tmp_arr=($(host $FQHN|head -1))
  # Note: apparently some bash installations do not support "-1" as an array index
  FQHIP=${tmp_arr[$((${#tmp_arr[@]}-1))]}
  return 0
}

verify_pint() {
  # Ensure every variable NAMED on the command line holds a positive integer.
  # Each named variable is also redefined with leading/trailing whitespace
  # stripped from its value.  Calls bail (exits) on any failure.
  [ -z "$1" ] && bail "verify_pint: At least one argument is required."
  local local_var local_val
  for local_var in "$@"; do
    # Strip leading and trailing whitespace from the variable NAME
    local_var=${local_var#"${local_var%%[![:space:]]*}"}
    local_var=${local_var%"${local_var##*[![:space:]]}"}
    test -z "$local_var" && bail "verify_pint: Null or blank variable name found."
    case $local_var in
      *" "*) bail "Variable name -->${local_var}<-- contains internal space." ;;
    esac
    # Fetch the value via indirect expansion instead of eval'ing an
    # echo|sed pipeline (safer: the name is user-controlled) and strip
    # leading/trailing whitespace from it
    local_val=${!local_var}
    local_val=${local_val#"${local_val%%[![:space:]]*}"}
    local_val=${local_val%"${local_val##*[![:space:]]}"}
    # Redefine the input variable with any leading/trailing space removed
    # (printf -v rejects malformed names rather than executing them)
    printf -v "$local_var" '%s' "$local_val"
    # Test for a positive (non-zero) integer
    [[ $local_val =~ ^[0-9]+$ && $local_val -gt 0 ]] ||
        bail "verify_pint: $local_var = $local_val is not a positive integer."
  done
}

# this_host will be the current hostname, truncated at the first dot
this_host=$(uname -n)
this_host=${this_host%%.*}

# Optionally define a prefix for all ssh commands
# to reroute through a head node when required
ROUTE_SSH=''

# this_mach will be a known alias (or possibly the actual machine name).
# Compute-node name prefixes map to the cluster's head-node alias.
this_mach=$this_host
case $this_mach in
  c1*)
    this_mach=spica
    ROUTE_SSH="ssh spica"
    ;;
  c2*)
    this_mach=hadar
    ROUTE_SSH="ssh hadar"
    ;;
  ib3*)
    this_mach=pollux
    ROUTE_SSH="ssh pollux"
    ;;
  joule*)
    this_mach=joule
    ROUTE_SSH="ssh joule"
    ;;
esac

# A unique(ish) string used in file names etc
stamp=$(date "+%Y%m%d_%H%M%S")

# Set defaults
# verbose starts high (5): most diagnostic output is on unless lowered here
verbose=5
# NOTE(review): dry_run is tested before llsubmit below, but no command line
# option ever sets it non-zero — confirm whether a -y/--dry-run flag was intended
dry_run=0
keep=0
tasks_per_node=1
max_tasks_per_node=1
nodes=1
mpi_tasks=0
# NOTE(review): omp_tasks is initialized here but never used in this script
omp_tasks=0
# Wall clock limit default, in seconds
time_limit=1800
# 0 means "no user supplied memory limit"; a real default is chosen later
memory_limit=0
qjob_name=''
std_output=''
std_error=''
geometry=''
user_batch_job=''
declare -a all_tasks    # flat list of every mpi task number
declare -a NODE         # per-node task lists (space separated)
declare -A KEYWD        # LoadLeveler keyword lines for the job file

# sub_mach is the name of the machine/cluster to which the batch job will be submitted
# The default machine is set here but may be changed via the '-d' command line option
sub_mach=hadar

# Store command line args from this invocation in case we need to submit remotely
qrun_args=''

# process single char command line options
# Each recognized option is recorded locally AND appended verbatim to
# qrun_args so the identical invocation can be replayed on the remote
# submission machine when this_mach != sub_mach.
while getopts n:p:d:t:m:o:e:j:g:vkhx opt
do
  case $opt in
    n) nodes=$OPTARG
       qrun_args="$qrun_args -n $OPTARG"
       ;;
    p) tasks_per_node=$OPTARG
       qrun_args="$qrun_args -p $OPTARG"
       ;;
    d) sub_mach=$OPTARG
       qrun_args="$qrun_args -d $OPTARG"
       ;;
    t) time_limit=$OPTARG
       qrun_args="$qrun_args -t $OPTARG"
       ;;
    m) memory_limit=$OPTARG
       qrun_args="$qrun_args -m $OPTARG"
       ;;
    g) geometry=$OPTARG
       # Add single quotes around the -g arg passed to the remote machine
       # This will protect any embedded shell reserved chars
       qrun_args="$qrun_args -g '$OPTARG'"
       ;;
    o) # This eval will expand any variables embedded in the single quoted arg
       eval std_output=$OPTARG
       # Add single quotes around the -o arg passed to the remote machine
       # This will allow remote expansion of any embedded shell variable
       qrun_args="$qrun_args -o '$OPTARG'"
       ;;
    e) # This eval will expand any variables embedded in the single quoted arg
       eval std_error=$OPTARG
       # Add single quotes around the -e arg passed to the remote machine
       # This will allow remote expansion of any embedded shell variable
       qrun_args="$qrun_args -e '$OPTARG'"
       ;;
    j) qjob_name=$OPTARG
       qrun_args="$qrun_args -j $OPTARG"
       ;;
    v) verbose=`expr $verbose + 1`
       qrun_args="$qrun_args -v"
       ;;
    k) keep=1
       qrun_args="$qrun_args -k"
       ;;
    x) set -x
       qrun_args="$qrun_args -x"
       ;;
    h) usage ;;
    # NOTE(review): $USAGE is never defined anywhere in this script, so an
    # invalid option calls "usage -e" with no message — confirm whether an
    # error string was intended here.
    ?) usage -e $USAGE ;;
  esac
done
shift `expr $OPTIND - 1`

# Ensure sub_mach is a valid batch machine name/alias,
# normalizing the two-letter shorthand to the full name.
if [[ $sub_mach == sp || $sub_mach == spica ]]; then
  sub_mach=spica
elif [[ $sub_mach == ha || $sub_mach == hadar ]]; then
  sub_mach=hadar
else
  bail "Invalid submission machine name --> $sub_mach <--"
fi

# Assign remaining command line args to an array
# adding quotes around any variable definition values
nARGV=-1
declare -a ARGV
for arg in "$@"; do
  case $arg in
    *=*) # Add quotes around the RHS of all variable definitions
         # Split on the FIRST '=' with parameter expansion rather than an
         # unquoted `echo $arg | awk` pipeline, so whitespace and glob
         # characters inside the argument survive intact
         var=${arg%%=*}
         [ -z "$var" ] && bail "Invalid assignment on qrun command line --> $arg <--"
         val=${arg#*=}
         if [ "$this_mach" = "$sub_mach" ]; then
           # On the execution machine use double quotes
           ARGV[++nARGV]="$var=\"$val\""
         else
           # Anywhere else use single quotes
           ARGV[++nARGV]="$var='$val'"
         fi
         ;;
      *) ARGV[++nARGV]="$arg" ;;
  esac
done

# Loop over non-option command line args to separate the batch job command line from
# the rest and add any user supplied variable definitions to the current environment
# NOTE(review): the eval calls below execute user-supplied text as shell code;
# acceptable for a trusted-user cluster tool, but worth confirming there is no
# path by which untrusted input reaches this command line.
for idx in "${!ARGV[@]}"; do
  # Keep track of the array index and the element value in local vars
  curr_arg=${ARGV[$idx]}
  [ $verbose -gt 0 ] && echo "## ${this_mach}: idx = $idx   arg = $curr_arg"
  case $curr_arg in
      -) # All remaining command line args after this dash are part of qjob_cmd
         if [ -z "$qjob_cmd" ]; then
           qjob_cmd="${ARGV[@]:$((idx+1))}"
         else
           qjob_cmd="$qjob_cmd ${ARGV[@]:$((idx+1))}"
         fi
         break
         ;;
    *=*) # This is a variable definition
         # Split the definition on the first '=' into name and value
         var=`echo $curr_arg|awk -F\= '{printf "%s",$1}' -`
         val=`echo "$curr_arg"|awk '{i=index($0,"=")+1;printf "%s",substr($0,i)}' -`
         # Add this variable to the current environment
         if [ $this_mach = $sub_mach ]; then
           # The second eval is to expand any embedded shell variables
           # (the value arrived single-quoted from a remote invocation)
           eval eval ${var}=\$val
         else
           eval ${var}=\$val
         fi
         # Reset val to the value of var in the current environment
         eval val=\$$var
         if [ $this_mach = $sub_mach -a $verbose -gt 1 ]; then
           echo "## ${this_mach}: Added $var = -->$val<-- to the current environment"
         fi
         if [ $this_mach != $sub_mach -a $verbose -gt 2 ]; then
           echo "## ${this_mach}: Added $var = -->$val<-- to the current environment"
         fi
         # Remember when the user explicitly set these so defaults can be overridden
         case $var in
                    nodes) usr_set_nodes=1  ;;
           tasks_per_node) usr_set_tasks_per_node=1 ;;
         esac
         # Keep track of these definitions for remote submission, if required
         qrun_args="$qrun_args $curr_arg"
         ;;
      *) # Anything else is appended to the batch job command line
         if [ -z "$qjob_cmd" ]; then
           qjob_cmd="$curr_arg"
         else
           qjob_cmd="$qjob_cmd $curr_arg"
         fi
         ;;
  esac
done

##################################################################################
# Define mpi_tasks : The total number of mpi tasks requested
# Define     nodes : The total number of nodes requested
# Define      NODE : An array mapping mpi tasks to specific nodes
# Define all_tasks : An array containing a list of all mpi tasks
# Define or validate geometry : used with the LoadLeveler task_geometry keyword
# All the +( ) / @( ) patterns below require extglob (enabled at the top).
##################################################################################
if [ -n "$geometry" ]; then
  ##################################################################################
  # The user has supplied a value for task geometry
  ##################################################################################
  # An example of a valid task geometry is {(0,1,2,3)(4,5,6)(7)}
  # Strip all whitespace from geometry
  geometry=${geometry//+( )}
  # Verify the user supplied task geometry value
  # Ensure first char is '{' and last char is '}'
  [ "$geometry" = "${geometry##+(\{)}" ] &&
      bail "No leading '{' found on user supplied task geometry --> $geometry <--"
  [ "$geometry" = "${geometry%%+(\})}" ] &&
      bail "No trailing '}' found on user supplied task geometry --> $geometry <--"
  # Strip off leading and trailing braces and assign to the temporary variable geom
  geom=${geometry##+(\{)}
  geom=${geom%%+(\})}
  # Look for invalid characters, that is anything that is not '(' ')' ',' '[0-9]'
  [ -z "${geom//+(\(|\)|[0-9]|,)}" ] ||
      bail "Invalid user supplied task geometry --> $geometry <--"
  # Ensure that the string now contains a leading '(' and a trailing ')'
  [ "$geom" = "${geom##+(\()}" ] &&
      bail "Missing first '(' in user supplied task geometry --> $geometry <--"
  [ "$geom" = "${geom%%+(\))}" ] &&
      bail "Missing last ')' in user supplied task geometry --> $geometry <--"
  # Strip off first and last parentheses
  geom=${geom##+(\()}
  geom=${geom%%+(\))}
  # Replace all ')(' pairs with a single dash
  geom=${geom//@(\)\()/-}
  # If there are any parentheses left then the string is invalid
  # (e.g. nested or unbalanced parentheses)
  [ "$geom" = "${geom//*(\)|\()}" ] ||
      bail "Invalid user supplied task geometry --> $geometry <--"
  # If there are any adjacent dashes then there is an empty node e.g. (0,2)()(1,3)
  [ "$geom" = "${geom//@(--)}" ] ||
      bail "Invalid user supplied task geometry. Empty node in --> $geometry <--"
  # If there is a dash at either end then there is an empty node e.g. ()(0,2)(1,3)
  [ "$geom" = "${geom#@(-)}" ] ||
      bail "Invalid user supplied task geometry. Empty node in --> $geometry <--"
  [ "$geom" = "${geom%@(-)}" ] ||
      bail "Invalid user supplied task geometry. Empty node in --> $geometry <--"
  # Replace each dash with a space and assign the resulting list to an array
  # (one element per node, each holding a comma separated task list)
  NODE=(${geom//@(-)/ })
  # Loop over nodes and verify the contents of each node specific string
  for i in "${!NODE[@]}"; do
    # At this point there can only be digits or commas in each node specific string
    # Ensure that the string does not contain leading or trailing commas
    [ "${NODE[i]}" = "${NODE[i]#+(,)}" ] ||
        bail "Invalid node --> (${NODE[i]}) <--   task geometry: $geometry"
    [ "${NODE[i]}" = "${NODE[i]%+(,)}" ] ||
        bail "Invalid node --> (${NODE[i]}) <--   task geometry: $geometry"
    # Ensure that there are no adjacent commas
    [ "${NODE[i]}" = "${NODE[i]//@(,,)}" ] ||
        bail "Invalid node --> (${NODE[i]}) <--   task geometry: $geometry"
    # Replace commas with spaces in the current node string
    NODE[i]="${NODE[i]//@(,)/ }"
    # Append this node list to all_tasks
    all_tasks+=(${NODE[i]})
    # Determine the max number of tasks in any one node
    this_node=(${NODE[i]})
    ntasks=${#this_node[@]}
    max_tasks_per_node=$(($ntasks>$max_tasks_per_node ? $ntasks : $max_tasks_per_node))
  done

  # Loop over tasks in all nodes to verify that no duplicate tasks exists
  # (O(n^2) pairwise scan; task counts are small so this is acceptable)
  typeset -a dup_tasks
  for idx1 in "${!all_tasks[@]}"; do
    for idx2 in "${!all_tasks[@]}"; do
      [ $idx1 -eq $idx2 ] && continue
      if [ ${all_tasks[$idx1]} = ${all_tasks[$idx2]} ]; then
        # This is a duplicate
        # Do not add this to dup_tasks if it is already there
        dup_exists=0
        for idx3 in "${!dup_tasks[@]}"; do
          if [ ${dup_tasks[$idx3]} = ${all_tasks[$idx1]} ]; then
            dup_exists=1
            break
          fi
        done
        [ $dup_exists -eq 0 ] && dup_tasks+=(${all_tasks[$idx1]})
      fi
    done
  done
  if [ ${#dup_tasks[@]} -gt 0 ]; then
    bail "Duplicate tasks --> ${dup_tasks[*]} <-- found in task geometry: $geometry"
  fi
  unset dup_tasks

  # Ensure that the maximum task number does not exceed one less than the total number of tasks
  # (task numbers must form 0..N-1 for N tasks)
  max_task=0
  for idx1 in "${!all_tasks[@]}"; do
    if [ $idx1 -eq 0 ]; then
      max_task=${all_tasks[$idx1]}
      continue
    fi
    max_task=$((${all_tasks[$idx1]}>$max_task ? ${all_tasks[$idx1]} : $max_task))
  done
  max_task_allowed=$((${#all_tasks[@]}-1))
  [ $max_task -gt $max_task_allowed ] &&
      bail "Task number $max_task exceeds the allowable maximum ${max_task_allowed}."

  # Redefine nodes and mpi_tasks to be consistent with the supplied geometry
  nodes=${#NODE[@]}
  mpi_tasks=${#all_tasks[@]}

else

  ##################################################################################
  # No user supplied task geometry
  # Define task geometry based on the value of nodes and tasks_per_node
  # Tasks are numbered sequentially across nodes: {(0,1)(2,3)...}
  ##################################################################################
  ntask=-1
  geometry='{'
  for (( n=0; n<$nodes; n++ )); do
    # Append the current node configuration to the geometry variable
    for (( task=0; task<$tasks_per_node; task++ )); do
      (( ntask++ ))
      all_tasks+=($ntask)
      if [ $task -eq 0 ]; then
        curr_node="($ntask"
      else
        curr_node="${curr_node},$ntask"
      fi
    done
    curr_node="${curr_node})"
    geometry="$geometry$curr_node"

    # Assign the current node configuration to the NODE array
    # Remove enclosing parentheses and replace commas with spaces
    curr_node=${curr_node##@(\()}
    curr_node=${curr_node%%@(\))}
    curr_node=${curr_node//@(,)/ }
    NODE[n]="$curr_node"
  done
  geometry="$geometry"'}'

  # Define max_tasks_per_node to be consistent with this geometry
  max_tasks_per_node=$tasks_per_node

  # Define mpi_tasks to be consistent with this geometry
  mpi_tasks=${#all_tasks[@]}
fi

# Report the computed node/task layout at increasing levels of verbosity
if [ $verbose -gt 1 ]; then
  for idx in "${!NODE[@]}"; do
    echo "## ${this_mach}: NODE[$idx] mpi tasks: ${NODE[$idx]}"
  done
  echo "## ${this_mach}: nodes = $nodes  total mpi tasks = $mpi_tasks"
  [ $verbose -gt 2 ] && echo "## ${this_mach}: geometry = $geometry"
  [ $verbose -gt 2 ] && echo "## ${this_mach}: max_tasks_per_node = $max_tasks_per_node"
  [ $verbose -gt 3 ] && echo "## ${this_mach}: All tasks = ${all_tasks[*]}"
fi

# Ensure certain variable values are valid positive integers
# (verify_pint also strips stray whitespace from each value)
verify_pint tasks_per_node max_tasks_per_node nodes mpi_tasks

# A command to execute on the batch machine is required on this command line
if [ -z "$qjob_cmd" ]; then
  bail "A program name is required on the command line"
fi

# Extract the first word from qjob_cmd
# This will typically be the name of a user supplied script file,
# although it could be the first word of a single shell command line
tmp_arr=($qjob_cmd)
user_batch_job=${tmp_arr[0]}
unset tmp_arr

# At this point:
# qrun_args holds every user supplied command line arg except those
# that constitute the batch job command
# qjob_cmd holds only the args that constitute the batch job command

if [ $verbose -gt 4 ]; then
  echo "## ${this_mach}: qrun_args = $qrun_args"
  echo "## ${this_mach}: qjob_cmd = $qjob_cmd"
fi

# If we are not already on the submission machine, re-invoke qrun remotely
# inside a throwaway work directory, copying the user's job script over first
# when one exists, then exit.
if [ $this_mach != $sub_mach ]; then
  # This must be run on the execution machine
  # The name of a temporary dir to be created on the remote machine
  tmpd="tmp_qrun_${stamp}_$$"
  if [ -s $user_batch_job ]; then
    # A file containing commands to be run in batch mode exists on this machine
    # Include a copy of this file to the remote machine in the remote command
    # We need the full pathname to this file for the remote copy
    full_path=$(readlink -f $user_batch_job)
    [ -z "$full_path" ] && bail "Unable to resolve full path to $user_batch_job"
    # Define a name for this file to be used on the remote machine
    remname="$(basename $user_batch_job)"
    # Replace the original script name in qjob_cmd with this file name
    qjob_cmd=${qjob_cmd/@($user_batch_job)/$remname}
    # If successful, the function this_fqhn will define FQHN
    # (needed so the remote side can scp the script back from this host)
    this_fqhn $this_mach || bail "Problem executing this_fqhn"
    rmt_cmd='cd $HOME/tmp; '"mkdir ${tmpd}; cd ${tmpd}; scp ${FQHN}:$full_path ${remname}; qrun $qrun_args - ${qjob_cmd}; cd ..; rm -fr $tmpd"
  else
    rmt_cmd='cd $HOME/tmp; '"mkdir ${tmpd}; cd ${tmpd}; qrun $qrun_args - ${qjob_cmd}; cd ..; rm -fr $tmpd"
  fi
  [ $verbose -gt 1 ] && echo "## ${this_mach}: rmt_cmd = $rmt_cmd"
  # NOTE(review): the unquoted herestring <<< $rmt_cmd word-splits and rejoins
  # the command with single spaces; harmless for these commands but quoting
  # ("$rmt_cmd") would be safer — confirm before changing.
  $ROUTE_SSH ssh $sub_mach 'bash -s' -- <<< $rmt_cmd ||
      bail "Problem executing qrun on $sub_mach"

  # Always exit after issuing this remote command
  exit
fi

####################################################################################
# Define keywords for the LoadLeveler command file
# Each KEYWD entry is one "#@ keyword = value" line; entries set to "#"
# below are placeholders that emit an inert comment line instead.
####################################################################################

# Job name as it appears in the batch queue
if [ -z "$qjob_name" ]; then
  qjob_name=$(basename $user_batch_job)
fi
KEYWD[job_name]="#@ job_name = $qjob_name"

# The directory used as the initial working directory for the batch job
KEYWD[initialdir]="#@ initialdir = /tmp"

# Standard input for the batch job
KEYWD[input]="#@ input = /dev/null"

# Standard output file created by the batch job
if [ -z "$std_output" ]; then
  # If the user has not specified a location for stdout then define a default
  std_output="$HOME/.queue/${qjob_name}_${sub_mach}_${stamp}_out"
fi
KEYWD[output]="#@ output = $std_output"

# Standard error file created by the batch job
if [ -z "$std_error" ]; then
  std_error="$HOME/.queue/${qjob_name}_${sub_mach}_${stamp}_err"
fi
KEYWD[error]="#@ error = $std_error"

# Wall clock limit in seconds
KEYWD[wall_clock_limit]="#@ wall_clock_limit = $time_limit"

# Number of nodes requested (possibly reset below)
KEYWD[node]="#@ node = $nodes"

# Number of tasks required on each node (possibly reset below)
KEYWD[tasks_per_node]="#@ tasks_per_node = $tasks_per_node"

# Control simultaneous multithreading on all nodes (possibly reset below)
KEYWD[smt]="#@ smt = as_is"

# Parallel jobs use task_geometry exclusively, so the node and
# tasks_per_node keywords are blanked out below in both branches.
if [ $mpi_tasks -gt 1 ]; then
  # This is a parallel job
        KEYWD[job_type]="#@ job_type = parallel"
      KEYWD[node_usage]="#@ node_usage = not_shared"
             KEYWD[smt]="#@ smt = on"
   KEYWD[task_geometry]="#@ task_geometry = $geometry"
            KEYWD[node]="#"
  KEYWD[tasks_per_node]="#"
else
  # This is a serial job
        KEYWD[job_type]="#@ job_type = serial"
      KEYWD[node_usage]="#@ node_usage = shared"
             KEYWD[smt]="#"
   KEYWD[task_geometry]="#"
            KEYWD[node]="#"
  KEYWD[tasks_per_node]="#"
fi

# Number of cpus requested on each node
cpus_per_node="ConsumableCpus($max_tasks_per_node)"

# memory requested on each node
if [ $memory_limit -eq 0 ]; then
  # The user has not requested a memory limit
  # NOTE(review): 51gb/2gb appear to be site-specific node sizes — confirm
  # these still match the current hardware.
  if [ $mpi_tasks -gt 1 ]; then
    # Nodes are not shared, use all available memory
    memory_limit=51gb
  else
    # Nodes are shared, be reasonable
    memory_limit=2gb
  fi
fi
memory_per_node="ConsumableMemory($memory_limit)"

# Node resources requested
KEYWD[node_resources]="#@ node_resources = $cpus_per_node $memory_per_node"

# An email address used for notifications
KEYWD[notify_user]="#@ notify_user = "`whoami`"@ec.gc.ca"

# Any environment variables required by this job
# NOTE(review): assumes CCRNTMP is set in the submitting environment — the
# generated job uses it as the parent of its scratch directory; verify.
KEYWD[environment]="#@ environment = CCRNTMP=$CCRNTMP"

####################################################################################
# Create the job file to be submitted
####################################################################################

# Submission job file in the current directory, made unique by our PID
qjob=qrun_job_$$

# Create the preamble containing LoadLeveler keywords
# (unquoted delimiter: the ${KEYWD[...]} references expand NOW)
cat > $qjob <<end_qjob_head
#!/bin/bash
#@ shell = /usr/bin/bash
#@ class = development
${KEYWD[job_name]}
${KEYWD[initialdir]}
${KEYWD[input]}
${KEYWD[output]}
${KEYWD[error]}
${KEYWD[notify_user]}
${KEYWD[wall_clock_limit]}
${KEYWD[job_type]}
${KEYWD[node_usage]}
${KEYWD[smt]}
${KEYWD[task_geometry]}
${KEYWD[node]}
${KEYWD[tasks_per_node]}
${KEYWD[node_resources]}
${KEYWD[environment]}
#@ checkpoint = no
#@ core_limit = unlimited
#@ notification = error
#@ queue

end_qjob_head

# Write initialize commands to qjob
# (quoted delimiter: this body is copied literally and expands at RUN time)
cat >> $qjob <<'end_qjob_body'
# Initialize wall time
wall_in=`date +%s`

# Simple error exit routine
bail() {
  echo " *** ERROR *** $*"
  date
  wall_out=`date +%s`
  wall_time=`expr $wall_out - $wall_in`
  echo "Wall clock time in seconds: $wall_time"
  exit 1
}

echo "Time in: "`date`

# Keep track of this dir
CWD=`pwd`
echo "Initial dir is $CWD"

# Create a tmp dir and run there
stamp=`date "+%j%H%M%S"$$`
TMPDIR=$CCRNTMP/tmp_$stamp
mkdir $TMPDIR || bail "Unable to create tmp dir $TMPDIR"
echo "Execution dir is $TMPDIR"
cd $TMPDIR
echo " "

end_qjob_body

# Add task configuration information to the submission job so the user's
# script can see the node/task layout it was granted
echo "  declare -a NODE"      >> $qjob
for idx in "${!NODE[@]}"; do
  echo "  NODE[$idx]=\"${NODE[$idx]}\"" >> $qjob
done
echo "  nodes=$nodes"         >> $qjob
echo "  mpi_tasks=$mpi_tasks" >> $qjob

# Insert the user supplied script to be run on the batch machine
# (or, if no such file exists, the literal command line itself)
echo " "              >> $qjob
if [ -s "$user_batch_job" ]; then
  cat $user_batch_job >> $qjob
else
  echo "$qjob_cmd"    >> $qjob
fi
echo " "              >> $qjob

# Write post processing commands to qjob
# (quoted delimiter: copied literally, expands when the batch job runs)
cat >> $qjob <<'end_qjob_body'

cd $CWD
echo "Time out: "`date`

# Show wall clock used
wall_out=`date +%s`
wall_time=`expr $wall_out - $wall_in`
echo "Wall clock time in seconds: $wall_time"

# Clean up
clean=1
if [ $clean -eq 1 ]; then
  rm -fr $TMPDIR
else
  echo "Execution dir $TMPDIR"
fi

end_qjob_body

# At maximum verbosity, dump the generated submission job for inspection
if [ $verbose -gt 4 ]; then
  echo " "
  echo "Job to be submitted: $qjob"
  cat $qjob
fi

# Submit this job (skipped on a dry run)
if [ $dry_run -eq 0 ]; then
  llsubmit $qjob
fi

if [ $keep -ne 0 ]; then
  echo "## ${this_mach}: Submission job --> $qjob <-- remains on disk in $(pwd)"
else
  # Clean up the submission job
  [ -n "$qjob" ] && rm -f $qjob
fi
