#!/bin/sh
#=======================================================================
# Copy files to cfs                                        --- mdump ---
# $Id: mdump_jobdef 669 2012-04-24 20:44:05Z acrnrls $
#=======================================================================
#
# Files dumped will be of the form
#
#    ${prefix}_${runid}_${year}_m${mon}_${suffix}
#
# for all months from previous_year, previous_month to current_year,
# current_month or from current_year, current_month to next_year,
# next_month depending on which of previous_(year|month) or
# next_(year|month) are set.
#
# Both prefix and suffix may have multiple values, in which case file
# name generation iterates over these multiple values. Values for prefix
# and suffix are defined by user supplied lists which are read from the
# parmsub variables mdump_prefix_list and mdump_suffix_list.
# See below for details of how these lists are composed.
#
# Larry Solheim Apr 2006
#=======================================================================
#
#     keyword :: mdump
# description :: dump model files to cfs
#

  # Export every variable assigned from here on so that sourced scripts and
  # child processes (make_file_name_list, cccjob, tdumper.dk) see them
  set -a
  # betapath2 defines the standard CCCma path variables (RUNPATH, CCRNTMP, ...)
  . betapath2

#  * ........................... Parmsub Parameters ............................

  # These variables are set when the job string is created
  # (the "NotSet" values are placeholders replaced by parmsub)
  previous_year=NotSet
  previous_month=NotSet

  current_year=NotSet
  current_month=NotSet

  next_year=NotSet
  next_month=NotSet

  run_start_year=NotSet
  run_start_month=NotSet
  run_stop_year=NotSet
  run_stop_month=NotSet

  # Job identity: run id plus NQS file name prefix/extension
  runid="job000"; nqsprfx="${runid}_"; nqsext='';
  # uxxx is the user/file prefix used when building model file names
  uxxx='uxxx'; model_uxxx=$uxxx; mdump_uxxx=$model_uxxx; # memory99=1
  # crawork string name and account/user identifiers
  crawork="${runid}_job"; username="acrnxxx"; user="XXX";

  jobname=mdump;
  # Batch resource requests — presumably time limit and memory; confirm
  # against the submit3/NQS documentation
  stime="1800"; memory1="1500mb"; lopgm="lopgm";

  noprint=on
  nextjob=on

  # Temporary directory where this script will run
  # (parmsub placeholder; when left empty, ":=" keeps the existing CCRNTMP)
  mdump_CCRNTMP=''
  CCRNTMP=${mdump_CCRNTMP:=$CCRNTMP}

  # RUNPATH on execution machine
  mdump_RUNPATH=''
  if [ -n "$mdump_RUNPATH" -a "$mdump_RUNPATH" = "CCRNTMP" ]; then
    # Allow the special case of "mdump_RUNPATH = CCRNTMP" to force RUNPATH
    # and CCRNTMP to be the same dir
    RUNPATH=$CCRNTMP
  else
    RUNPATH=${mdump_RUNPATH:=$RUNPATH}
  fi

  # Alternate path to a directory where .queue/.crawork will be found
  JHOME=''

  if [ -n "$JHOME" -a x"$JHOME" != x"$HOME" ]; then
    # Allow optional reset of DATAPATH/RUNPATH
    JHOME_DATA=''
    # Apply a user supplied JHOME_DATA to RUNPATH first, using ":-" so an
    # empty JHOME_DATA leaves RUNPATH untouched.
    # Bug fix: previously the DATAPATH line ran first with ":=", which
    # assigned DATAPATH to an empty JHOME_DATA and then silently clobbered
    # RUNPATH with DATAPATH whenever JHOME was set without JHOME_DATA.
    RUNPATH=${JHOME_DATA:-$RUNPATH}
    # ":=" is kept here so an empty JHOME_DATA still defaults to DATAPATH
    # for the lsarc job spliced in below (see the JHOME_opt construction)
    DATAPATH=${JHOME_DATA:=$DATAPATH}
    # Allow optional reset of CCRNTMP
    JHOME_RUN=''
    CCRNTMP=${JHOME_RUN:=$CCRNTMP}
  fi

  # RMTRUNPATH is used when vic = on is specified
  # (location of saved files on the remote machine in Victoria)
  mdump_RMTRUNPATH=''
  RMTRUNPATH=${mdump_RMTRUNPATH:=$RMTRUNPATH}

  # These variable definitions are required for lock file processing
  # cfsuser is the account assumed to own the arcfiles on the cfs
  dump_cfsuser=$username
  mdump_cfsuser=$dump_cfsuser
  cfsuser=$mdump_cfsuser

  # masterdir = on means the arcfile belongs to the "official" master
  # archive tree (/home/cfs_ccrd/ccrn/offcl_data — see ARCDIRX below)
  dump_masterdir=off
  mdump_masterdir=${dump_masterdir:=off}
  masterdir=${mdump_masterdir:=off}

  # shortermdir = on selects the short term archive directory on the cfs
  # (ccrd_short_term_archive) instead of ccrd_user_archive
  dump_shortermdir=on
  mdump_shortermdir=${dump_shortermdir:=on}
  shortermdir=${mdump_shortermdir:=on}

  # nontwrkchk = yes will turn off the md5sum check on the cfs
  # (but not on the front end)
  nontwrkchk=''

  # ---Start_submit_ignore_code----
  # (marker paired with ---Stop_submit_ignore_code---- below)

  # Unique stamp for temporary file names: day-of-year + time + process id
  stamp=`date "+%j%H%M%S"$$`

  # Use -e option if recognized by echo
  # If `echo -e` prints "-e" literally then this echo does not accept -e as
  # an option, so plain echo is used (presumably such echos interpret
  # backslash escapes by default); otherwise -e must be passed explicitly.
  if [ "X`echo -e`" = "X-e" ]; then
    echo_e() { echo ${1+"$@"}; }
  else
    echo_e() { echo -e ${1+"$@"}; }
  fi

  # bail is a simple error exit routine
  # Note: we write the error directly to a file in ~/.queue so that this
  #       info is not lost if/when stdout is not returned
  error_out="${JHOME:-$HOME}/.queue/error_mdump_${runid}_$stamp"
  [ ! -z "$error_out" ] && rm -f $error_out
  # bail: report a fatal error on stdout and in $error_out, then exit 1
  bail(){
    echo_e `date`" --- mdump: $*"
    echo_e `date`" --- mdump: $*" >>$error_out
    exit 1
  }
  # wmsg: append a message to $error_out without exiting
  wmsg(){
    echo_e `date`" --- mdump: $*" >>$error_out
  }

  # BERUNPATH must be set if files are to be copied from a machine that is
  # not the default back end machine
  bemach=''
  if [ -z "$bemach" ]; then
    # If bemach is not set explicitly then see if this job is executing on
    # a back end machine and use that machine name for bemach
    # Strip any domain part from the local hostname
    this_mach=`uname -n|awk -F\. '{print \$1}' -`
    on_back_end=0
    # Known back end hosts: c<digit>... compute nodes, the four named
    # machines, and their two letter short forms
    case $this_mach in
                     c[0-9]*) on_back_end=1 ;;
      zeta|saiph|spica|hadar) on_back_end=1 ;;
                 za|sa|sp|ha) on_back_end=1 ;;
    esac
    if [ $on_back_end -eq 1 ]; then
      # Set this_mach to its known alias
      case $this_mach in
        ha|ha*) this_mach=hadar ;;
        sp|sp*) this_mach=spica ;;
           c1*) this_mach=spica ;;
           c2*) this_mach=hadar ;;
        za|ze*) this_mach=zeta  ;;
        sa|sa*) this_mach=saiph ;;
           c6*) this_mach=saiph ;;
           c7*) this_mach=zeta  ;;
      esac
      bemach=$this_mach
    fi
  fi
  if [ -n "$bemach" ]; then
    # Reset BERUNPATH if bemach is set
    # Ensure bemach contains a known alias
    case $bemach in
      ha|ha*) bemach=hadar ;;
      sp|sp*) bemach=spica ;;
      za|ze*) bemach=zeta  ;;
      sa|sa*) bemach=saiph ;;
    esac
    # Ask the back end for its RUNPATH; '$RUNPATH' is single quoted so it
    # expands on the remote side, not locally
    XXX=`ssh $bemach echo '$RUNPATH' 2>/dev/null` || bail "Unable to determine BERUNPATH"
    BERUNPATH=$XXX
  else
    # Keep whatever BERUNPATH the current environment already supplies
    BERUNPATH=$BERUNPATH
  fi

  # The variables mdump_prefix_list and mdump_suffix_list are strings containing
  # embedded colons and whitespace which are interpreted as list delimiters.
  # These strings may be thought of as 2 dimensional arrays, the rows of these
  # arrays are colon (:) separated strings and each row is divided into columns
  # by separating on white space.
  # These variables are used by make_file_name_list along with runid and year/mon
  # information from the *_year and *_month variables to generate file names.

  # In make_file_name_list the prefix_list and suffix_list strings are first
  # separated into colon delimited lists (rows). There must be an equal number of
  # rows in each of prefix_list and suffix_list because these rows will be
  # used in pairs.
  # Each pair of rows (one row from prefix_list and one row from suffix_list)
  # is separated into a white space separated list. Each element of these
  # white space separated lists is a single prefix or suffix (possibly modified
  # by appending a "+" followed by a comma separated list of integers in the
  # range 1-12). No white space is allowed within a single prefix or suffix.
  # These individual (pre|suf)fixes are then iterated over for each year and
  # month and for each pair of rows in prefix_list and suffix_list to form the
  # desired set of file names, each of which is of the form
  #
  # ${prefix}_${runid}_${year}_m${mon}_${suffix}

  # Any prefix or suffix in these  lists may be modified by appending a +
  # followed by a comma separated list of numbers (no white space is allowed
  # within this modifier). Each number within the modifier list will correspond
  # to a month (1-12) for which a file with this suffix is to be included.
  # If the modifier exists for a particular suffix then only those months
  # indicated in the modifier will be added to the file list.

  # If the above form of file name is inappropriate then the user may
  # provide a template or templates to produce arbitrary file names.
  # These templates are defined in the variable mdump_prefix_list.
  # Any individual prefix will be treated as a file name template if it
  # begins with a "%" character. The template will consist of everything
  # after the "%" character and up to the next colon or white space.
  # It can be composed of anything but must ultimately (after variable
  # substitution, etc) result in a valid file name. When a template
  # is encountered, it is used as the entire file name (ie the "normal" file
  # name form is disregarded as is the corresponding suffix(s)). However, it
  # is subject to the same iteration procedure as a normal prefix and
  # does undergo variable substitution. Variables that are defined for
  # substitution include year, mon, runid, uxxx, start_year, start_mon,
  # stop_year, stop_mon, all of the *_year and *_month variables defined
  # above as well as any user supplied variable definitions passed to
  # make_file_name_list as a command line option of the form var=val.

  # Suffix rows to iterate over (parmsub placeholder; set at job creation)
  mdump_suffix_list=''
  # Fall back to the standard suffix set when no user list is supplied
  if [ -n "$mdump_suffix_list" ]; then
    suffix_list="$mdump_suffix_list"
  else
    suffix_list='gs ss rs+12 ab+12 an+12'
  fi

  # Prefix rows to iterate over (parmsub placeholder; set at job creation)
  mdump_prefix_list=''
  # Default to the single prefix given by mdump_uxxx
  if [ -n "$mdump_prefix_list" ]; then
    prefix_list="$mdump_prefix_list"
  else
    prefix_list="$mdump_uxxx"
  fi

  # Create a file containing a list of file names that may then be
  # "sourced" in the current environment to define the variables
  # file1, file2,..., file$join, join. These variables are used by
  # tdumper to compile the list of files to be archived.
  join=0

  # make_file_name_list uses the variables current_year, current_month,
  # previous_year, previous_month, next_year and next_month to
  # determine start and stop dates for file name creation.
  # It also uses runid, prefix_list and suffix_list from the current
  # environment to build these file names.

  # Use "command -v" rather than "which": it is the standard, portable way
  # to test for a program in PATH and always returns a reliable exit status
  # (some which implementations do not). Discard stdout so the resolved
  # path is not echoed into the job output.
  command -v make_file_name_list >/dev/null 2>&1 || bail "make_file_name_list is not in your path"

  # Allow user supplied command line options for make_file_name_list
  # The following invocation of make_file_name_list will not allow multi-list
  # output so if any command line option is supplied that will turn on
  # multi-list output (e.g. --months_max=... --size_max=... --number_max=..)
  # then this script will abort.
  mdump_file_list_opts=''
  fopts="${mdump_file_list_opts:-}"

  mdump_mon_offset=''
  if [ -n "$mdump_mon_offset" ]; then
    # Set a user supplied month offset
    # (eval so any variable references embedded in the parmsub value expand)
    eval fopts=\"$fopts --mon_offset\=$mdump_mon_offset\"
  fi

  # Create a temporary file containing the file list
  tmp_file_list="${JHOME:-$HOME}/.queue/mdump_file_list_${runid}_${stamp}"
  # Note: $fopts is deliberately unquoted so it word-splits into options
  make_file_name_list $fopts --nomulti_list "$tmp_file_list" >>$error_out 2>&1 ||\
    bail "Problem in make_file_name_list"
  rm -f $error_out

  [ ! -s "$tmp_file_list" ] && bail "Unable to create file list"

  # A file list was created ...source it
  : ; . $tmp_file_list

  # At this point file1, file2,... are defined in the current environment
  # as well as certain other variables such as start_year, start_mon,
  # stop_year and stop_mon which correspond to the start and stop dates
  # for the file names that were created.
  ym_range="${start_year}m${start_mon}_${stop_year}m${stop_mon}"

  # Delete or keep the temporary file that contains the file list
  # Normalize mdump_keep_file_list: strip blanks, eval so any embedded
  # variable references expand, then map on/off/yes/no onto 1/0
  mdump_keep_file_list=0
  XXX=`echo $mdump_keep_file_list|sed 's/ //g'`
  eval mdump_keep_file_list\=$XXX
  [ "$mdump_keep_file_list" = 'on'  ] && eval mdump_keep_file_list\=1
  [ "$mdump_keep_file_list" = 'off' ] && eval mdump_keep_file_list\=0
  [ "$mdump_keep_file_list" = 'yes' ] && eval mdump_keep_file_list\=1
  [ "$mdump_keep_file_list" = 'no'  ] && eval mdump_keep_file_list\=0
  if [ $mdump_keep_file_list -eq 1 ]; then
    # Rename the file list to include the year/month range
    # and place it in the users ~/.queue dir
    saved_file_list="${JHOME:-$HOME}/.queue/mdump_file_list_${runid}_${ym_range}_${stamp}"
    mv -f $tmp_file_list $saved_file_list
  else
    # Delete the file that contains the file list
    rm -f $tmp_file_list
  fi

  # These are defined for backward compatibility
  mdump_start_year=$start_year
  mdump_start_mon=$start_mon
  mdump_end_year=$stop_year
  mdump_end_mon=$stop_mon

  # If arclabel is set then files to be dumped will be combined in a
  # cmcarc file with a name of the form {arclabel}_{Date-Time-Stamp}_arc
  # If arclabel is not set then files to be dumped will be combined in a
  # cmcarc file with a name of the form {common-prefix}_{Date-Time-Stamp}_arc
  mdump_arclabel="${mdump_uxxx}_${runid}_$ym_range"
  if [ x"$masterdir" != x"on" ]; then
    # Tag the arclabel with the kind of data being dumped, but only when
    # this is not an "official" (masterdir) run
    hasmodel=0
    hasdiag=0
    hasts=0
    # Classify each suffix: raw model output, diagnostic output, time series
    for sfx in $mdump_suffix_list; do
      case $sfx in
        gs|ss|gz|td|cm) hasmodel=1 ;;
        gp|xp|cp)       hasdiag=1  ;;
        ts)             hasts=1    ;;
      esac
    done
    # The _diag and _model tags are mutually exclusive by construction
    if [ $hasdiag -eq 1 -a $hasmodel -eq 0 ]; then
      # assume that this is diagnostic data
      mdump_arclabel="${mdump_arclabel}_diag"
    elif [ $hasmodel -eq 1 -a $hasdiag -eq 0 ]; then
      # assume that this is model data
      mdump_arclabel="${mdump_arclabel}_model"
    fi
    if [ $hasts -eq 1 -a $hasmodel -eq 0 -a $hasdiag -eq 0 ]; then
      # assume that this is time series data
      mdump_arclabel="${mdump_arclabel}_ts"
    fi
  fi
  arclabel=${mdump_arclabel:-''}

  # with_lock_file = on requests that this dump be protected by a lock file
  # which a spliced-in follow-up job removes when the dump has finished
  mdump_with_lock_file='';
  with_lock_file=${mdump_with_lock_file:=off}
  # Normalize: strip blanks, eval so any embedded variable references
  # expand, then map on/off/yes/no onto 1/0
  XXX=`echo $with_lock_file|sed 's/ //g'`
  eval with_lock_file\=$XXX
  [ "$with_lock_file" = 'on' ]  && eval with_lock_file\=1
  [ "$with_lock_file" = 'off' ] && eval with_lock_file\=0
  [ "$with_lock_file" = 'yes' ] && eval with_lock_file\=1
  [ "$with_lock_file" = 'no' ]  && eval with_lock_file\=0

  if [ $with_lock_file -eq 1 ]; then
    # Create a lock file and insert a job in the current crawork string
    # which will delete that lock file once this job is finished
    lock_name="${JHOME:-$HOME}/.queue/.crawork/lock_mdump_${runid}_${ym_range}_t"
    [ -e "$lock_name" ] && bail "Lock file exists: $lock_name"

    # Check the cfs to see if an arcfile whose name conforms to the current
    # definition of arclabel exists. If it does issue an error and abort.
    if [ x"$arclabel" != x"on" ]; then
      # Ensure that permissions on the arcfile just written are read only
      # for owner and group

      # USRX is the last 3 letters of the users login name
      # This should be the user that owns the arcfile
      if [ -n "$cfsuser" ]; then
        # Assume cfsuser is the owner if cfsuser is defined
        USRX=`echo $cfsuser|awk '{print substr($0,length($0)-2)}' -`
      else
        # Otherwise the invoking user must be the owner
        USRX=`whoami|awk '{print substr($0,length($0)-2)}' -`
      fi

      # Determine the directory on cfs in which the arcfile resides
      if [ x"$masterdir" = x"on" ]; then
        # runid must be the second underscore (_) separated field
        # in arclabel when masterdir is "on"
        xrunid=`echo $arclabel|awk -F_ '{print $2;exit}' -`
        # The first 2 characters of arclabel, together with the runid,
        # are used to determine the subdir in which the arc file lives
        ch1=`echo $arclabel|awk '{print substr($1,1,1)}' -`
        ch2=`echo $arclabel|awk '{print substr($1,2,1)}' -`
        [ -z "$ch1" ] && bail "Invalid arclabel prefix. arclabel = $arclabel"
        [ -z "$ch2" ] && bail "Invalid arclabel prefix. arclabel = $arclabel"
        ARCDIRX=/home/cfs_ccrd/ccrn/offcl_data/$ch2/$xrunid/$ch1
      else
        if [ x"$shortermdir" = x"on" ]; then
          ARCDIRX=/home/cfs_ccrd/ccrd_short_term_archive/$USRX
        else
          ARCDIRX=/home/cfs_ccrd/ccrd_user_archive/$USRX
        fi
      fi

      # NOTE(review): the cfs "ls" pre-check below has been disabled by
      # commenting it out; USRX/ARCDIRX computed above are no longer used
      # by any active code in this section — confirm they are still needed
      # (set -a exports them) before removing
###      # Get a listing for the arcfile from cfs
###      lsfail=0
###      lsline=0
###      if [ x"$masterdir" = x"on" ]; then
###        lscfs=`ssh cfs ls -lL $ARCDIRX/${arclabel}_\*_arc 2>&1` || lsfail=1
###        # If successful then determine the number of lines that matched
###        if [ $lsfail -eq 0 ]; then
###          lsline=`ssh cfs ls -lL $ARCDIRX/${arclabel}_\*_arc|wc -l` || lsfail=1
###        fi
###      else
###        # User arcfiles begin with uxxx_ where "xxx" is the
###        # last 3 letters in the users account name
###        lscfs=`ssh cfs ls -lL $ARCDIRX/u${USRX}_${arclabel}_\*_arc 2>&1` || lsfail=1
###        if [ $lsfail -eq 0 ]; then
###          lsline=`ssh cfs ls -lL $ARCDIRX/u${USRX}_${arclabel}_\*_arc|wc -l` || lsfail=1
###        fi
###      fi
###      if [ $lsfail -eq 0 ]; then
###        # If a conforming arcfile does exist then issue an error message and abort
###        wmsg "An arcfile conforming to arclabel --> $arclabel <-- already exists on cfs.\n$lscfs"
###        if [ $lsline -ne 1 ]; then
###          bail "More than one arcfile matches the label --> ${arclabel} <--"
###        fi
###        # Check on permissions and ownership
###        perm=`echo $lscfs|awk '{print $1;exit}' -`
###        xowner=`echo $lscfs|awk '{print $3;exit}' -`
###        xarcfname=`echo $lscfs|perl -ane'@N=split(q(/),$F[-1]);print $N[-1]'`
###        if [ x`echo $perm|sed -n '/.r--r-----/p'` = x ]; then
###          bail "Permissions on arcfile are incorrect."
###        fi
###        if [ x"$masterdir" = x"on" ]; then
###          if [ x"$xowner" != "xacrnsrc" ]; then
###            bail "Owner of arcfile should be acrnsrc when masterdir = on."
###          fi
###        fi
###        # If we get here then the permissions and ownership are ok suggesting that
###        # the arcfile contains a full set of valid data
###        wmsg
###        wmsg "This arcfile appears to have the correct permissions and ownership."
###        wmsg "It is possible that this arcfile is ok but you need to manually confirm"
###        wmsg "that all files are present and complete within it. If they are then you"
###        wmsg "can remove the first mdump job from your job string and resubmit."
###        wmsg "The files about to be dumped are:"
###        nn=0
###        while [ $nn -lt $join ]; do
###          nn=`expr $nn + 1`
###          eval curr_file=\$file$nn
###          wmsg "file$nn = $curr_file"
###        done
###        # wmsg "The arfile $xarcfname contains:"
###        # xcontents=`arc_lst $xarcfname`
###        # wmsg "$xcontents"
###        bail "Correct CFS problems and/or edit your crawork string then resubmit."
###      fi
    fi

    which splice_job_string || bail "splice_job_string is not in your path"
    # Create the lock file and insert a job to remove the lock file into the
    # crawork string immediately following the current job.
    # The variable "crawork" must be defined/exported in the current environment
    # Certain other variables will also be read from the env if present
    check_cfs_arcfile=on
    splice_job_string rmlock=$lock_name >>$error_out 2>&1 ||\
      bail "Problem in splice_job_string"
    rm -f $error_out
    # Only create the lock once the rmlock job has been spliced in, so the
    # lock cannot be left behind without a job to remove it
    touch $lock_name
  fi

  # Insert a job that will query the CFS archive database for the names
  # of the files to be dumped by this tdumper job and abort the string
  # if any of the file names are not in the database.
  # This job will run after the current job exits successfully but before
  # the lock file delete job that was inserted above (if any) runs.
  mdump_with_lsarc='';
  with_lsarc=${mdump_with_lsarc:=on}
  # Normalize: strip blanks, eval so any embedded variable references
  # expand, then map on/off/yes/no onto 1/0
  XXX=`echo $with_lsarc|sed 's/ //g'`
  eval with_lsarc\=$XXX
  [ "$with_lsarc" = 'on' ]  && eval with_lsarc\=1
  [ "$with_lsarc" = 'off' ] && eval with_lsarc\=0
  [ "$with_lsarc" = 'yes' ] && eval with_lsarc\=1
  [ "$with_lsarc" = 'no' ]  && eval with_lsarc\=0

  if [ $with_lsarc -eq 1 ]; then
    # Create the lsarc job and provide it with the list of file names created above
    tmp_flst2="${JHOME:-$HOME}/.queue/mdump_file_list2_${runid}_${stamp}"
    # Write file1..file$join one name per line
    nn=0
    while [ $nn -lt $join ]; do
      nn=`expr $nn + 1`
      eval fname=\$file$nn
      echo "$fname" >> $tmp_flst2
    done

    # Indicate how cccjob should be invoked
    # Setting CCCJOB_ROOT will allow a job specific version of cccjob to used
    CCCJOB_ROOT=''
    if [ -z "$CCCJOB_ROOT" ]; then
      CCCJOB_ENV=''
    else
      # Prefix the cccjob invocation with "env CCCJOB_ROOT=..." so the
      # alternate cccjob tree is picked up
      eval CCCJOB_ENV=\'env CCCJOB_ROOT\=$CCCJOB_ROOT\'
    fi

    # Provide command line defs for JHOME and JHOME_DATA
    # if these variables are defined in the current env
    JHOME_opt=''
    [ -n "$JHOME" ]      && JHOME_opt="JHOME=$JHOME "
    [ -n "$JHOME_DATA" ] && JHOME_opt="$JHOME_opt JHOME_DATA=$JHOME_DATA "
    [ -n "$JHOME_RUN" ]  && JHOME_opt="$JHOME_opt JHOME_RUN=$JHOME_RUN "

    try_CFSDATA=0
    # Build the lsarc job into a scratch file, then splice it into the string
    job_string_to_insert="INSERT_JOB_STRING_${runid}_$stamp"
    rm -f $job_string_to_insert
    touch $job_string_to_insert
    $CCCJOB_ENV cccjob --out=$job_string_to_insert --job="lsarc=${tmp_flst2}:s" \
      --start="${start_year}:${start_mon}" --stop="${stop_year}:${stop_mon}" \
      runid=$runid uxxx=$mdump_uxxx masterdir=$masterdir cfsuser=$cfsuser \
      crawork=$crawork noprint=$noprint try_CFSDATA=$try_CFSDATA \
      lsarc_suffix_list="$suffix_list" lsarc_prefix_list="$prefix_list" \
      $JHOME_opt >>$error_out 2>&1

    rm -f $tmp_flst2

    # Insert the job just created into the existing string from which this job came
    # The variable "crawork" must be defined/exported in the current environment
    splice_job_string $job_string_to_insert >>$error_out 2>&1 ||\
      bail "Problem in splice_job_string"
    rm -f $job_string_to_insert $error_out
  fi

  # ---Stop_submit_ignore_code----

#  * ............................ Condef Parameters ............................

  # qsublog: parmsub placeholder (default off) — presumably controls batch
  # log handling in tdumper.dk; confirm there
  mdump_qsublog=''
  qsublog=${mdump_qsublog:=off}

  # besc = on causes a tdumper job to run on the back end (spica/hadar)
  mdump_besc=''
  besc=${mdump_besc:=off}

  # The default if none of sv, both, vic or svvic are set to "on" is to
  # dump files from the back end machine

  # sv : on means dump files saved on the front end
  dump_sv=off
  mdump_sv=${dump_sv:=off}
  sv=${mdump_sv:=off}

  # svsave : on means save files on the front end when dumping
  #          from the back end or from UVic
  dump_svsave=off
  mdump_svsave=${dump_svsave:=off}
  svsave=${mdump_svsave:=off}

  # Note: Once again submit3 is causing a problem. It will not respect end of line
  # comments inside its internally created file named job.info. As a result,
  # constructs such as "vic =" are flagged as a syntax error even though they
  # are commented out.
  both=off    # both  : on means dump files saved on the front and back end machines
  vic=off     # vic   : on means dump files saved in Victoria
  svvic=off   # svvic : on means dump files saved on the front end and in Victoria

  # nolist : off means save CFSDATA* files in ~/info
  # nolist : on means do not save CFSDATA* files in ~/info
  mdump_nolist=''
  nolist=${mdump_nolist:=on}

  # rmdskcpy = on means remove the copy of the file on the cfs disk buffer
  # as soon as it has been written to tape
  mdump_rmdskcpy=''
  rmdskcpy=${mdump_rmdskcpy:=off}

  # cnfsallow = on will allow the user to use a cnfs dir for
  # RUNPATH and/or CCRNTMP when svsave = on
  # Normally this is not allowed because of the possibility that files
  # may get corrupted during transfer from/to the nfs file system
  mdump_cnfsallow=''
  cnfsallow=${mdump_cnfsallow:=off}

  # dpalist = on means store DATAPATH information in a local file for use by access/save/delete
  # Since this local file is never deleted, dpalist = on should only be used when access,
  # save or delete are part of a job that runs in a temporary working directory which gets
  # removed after the job completes. Most decks do this, but not all. You have been warned!
  # (the default here is the empty string, i.e. neither on nor off)
  mdump_dpalist=''
  dpalist=${mdump_dpalist:=''}

#  * ............................. Deck Definition .............................

  # Note: If tdumper is placed inside a conditional then submit3 will fail.
  # submit3 will ignore everything after the tdumper line when it creates an
  # internal file named job.info. It then attempts to source job.info and dies
  # due to a syntax error (the final fi is missing).
  # tdumper.dk performs the actual archiving, driven by the file1..file$join
  # variables and the condef parameters defined above
  . tdumper.dk

#end_of_job

