#!/bin/sh
#=======================================================================
# Create a job that will dump/diagnose/delete history --- pdump ---
# files in parallel with the current job string.
# $Id: pdump_jobdef 671 2012-05-04 23:35:22Z acrnrls $
#=======================================================================
#
# Larry Solheim  Jun,2008
#
# Files dumped/diagnosed/deleted will be of the form
#    ${pdump_prefix}_${year}_m${mon}_$suffix
# for all months from previous_year, previous_month to current_year,
# current_month or from current_year, current_month to next_year,
# next_month depending on which of previous_(year|month) or
# next_(year|month) are set.
#
# The variables current_year, current_month, previous_year and
# previous_month are set when the job string is created.
#
# A variable named pdump_suffix_list may also be set to modify the list
# of files that will be transferred/dumped. See below for details.
#
# pdump_prefix_ can be set to override the value of pdump_prefix
# files transferred/dumped will then be of the form
#    ${pdump_prefix_}${year}_m${mon}_$suffix
# note the missing underscore between pdump_prefix_ and year.
#
#=======================================================================
#
#     keyword :: pdump
# description :: parallel dump/diagnose of history files
#
 # Export every variable assigned from here on; the sourced setup files and
 # the job-preparation tooling read them from the environment.
 set -a
 # betapath2 presumably establishes site/batch environment defaults -- TODO confirm
 . betapath2

 # Account/user/job identifiers. These look like placeholder tokens that are
 # substituted when the job string is created -- verify against the tooling.
 username=acrnxxx; user=MYNAME; crawork=pdump_job

 nextjob=on
 noprint=on
 debug=off

 # Batch resource requests for this job (wall time and memory).
 stime=1800; memory1=900mb
 jobname=pdump; time=$stime ; memory=$memory1;

 # Temporary directory where this script will run
 CCRNTMP=$CCRNTMP

 # RUNPATH on execution machine
 RUNPATH=$RUNPATH

 # Alternate path to a directory where .queue/.crawork will be found
 JHOME=''

 # When an alternate JHOME is in effect, allow the JHOME_* variables to
 # override the data/run paths for this job.
 if [ -n "$JHOME" -a x"$JHOME" != x"$HOME" ]; then
   # Allow optional reset of DATAPATH/RUNPATH via JHOME_DATA.
   # Use :- (not :=) here: the old := form assigned $DATAPATH to an empty
   # JHOME_DATA on the first line, which then clobbered RUNPATH with the
   # DATAPATH value on the second line.
   JHOME_DATA=''
   DATAPATH=${JHOME_DATA:-$DATAPATH}
   RUNPATH=${JHOME_DATA:-$RUNPATH}
   # Allow optional reset of CCRNTMP via JHOME_RUN (same :- reasoning).
   JHOME_RUN=''
   CCRNTMP=${JHOME_RUN:-$CCRNTMP}
 fi

 # pdump_ssh_args may be used to supply additional
 # command line arguments to the ssh commands used below.
 # This is only useful for debugging.
 pdump_ssh_args=''

 # This is a debugging switch. Turning pdump_dry_run on will prevent any
 # copy or resubmission.
 pdump_dry_run=0

 # Each option stanza below follows the same idiom: strip blanks from the
 # value, reassign it via eval, then normalize on/yes -> 1 and off/no -> 0
 # so later code can use plain integer tests.

 # pdump_with_rtrans is used to control inclusion of the remote copy
 # If pdump_with_rtrans is set to "off" then only the tdumper job will run.
 pdump_with_rtrans=off
 XXX=`echo $pdump_with_rtrans|sed 's/ //g'`
 eval pdump_with_rtrans\=$XXX
 [ "$pdump_with_rtrans" = 'on' ]  && eval pdump_with_rtrans\=1
 [ "$pdump_with_rtrans" = 'off' ] && eval pdump_with_rtrans\=0
 [ "$pdump_with_rtrans" = 'yes' ] && eval pdump_with_rtrans\=1
 [ "$pdump_with_rtrans" = 'no' ]  && eval pdump_with_rtrans\=0

 # pdump_with_mdump is used to control inclusion of the tape dump job
 # If pdump_with_mdump is set to "off" then only the rtrans job will run.
 pdump_with_mdump=on
 XXX=`echo $pdump_with_mdump|sed 's/ //g'`
 eval pdump_with_mdump\=$XXX
 [ "$pdump_with_mdump" = 'on' ]  && eval pdump_with_mdump\=1
 [ "$pdump_with_mdump" = 'off' ] && eval pdump_with_mdump\=0
 [ "$pdump_with_mdump" = 'yes' ] && eval pdump_with_mdump\=1
 [ "$pdump_with_mdump" = 'no' ]  && eval pdump_with_mdump\=0

 # pdump_with_delete is used to control deleting history files after they are dumped
 pdump_with_delete=on
 XXX=`echo $pdump_with_delete|sed 's/ //g'`
 eval pdump_with_delete\=$XXX
 [ "$pdump_with_delete" = 'on' ]  && eval pdump_with_delete\=1
 [ "$pdump_with_delete" = 'off' ] && eval pdump_with_delete\=0
 [ "$pdump_with_delete" = 'yes' ] && eval pdump_with_delete\=1
 [ "$pdump_with_delete" = 'no' ]  && eval pdump_with_delete\=0

 # pdump_with_diag is used to control inclusion of the diagnostic job
 # If pdump_with_diag is set to "off" then no diagnostic job is launched.
 pdump_with_diag=off
 XXX=`echo $pdump_with_diag|sed 's/ //g'`
 eval pdump_with_diag\=$XXX
 [ "$pdump_with_diag" = 'on' ]  && eval pdump_with_diag\=1
 [ "$pdump_with_diag" = 'off' ] && eval pdump_with_diag\=0
 [ "$pdump_with_diag" = 'yes' ] && eval pdump_with_diag\=1
 [ "$pdump_with_diag" = 'no' ]  && eval pdump_with_diag\=0

 # pdump_with_rtd is used to control inclusion of run time diagnostics
 # If pdump_with_rtd is set to "off" then no run time diagnostics are used.
 pdump_with_rtd=on
 XXX=`echo $pdump_with_rtd|sed 's/ //g'`
 eval pdump_with_rtd\=$XXX
 [ "$pdump_with_rtd" = 'on' ]  && eval pdump_with_rtd\=1
 [ "$pdump_with_rtd" = 'off' ] && eval pdump_with_rtd\=0
 [ "$pdump_with_rtd" = 'yes' ] && eval pdump_with_rtd\=1
 [ "$pdump_with_rtd" = 'no' ]  && eval pdump_with_rtd\=0

 # If pdump_use_scp is set to "on" then scp will be used for transport
 # to the gateway machine. The default if pdump_use_scp = off is to use
 # nrcp for transport.
 pdump_use_scp=on
 XXX=`echo $pdump_use_scp|sed 's/ //g'`
 eval pdump_use_scp\=$XXX
 [ "$pdump_use_scp" = 'on' ]  && eval pdump_use_scp\=1
 [ "$pdump_use_scp" = 'off' ] && eval pdump_use_scp\=0
 [ "$pdump_use_scp" = 'yes' ] && eval pdump_use_scp\=1
 [ "$pdump_use_scp" = 'no' ]  && eval pdump_use_scp\=0

 # rtd_in_model_string = on  means put the rtdiag deck into the model string
 # rtd_in_model_string = off means put the rtdiag deck into the tdumper string
 rtd_in_model_string=on
 XXX=`echo $rtd_in_model_string|sed 's/ //g'`
 eval rtd_in_model_string\=$XXX
 [ "$rtd_in_model_string" = 'on' ]  && eval rtd_in_model_string\=1
 [ "$rtd_in_model_string" = 'off' ] && eval rtd_in_model_string\=0
 [ "$rtd_in_model_string" = 'yes' ] && eval rtd_in_model_string\=1
 [ "$rtd_in_model_string" = 'no' ]  && eval rtd_in_model_string\=0

 # comjcl.cdk presumably provides the common job-card preamble -- TODO confirm
 . comjcl.cdk
 # Everything from here down to the "end_of_script" terminator is written
 # verbatim into Execute_Script (the quoted delimiter suppresses expansion);
 # that script runs later on the execution host.
 cat > Execute_Script <<'end_of_script'

  # bail is a simple error exit routine: print a tagged message on stdout,
  # append to the haltit flag file (its presence signals the job chain to
  # stop), then exit non-zero.
  bail(){
    # Quote "$1" so messages containing globs (e.g. lock file patterns) or
    # runs of spaces are printed verbatim instead of being word-split and
    # glob-expanded.
    echo "pdump: $1"
    echo " " >> haltit
    exit 1
  }

  # Placeholder identifiers; these appear to be substituted with real values
  # when the job string is created -- verify against the tooling.
  username=acrnxxx; user=MYNAME; crawork=pdump_job
  runid=job000; uxxx=uxxx; pdump_uxxx=$uxxx;
  # History file names are built as ${pdump_prefix}_<year>_m<mon>_<suffix>
  pdump_prefix=${pdump_uxxx}_${runid}; pdump_prefix_=${pdump_prefix}_

  noprint=on
  nextjob=on

  # ---Start_submit_ignore_code----

  # Indicate how cccjob should be invoked
  # Setting CCCJOB_ROOT will allow a job specific version of cccjob to be used
  CCCJOB_ROOT=''
  if [ -z "$CCCJOB_ROOT" ]; then
    CCCJOB_ENV=''
  else
    # CCCJOB_ENV becomes the literal string: env CCCJOB_ROOT=<value>
    # and is prepended to the cccjob invocation below.
    eval CCCJOB_ENV=\'env CCCJOB_ROOT\=$CCCJOB_ROOT\'
  fi

  # pdump_suffix_list is a white space separated list of suffixes
  # of file names to be transferred/dumped. Any suffix in this list may be
  # modified by appending a + followed by a comma separated list of
  # numbers (no white space is allowed within this modifier). Each
  # number within the modifier list will correspond to a month (1-12)
  # for which a file with this suffix is to be included. If the
  # modifier exists for a particular suffix then only those months
  # indicated in the modifier will be added to the file list.
  # e.g. the inclusion of rs+12 in this list will copy only DEC restarts
  mdump_suffix_list=''
  pdump_suffix_list=${mdump_suffix_list:='gs ss gz cm ab+12 ob+12 an+12 rs+12 cs+12 os+12'}

  # pdel_suffix_list is used to generate file names of history files that
  # are to be deleted after they have been processed. 
  mdelete_suffix_list=''
  pdel_suffix_list=${mdelete_suffix_list:="gs ss gz cm rs os cs ab ob an _script"}

  # pdump_suffix_ flags the addition of an underscore between the
  # suffix and the month (e.g. m01_gs) in file names generated here
  # (normalized below to 1/0 like the flags in the outer script).
  pdump_suffix_=1
  XXX=`echo $pdump_suffix_|sed 's/ //g'`
  eval pdump_suffix_\=$XXX
  [ "$pdump_suffix_" = 'on' ]  && eval pdump_suffix_\=1
  [ "$pdump_suffix_" = 'off' ] && eval pdump_suffix_\=0
  [ "$pdump_suffix_" = 'yes' ] && eval pdump_suffix_\=1
  [ "$pdump_suffix_" = 'no' ]  && eval pdump_suffix_\=0

  # These variables are set when the job string is created
  # ("NotSet" values here are placeholders expected to be overwritten).
  run_start_year=NotSet
  run_start_month=NotSet
  run_stop_year=NotSet
  run_stop_month=NotSet

  current_year=NotSet
  current_month=NotSet

  previous_year=NotSet
  previous_month=NotSet

  next_year=NotSet
  next_month=NotSet

  [ $current_year = "NotSet" -o $current_month = "NotSet" ] && \
    bail "current_year or current_month is not set"

  # files transferred will be for months in the range from
  # pdump_start_year,pdump_start_mon to pdump_end_year,pdump_end_mon
  # previous_* takes precedence; next_* is used only when previous_* is unset.
  if [ $previous_year = "NotSet" -o $previous_month = "NotSet" ]; then
    if [ $next_year = "NotSet" -o $next_month = "NotSet" ]; then
      bail "Neither previous_(year|month) nor next_(year|month) are set"
    else
      pdump_start_year=$current_year
      pdump_start_mon=$current_month
      pdump_end_year=$next_year
      pdump_end_mon=$next_month
    fi
  else
    pdump_start_year=$previous_year
    pdump_start_mon=$previous_month
    pdump_end_year=$current_year
    pdump_end_mon=$current_month
  fi

  # Zero-pad years to 3 digits and months to 2 digits, matching the widths
  # used in history file names.
  pdump_start_year=`echo $pdump_start_year|awk '{printf "%3.3d",$1}' -`
  pdump_start_mon=`echo $pdump_start_mon|awk '{printf "%2.2d",$1}' -`
  pdump_end_year=`echo $pdump_end_year|awk '{printf "%3.3d",$1}' -`
  pdump_end_mon=`echo $pdump_end_mon|awk '{printf "%2.2d",$1}' -`

#  # This invocation of make_file_name_list will process the *_year and *_months
#  # variables defined above and output a file containing definitions for
#  # start_year, start_mon, stop_year, stop_mon
#  tmp_file_list="pdump_date_list_${runid}_${stamp}"
#  make_file_name_list --dates_only $tmp_file_list >>$error_out 2>&1 ||\
#    bail "Problem in make_file_name_list"
#  rm -f $error_out
#
#  # Verify that the output list is not empty
#  [ ! -s "$tmp_file_list" ] && bail "Unable to create file list"
#
#  # A file list was created ...source it
#  # This will define start_year, start_mon, stop_year, stop_mon
#  : ; . $tmp_file_list
#  rm -f $tmp_file_list
#
#  # Define start and stop dates
#  pdump_start_year=$start_year
#  pdump_start_mon=$start_mon
#  pdump_stop_year=$stop_year
#  pdump_stop_mon=$stop_mon
#  pdump_end_year=$stop_year
#  pdump_end_mon=$stop_mon

  # Sanity-check the resolved dump range before using it in file names.
  [ -z "$pdump_start_year" ] && bail "pdump_start_year is null"
  [ -z "$pdump_end_year" ]   && bail "pdump_end_year is null"
  [ -z "$pdump_start_mon" ]  && bail "pdump_start_mon is null"
  [ -z "$pdump_end_mon" ]    && bail "pdump_end_mon is null"
  [ $pdump_start_mon -gt 12 -o $pdump_start_mon -lt 1 ] &&\
    bail "pdump_start_mon=$pdump_start_mon is out of range"
  [ $pdump_end_mon -gt 12 -o $pdump_end_mon -lt 1 ] &&\
    bail "pdump_end_mon=$pdump_end_mon is out of range"
  [ $pdump_start_year -gt $pdump_end_year ] &&\
    bail "pdump_start_year=$pdump_start_year is out of range"

  # ym_range is used as part of some file names defined below
  ym_range="${pdump_start_year}m${pdump_start_mon}_${pdump_end_year}m${pdump_end_mon}"

  # Determine if the diagnostic files created when pdump_with_diag=on is invoked
  # are to be dumped to cfs and deleted from disk on the remote machine

  # pdump_diag_save_months is the number of months between saves of the diag files
  pdump_diag_save_months=60
  [ $pdump_diag_save_months -le 0 ] && bail "Invalid pdump_diag_save_months = ${pdump_diag_save_months}"

  # Initialize the start/stop year/month for dumping diagnostic files to tape
  # (all zero means "no diag dump this submission" until decided below).
  dump_diag_start_year=0
  dump_diag_start_mon=0
  dump_diag_end_year=0
  dump_diag_end_mon=0
  dump_diag_files=0

  # Decide whether this submission should dump diagnostic files to tape and,
  # if so, over which start/end year/month window. Diag dumps happen every
  # pdump_diag_save_months months and on the final month of the run.
  if [ $pdump_with_diag -eq 1 ]; then
    # Calculate the number of months since the start of the run
    # up to the last month of the current job
    # (awk prints 0 for any invalid/inverted year-month inputs).
    months_to_date=`echo $run_start_year $run_start_month $pdump_end_year $pdump_end_mon|\
                    awk '{if ($1<=0 || $3<=0 || $3<$1) {print "0"; exit}
                          if ($2<1 || $2>12 || $4<1 || $4>12) {print "0"; exit}
                          if ($1 == $3) {m=$4-$2+1}
                          else {m=($3-$1-1)*12+13-$2+$4}; printf "%d",m}' -`
    [ $months_to_date -le 0 ] && \
      bail "Invalid months_to_date=$months_to_date ...run_start_year=$run_start_year run_start_month=$run_start_month pdump_end_year=$pdump_end_year pdump_end_mon=$pdump_end_mon"

    # Calculate the total number of months in this run
    months_in_run=`echo $run_start_year $run_start_month $run_stop_year $run_stop_month|\
                    awk '{if ($1<=0 || $3<=0 || $3<$1) {print "0"; exit}
                          if ($2<1 || $2>12 || $4<1 || $4>12) {print "0"; exit}
                          if ($1 == $3) {m=$4-$2+1}
                          else {m=($3-$1-1)*12+13-$2+$4}; printf "%d",m}' -`
    [ $months_in_run -le 0 ] && \
      bail "Invalid months_in_run=$months_in_run ...run_start_year=$run_start_year run_start_month=$run_start_month run_stop_year=$run_stop_year run_stop_mon=$run_stop_month"

    # Determine if this is the last month of the run
    is_last_month=`echo $pdump_end_year $pdump_end_mon $run_stop_year $run_stop_month|\
                   awk '{if ($1==$3 && $2==$4) {print "1"} else {print "0"}}' -`

    # If pdump_diag_save_months divides evenly into months_to_date or this is the
    # last month in the run then it is time to dump diagnostic files to tape
    # (dump_diag_files temporarily holds the remainder, then becomes a 0/1 flag).
    dump_diag_files=`echo $months_to_date $pdump_diag_save_months|awk '{r=$1%$2;printf "%d",r}' -`
    if [ $dump_diag_files -eq 0 -o $is_last_month -eq 1 ]; then
      # Set dump_diag_files true
      dump_diag_files=1
    else
      # Set dump_diag_files false
      dump_diag_files=0
    fi

    if [ $dump_diag_files -eq 1 ]; then
      # Determine the start/stop year/month for the current diag dump
      if [ $months_in_run -le $pdump_diag_save_months ]; then
        # Short run: the whole run fits in one save interval
        dump_diag_start_year=$run_start_year
        dump_diag_start_mon=$run_start_month
      elif [ $is_last_month -eq 1 ]; then
        # This is the last month of the run but it may not be
        # pdump_diag_save_months since the last diag save
        months_since_last_save=`echo $months_in_run $pdump_diag_save_months|\
                                awk '{m=$1%$2;printf "%d",m}' -`
        if [ $months_since_last_save -le 0 ]; then
          months_since_last_save=$pdump_diag_save_months
        fi
        # Walk back months_since_last_save months from the end date
        start_year_month=`echo $months_since_last_save $pdump_end_year $pdump_end_mon|\
                          awk '{if ($1<=$3) {y=$2; m=$3-$1+1}
                                else {
                                  s=$1-$3;
                                  y=$2-1-int(s/12); m=13-(s%12)}
                                printf "%d %d",y,m}' -`

        dump_diag_start_year=`echo $start_year_month|awk '{printf "%d",$1}' -`
        dump_diag_start_mon=`echo $start_year_month|awk '{printf "%d",$2}' -`
      else
        # The start year/month will be pdump_diag_save_months prior to
        # pdump_end_year/pdump_end_mon
        start_year_month=`echo $pdump_diag_save_months $pdump_end_year $pdump_end_mon|\
                          awk '{if ($1<=$3) {y=$2; m=$3-$1+1}
                                else {
                                  s=$1-$3;
                                  y=$2-1-int(s/12); m=13-(s%12)}
                                printf "%d %d",y,m}' -`

        dump_diag_start_year=`echo $start_year_month|awk '{printf "%d",$1}' -`
        dump_diag_start_mon=`echo $start_year_month|awk '{printf "%d",$2}' -`
      fi
      dump_diag_end_year=$pdump_end_year
      dump_diag_end_mon=$pdump_end_mon
    fi
  fi

  # Define a partial file name
  fid=${runid}_$ym_range

  # Define a unique stamp for use in file names
  # (day-of-year + time-of-day with the current PID appended)
  stamp=`date "+%j%H%M%S"$$`

  # Define a directory used to hold persistent files (lock files etc)
  persistd="${JHOME:-$HOME}/.queue/.crawork"

  # Generate a lock file name (this should be a full pathname)
  pdump_lock="${persistd}/lock_pdump_${fid}_$stamp"

  # strict lock = match locks on the full fid (runid + date range);
  # non-strict matches any lock for this runid. Normalized to 1/0 below.
  pdump_with_strict_lock=off
  XXX=`echo $pdump_with_strict_lock|sed 's/ //g'`
  eval pdump_with_strict_lock\=$XXX
  [ "$pdump_with_strict_lock" = 'on' ]  && eval pdump_with_strict_lock\=1
  [ "$pdump_with_strict_lock" = 'off' ] && eval pdump_with_strict_lock\=0
  [ "$pdump_with_strict_lock" = 'yes' ] && eval pdump_with_strict_lock\=1
  [ "$pdump_with_strict_lock" = 'no' ]  && eval pdump_with_strict_lock\=0

  # Check for the existence of previous lock files and abort if greater
  # than pdump_lock_max lock files currently exist
  pdump_lock_max=10
  if [ $pdump_with_strict_lock -eq 1 ]; then
    # Use a more restrictive test for lock files (currently experimental)
    found_lock=`(ls -1 ${persistd}/lock_pdump_${fid}_* || : ) 2>/dev/null`
  else
    found_lock=`(ls -1 ${persistd}/lock_pdump_${runid}_* || : ) 2>/dev/null`
  fi
  if [ ! -z "$found_lock" ]; then
    # If lock files exist then determine how many there are
    found_lock_n=`echo $found_lock|awk '{printf "%d",NF; exit}' -`
    echo "$found_lock_n pdump lock files currently exist"
    if [ $found_lock_n -gt $pdump_lock_max ]; then
      bail "runid=$runid  Existing lock file(s): $found_lock"
    fi
  fi

  ######### Create the parallel job ###########

  # These variables may be set when the pdump job string is created
  # If a variable is set then it will be used in the diag job
  # (empty '' values are skipped when cccjob_defs is written below).

  # gateway is the name of the machine to which files
  # are sent prior to being moved off site
  pdump_front_end=''
  pxdump_gateway=${pdump_front_end:='pollux'}
  pdump_lock=$pdump_lock

  # Propagate the 0/1 flags computed earlier under the px* names used
  # by the generated job.
  pxdump_with_mdump=${pdump_with_mdump:=on}
  pxdump_with_diag=${pdump_with_diag:=off}
  pxdump_with_rtrans=${pdump_with_rtrans:=off}

  mdump_arclabel=''
  mdump_uxxx=''
  mdump_prefix=''
  mdump_prefix_=''
  mdump_CCRNTMP=''
  mdump_RUNPATH=''
  mdump_qsublog=''
  mdump_with_lock_file=''
  check_cfs_arcfile=''
  nontwrkchk=''

  # besc = on causes a tdumper job to run on the back end (spica/hadar)
  pdump_besc=''
  besc=${pdump_besc:=off}

  # mdump_months can be used to serialize the mdump string created by pxdump
  # This is now the only option so mdump_months must be set
  mdump_months=''

  # This will not work unless mdump_months is non null
  [ -z "$mdump_months" ] && \
    bail "Must set mdump_months when using parallel dumping (pdump)"

  # Ensure that mdump_months contains only an integer: stripping the digits
  # must leave an empty string, otherwise the value is rejected.
  # NOTE: the sed expression was previously missing its closing quote, which
  # made the backquoted command fail and turned this validation into a no-op.
  XXX_mdump_months=`echo $mdump_months | sed 's/[0-9][0-9]*//'`
  [ -n "$XXX_mdump_months" ] && bail "Invalid mdump_months = $mdump_months"

  # Both the tape dump and the front-end pull use the same suffix list.
  mdump_suffix_list=${pdump_suffix_list:=''}
  pullfe_suffix_list=$mdump_suffix_list

  pullfe_CCRNTMP=''
  pullfe_RUNPATH=''
  pullfe_log=''

  mdump_cfsuser=''
  cfsuser=''
  masterdir=''
  shortermdir=''
  nolist=''
  # sv should always be on when gateway is a front end machine
  # sv on means dump from front end disks
  eval sv\=on

  # BERUNPATH must be set if files are to be copied from a machine that is
  # not the default back end machine
  bemach=''
  if [ -z "$bemach" ]; then
    # If bemach is not set explicitly then see if this job is executing on
    # a back end machine and use that machine name for bemach
    # (hostname truncated at the first dot).
    this_mach=`uname -n|awk -F\. '{print \$1}' -`
    on_back_end=0
    case $this_mach in
                     c[0-9]*) on_back_end=1 ;;
      zeta|saiph|spica|hadar) on_back_end=1 ;;
                 za|sa|sp|ha) on_back_end=1 ;;
    esac
    if [ $on_back_end -eq 1 ]; then
      # Set this_mach to its known alias
      case $this_mach in
        ha|ha*) this_mach=hadar ;;
        sp|sp*) this_mach=spica ;;
           c1*) this_mach=spica ;;
           c2*) this_mach=hadar ;;
        za|ze*) this_mach=zeta  ;;
        sa|sa*) this_mach=saiph ;;
           c6*) this_mach=saiph ;;
           c7*) this_mach=zeta  ;;
      esac
      bemach=$this_mach
    fi
  fi
  if [ -n "$bemach" ]; then
    # Reset BERUNPATH if bemach is set
    # Ensure bemach contains a known alias
    case $bemach in
      ha|ha*) bemach=hadar ;;
      sp|sp*) bemach=spica ;;
      za|ze*) bemach=zeta  ;;
      sa|sa*) bemach=saiph ;;
    esac
    # Ask the back end for its RUNPATH ($RUNPATH is single-quoted so it
    # expands remotely, not here).
    XXX=`ssh $bemach echo '$RUNPATH' 2>/dev/null` || bail "Unable to determine BERUNPATH"
    BERUNPATH=$XXX
    # Also assign back_end_mach
    eval back_end_mach\=$bemach
  else
    BERUNPATH=$BERUNPATH
  fi

  # pdump_back_end is the name of the machine to which the job that deletes
  # history files will be sent, if that job is created and used
  pdump_back_end=${back_end_mach:=''}

  # Define start/stop year/month cccjob options to be used below
  # (stripped of the zero padding applied earlier).
  curr_year=`echo $pdump_start_year|awk '{printf "%d",$1}' -`
  curr_mon=`echo $pdump_start_mon|awk '{printf "%d",$1}' -`
  next_year=`echo $pdump_end_year|awk '{printf "%d",$1}' -`
  next_mon=`echo $pdump_end_mon|awk '{printf "%d",$1}' -`
  start_opt="--start_time=${curr_year}:$curr_mon"
  stop_opt="--stop_time=${next_year}:$next_mon"

  # Ensure that the value of months used in the cccjob command below is
  # consistent with the interval between start_opt and stop_opt 
  pdump_months=`echo $curr_year $curr_mon $next_year $next_mon|\
                awk '{if ($1<=0 || $3<=0 || $3<$1) {print "0"; exit}
                      if ($2<1 || $2>12 || $4<1 || $4>12) {print "0"; exit}
                      if ($1 == $3) {m=$4-$2+1}
                      else {m=($3-$1-1)*12+13-$2+$4}; printf "%d",m}' -`
  [ $pdump_months -le 0 ] && \
    bail "Invalid pdump_months = $pdump_months ...pdump_start_year = $pdump_start_year pdump_start_mon = $pdump_start_mon pdump_end_year = $pdump_end_year pdump_end_mon = $pdump_end_mon"
  eval months\=$pdump_months

  # Remote transfer (rtrans) placeholders; empty values are dropped when
  # cccjob_defs is written below.
  rtrans_remusr=''
  rtrans_remserver=''
  rtrans_remdir=''
  rtrans_force=''
  rtrans_scp_args=''
  rtrans_ssh_args=''
  rtrans_getput=''
  rtrans_suffix_list=''
  rtrans_uxxx=''
  rtrans_prefix=''
  rtrans_prefix_=''
  rtrans_suffix_=''

  # run time diagnostics variables
  year_rdiag_start=''
  year_rtdiag_start=''
  yearfirst=''
  PhysA=''
  PhysO=''
  CarbA=''
  CarbO=''
  CarbL=''
  resolution=''
  pdump_rtd_diag=''
  pdump_rtd_plot=''
  pdump_rtd_plot_freq=''
  pdump_rtd_with_plot=''
  pxrtd_with_plot=${pdump_rtd_with_plot:=''}
  pldir=''
  rtd_diag_deck=''
  rtd_plot_deck=''
  rtd_path=''

  # Normalize rtd_in_series to 1/0 (same idiom as the other flags; an empty
  # value is additionally treated as off).
  rtd_in_series=on
  XXX=`echo $rtd_in_series|sed 's/ //g'`
  eval rtd_in_series\=$XXX
  [ -z "$rtd_in_series" ]      && eval rtd_in_series\=0
  [ "$rtd_in_series" = 'on' ]  && eval rtd_in_series\=1
  [ "$rtd_in_series" = 'off' ] && eval rtd_in_series\=0
  [ "$rtd_in_series" = 'yes' ] && eval rtd_in_series\=1
  [ "$rtd_in_series" = 'no' ]  && eval rtd_in_series\=0

  with_lsarc=''
  pdump_diag_job=''
  diag_uxxx=''
  model_uxxx=''
  hres=''
  modver=''
  xref01=''
  xref02=''
  xref03=''
  xref04=''
  xref05=''
  xref06=''
  xref07=''
  xref08=''
  xref09=''
  xref10=''
  xref11=''
  xref12=''
  xref13=''
  xref14=''
  xref15=''
  # varlist names the variables whose (non-empty) values are copied into
  # cccjob_defs for the generated job string.
  varlist='uxxx
           runid
           months
           noprint
           pxdump_gateway
           pdump_front_end
           pdump_back_end
           pdump_lock
           pdump_with_delete
           mdump_uxxx
           mdump_prefix
           mdump_prefix_
           mdump_suffix_list
           mdump_cfsuser
           mdump_CCRNTMP
           mdump_RUNPATH
           mdump_months
           mdump_qsublog
           mdump_with_lock_file
           check_cfs_arcfile
           nontwrkchk
           cfsuser
           masterdir
           shortermdir
           nolist
           sv
           besc
           bemach
           mdump_arclabel
           mdelete_suffix_list
           back_end_mach
           with_lsarc
           pdump_diag_job
           diag_uxxx
           model_uxxx
           hres
           modver'

  # varlist2: second batch of variables exported to cccjob_defs (rtrans,
  # diag-dump window, run time diagnostics and run date range).
  varlist2='rtrans_remusr
           rtrans_remserver
           rtrans_remdir
           rtrans_force
           rtrans_scp_args
           rtrans_ssh_args
           rtrans_getput
           rtrans_suffix_list
           rtrans_uxxx
           rtrans_prefix
           rtrans_prefix_
           rtrans_suffix_
           pxdump_with_diag
           pxdump_with_mdump
           pxdump_with_rtrans
           dump_diag_start_year
           dump_diag_start_mon
           dump_diag_end_year
           dump_diag_end_mon
           dump_diag_files
           year_rdiag_start
           year_rtdiag_start
           yearfirst
           PhysA
           PhysO
           CarbA
           CarbO
           CarbL
           resolution
           pdump_rtd_diag
           pdump_rtd_plot
           pdump_rtd_plot_freq
           pxrtd_with_plot
           pldir
           rtd_diag_deck
           rtd_plot_deck
           rtd_path
           rtd_in_series
           pullfe_log
           pullfe_CCRNTMP
           pullfe_RUNPATH
           pullfe_suffix_list
           CCCJOB_ROOT
           BERUNPATH
           run_start_year
           run_start_month
           run_stop_year
           run_stop_month'

  # varlist3: cross-reference slots and JHOME overrides exported to cccjob_defs.
  varlist3='xref01
            xref02
            xref03
            xref04
            xref05
            xref06
            xref07
            xref08
            xref09
            xref10
            xref11
            xref12
            xref13
            xref14
            xref15
            JHOME
            JHOME_DATA
            JHOME_RUN'

  # Create a file containing variable definitions used by the diag job

  # This eval will protect this crawork def from cccjob substitution
  eval craworkdef\=crawork\=pdump_${fid}_$stamp
  cat > cccjob_defs <<EOF
$craworkdef
EOF
  # Append var='value' lines for every non-empty variable in the three lists.
  for var in $varlist; do
    eval val=\$$var
    # If this variable is defined add it to the list
    [ -n "$val" ] && echo ${var}=\'$val\' >> cccjob_defs
  done
  for var in $varlist2; do
    eval val=\$$var
    # If this variable is defined add it to the list
    [ -n "$val" ] && echo ${var}=\'$val\' >> cccjob_defs
  done
  for var in $varlist3; do
    eval val=\$$var
    # If this variable is defined add it to the list
    [ -n "$val" ] && echo ${var}=\'$val\' >> cccjob_defs
  done

  # Echo the definitions into the job log for traceability.
  echo "cccjob_defs:"
  [ -s "cccjob_defs" ] && cat cccjob_defs

  # Create a file containing a list of history file names that are to be
  # deleted from the back end after the dump completes successfully.

  # Leave the last month on disk by shifting the pdump dates back 1 month
  pxdel_vtmp=`echo $pdump_start_year $pdump_start_mon|\
                 awk '{m=$2-1;if(m==0){m=12;y=$1-1}else{y=$1};
                       printf "%2.2d %3.3d",m,y}' -`
  pxdel_start_mon=`echo $pxdel_vtmp|awk '{printf "%2.2d",$1}' -`
  pxdel_start_year=`echo $pxdel_vtmp|awk '{printf "%3.3d",$2}' -`

  pxdel_vtmp=`echo $pdump_end_year $pdump_end_mon|\
                 awk '{m=$2-1;if(m==0){m=12;y=$1-1}else{y=$1};
                       printf "%2.2d %3.3d",m,y}' -`
  pxdel_end_mon=`echo $pxdel_vtmp|awk '{printf "%2.2d",$1}' -`
  pxdel_end_year=`echo $pxdel_vtmp|awk '{printf "%3.3d",$2}' -`

  # Walk every year/month in the (shifted) range and collect one fdelN
  # variable per matching history file name.
  pxdel_curr_year=`echo $pxdel_start_year|awk '{y=$1-1;printf "%3.3d", y}' -`
  nfdel=0
  while [ $pxdel_curr_year -lt $pxdel_end_year ]; do
    pxdel_curr_year=`echo $pxdel_curr_year|awk '{printf "%3.3d",$1+1}' -`
    if [ $pxdel_curr_year -eq $pxdel_start_year ]; then
      mm=`echo $pxdel_start_mon|awk '{printf "%2.2d",$1-1}' -`
    else
      mm=0
    fi
    if [ $pxdel_curr_year -eq $pxdel_end_year ]; then
      mm_end=$pxdel_end_mon
    else
      mm_end=12
    fi
    while [ $mm -lt $mm_end ]; do
      mm=`echo $mm|awk '{printf "%2.2d",$1+1}' -`
      bname=${pdump_prefix}_${pxdel_curr_year}_m${mm}_
      for suffix in $pdel_suffix_list; do
        # A "+" modifier (e.g. rs+12) restricts a suffix to listed months.
        mlist=`echo $suffix|awk -F'+' '{print $2}' -`
        mlist=`echo $mlist|sed 's/,/ /g'`
        if [ -n "$mlist" ]; then
          suffix=`echo $suffix|sed 's/+.*$//'`
          # assume that mlist is a white space separated list of numbers
          # indicating which months to dump
          for xx in $mlist; do
            if [ $xx -eq $mm ]; then
              nfdel=`echo $nfdel|awk '{printf "%d",$1+1}' -`
              nfdel=`echo $nfdel|sed -e 's/^ *//' -e 's/^0*//'`
              eval fdel${nfdel}=$bname$suffix
            fi
          done
        else
          nfdel=`echo $nfdel|awk '{printf "%d",$1+1}' -`
          nfdel=`echo $nfdel|sed -e 's/^ *//' -e 's/^0*//'`
          eval fdel${nfdel}=$bname$suffix
        fi
      done
    done
  done

  # Write the collected names into fdel_list, one assignment per line.
  nn=0
  rm -f fdel_list
  touch fdel_list
  while [ $nn -lt $nfdel ]; do
    nn=`expr $nn + 1`
    eval local_fdel=\$fdel$nn
    [ -z "$local_fdel" ] && bail "Invalid file name in fdel$nn"
    # Create this list in a format that may be inserted directly into a
    # a delete job
    echo "  fdel${nn}=$local_fdel" >> fdel_list
  done

  # Append the number of files to fdel_list
  echo "  nfdel=$nfdel" >> fdel_list

  echo "pdump: fdel_list"
  cat fdel_list

  # Require cccjob
  which cccjob || bail "cccjob is not in your path"

  # Set up the job description

  # mdump_months is always non-empty here (validated above), so the pullfe
  # step is normally omitted.
  if [ -n "$mdump_months" ]; then
    # In this case do not use pullfe
    PULLFEJOB=''
  else
    PULLFEJOB='pullfe:s'
  fi

  # Without run time diagnostics the job will
  #   1) copy history files for the current submission to the front end
  #   2) dump these history files to tape
  #   3) Delete these history files from the front end
  #   4) Delete these history files (offset by 1 month) from the back end

  JOBDESC="$PULLFEJOB pxdump=fdel_list:s"

  # With run time diagnostics we need to keep 12 months of history files on
  # the back end.
  # If it is not the end of the year then the job will
  #   1) copy history files for the current submission to the front end
  #   2) dump these history files to tape
  #   3) Delete these history files from the front end
  # If it is the end of the year the job will
  #   1) execute the run time diagnostics on the back end machine
  #   2) copy history files for the current submission to the front end
  #   3) dump these history files to tape
  #   4) Delete these history files from the front end
  #   5) Delete the full year (offset by 1 month) of history files from the back end

  if [ $pdump_with_rtd -eq 1 -a $rtd_in_model_string -eq 0 ]; then
    # Include run time diagnostics with the cfs job

    if [ $next_mon -eq 12 ]; then
      # Execute run time diagnostics at the end of every year

      # Create a list of history files that are to be deleted from the
      # back end after all processing has been done.
      # Leave the last month on disk by shifting the dates back 1 month.
      pxdel_vtmp=`echo $pdump_start_year 1|\
                     awk '{m=$2-1;if(m==0){m=12;y=$1-1}else{y=$1};
                           printf "%2.2d %3.3d",m,y}' -`
      pxdel_start_mon=`echo $pxdel_vtmp|awk '{printf "%2.2d",$1}' -`
      pxdel_start_year=`echo $pxdel_vtmp|awk '{printf "%3.3d",$2}' -`

      pxdel_vtmp=`echo $pdump_end_year 12|\
                     awk '{m=$2-1;if(m==0){m=12;y=$1-1}else{y=$1};
                           printf "%2.2d %3.3d",m,y}' -`
      pxdel_end_mon=`echo $pxdel_vtmp|awk '{printf "%2.2d",$1}' -`
      pxdel_end_year=`echo $pxdel_vtmp|awk '{printf "%3.3d",$2}' -`

      pxdel_curr_year=`echo $pxdel_start_year|awk '{y=$1-1;printf "%3.3d", y}' -`
      nfdel=0
      rm -f fdel_list
      touch fdel_list
      # Walk every (year, month) in the delete range and emit one
      # "fdelN=<history file>" line per selected file into fdel_list.
      # All year/month arithmetic is done in awk so the fixed-width zero
      # padding (%3.3d years, %2.2d months) is preserved in file names.
      while [ $pxdel_curr_year -lt $pxdel_end_year ]; do
        # Increment at the top (curr was primed to start-1 above).
        pxdel_curr_year=`echo $pxdel_curr_year|awk '{printf "%3.3d",$1+1}' -`
        if [ $pxdel_curr_year -eq $pxdel_start_year ]; then
          # First year: begin at the start month.  mm is pre-decremented
          # because the inner loop increments before use.
          mm=`echo $pxdel_start_mon|awk '{printf "%2.2d",$1-1}' -`
        else
          mm=0
        fi
        if [ $pxdel_curr_year -eq $pxdel_end_year ]; then
          # Final year stops at the computed end month; interior years
          # cover all 12 months.
          mm_end=$pxdel_end_mon
        else
          mm_end=12
        fi
        while [ $mm -lt $mm_end ]; do
          mm=`echo $mm|awk '{printf "%2.2d",$1+1}' -`
          # Base history-file name for this year/month; a suffix from
          # pdel_suffix_list completes each file name.
          bname=${pdump_prefix}_${pxdel_curr_year}_m${mm}_
          # pdel_suffix_list is assumed set by the enclosing job string
          # (not visible in this section).  An entry may carry a
          # "+m1,m2,..." tail restricting which months it applies to.
          for suffix in $pdel_suffix_list; do
            mlist=`echo $suffix|awk -F'+' '{print $2}' -`
            mlist=`echo $mlist|sed 's/,/ /g'`
            if [ -n "$mlist" ]; then
              # Strip the "+..." month qualifier to recover the bare suffix
              suffix=`echo $suffix|sed 's/+.*$//'`
              # assume that mlist is a white space separated list of numbers
              # indicating which months to dump
              for xx in $mlist; do
                if [ $xx -eq $mm ]; then
                  nfdel=`echo $nfdel|awk '{printf "%d",$1+1}' -`
                  eval fdel${nfdel}=$bname$suffix
                  # Create the list in a format that may be inserted
                  # directly into a delete job
                  # NOTE(review): eval re-joins its arguments with single
                  # spaces, so the two leading spaces inside the quotes are
                  # lost and this line is emitted UNindented, unlike the
                  # literal "  nfdel=..." echo below -- confirm whether the
                  # delete job relies on that indentation.
                  eval echo "  fdel${nfdel}=\$fdel$nfdel" >> fdel_list
                fi
              done
            else
              # Unqualified suffix: applies to every month in the range
              nfdel=`echo $nfdel|awk '{printf "%d",$1+1}' -`
              eval fdel${nfdel}=$bname$suffix
              # Create the list in a format that may be inserted
              # directly into a delete job
              # NOTE(review): same indentation loss as above.
              eval echo "  fdel${nfdel}=\$fdel$nfdel" >> fdel_list
            fi
          done
        done
      done

      # Append the number of files to fdel_list
      echo "  nfdel=$nfdel" >> fdel_list

      # Echo the completed list to the job log for traceability
      echo "pdump with rtd: fdel_list"
      cat fdel_list

      # use_rtd_lock controls whether or not a lock file is used with run time diags
      # If use_rtd_lock is set to "off" then no lock file is used.
      use_rtd_lock=off
      # Strip embedded blanks, then map on/yes -> 1 and off/no -> 0.
      # NOTE(review): any other value passes through unchanged and would
      # make the numeric -eq test below fail with a test(1) error
      # (taking the else path) -- presumably acceptable here.
      XXX=`echo $use_rtd_lock|sed 's/ //g'`
      eval use_rtd_lock\=$XXX
      [ "$use_rtd_lock" = 'on' ]  && eval use_rtd_lock\=1
      [ "$use_rtd_lock" = 'off' ] && eval use_rtd_lock\=0
      [ "$use_rtd_lock" = 'yes' ] && eval use_rtd_lock\=1
      [ "$use_rtd_lock" = 'no' ]  && eval use_rtd_lock\=0

      if [ $use_rtd_lock -eq 1 ]; then
        # Define rtd_lock_file and add it to cccjob_defs to indicate
        # that run time diagnostics are being run this submission
        # (fid and stamp are assumed set earlier in the deck).
        rtd_lock_file="${JHOME:-$HOME}/.queue/.crawork/lock_rtd_${fid}_$stamp"
        echo "rtd_lock_file=$rtd_lock_file" >> cccjob_defs
      fi

      # Execute the run time diagnostics, dump files and delete from both
      # the back end and the front end.
      # NOTE(review): the ":s" qualifier presumably marks a job to be run
      # in series, and "pxdump=fdel_list" passes the delete list built
      # above -- confirm against the cccjob documentation.
      JOBDESC="pxrtd:s $PULLFEJOB pxdump=fdel_list:s"

    else
      # This will be a set of months not ending in Dec
      # Dump these files but do not delete anything from the back end
      JOBDESC="$PULLFEJOB pxdump"
    fi
  fi

  # Create the job to be sent to the front end
  # cccjob reads variable definitions from cccjob_defs and writes the
  # generated job to pdump_${fid}_job.
  $CCCJOB_ENV cccjob --out=pdump_${fid}_job --job="$JOBDESC" $start_opt $stop_opt cccjob_defs

  # Create the lock file for this job
  # The lock file is simply a copy of the generated job; its presence
  # signals that this pdump job is pending.  bail (defined earlier in the
  # file) is assumed to print a message and exit.
  [ -z "$pdump_lock" ] && bail "Lock file name is missing."
  cp pdump_${fid}_job $pdump_lock

  ######### submit the job ###########

  # Choose which machine receives the remote job: the back end when the
  # run time diagnostics are NOT part of the model string (i.e. they run
  # in series with the dump job), the front end otherwise.
#  if [ $rtd_in_series -eq 1 ]; then
  if [ $rtd_in_model_string -eq 0 ]; then
    # When the run time diagnostics are to be run in series with
    # the dump job then submit the remote job to the back end
    # (":=" also assigns the default into back_end_mach for later use)
    rem_mach=${back_end_mach:=hadar}

    # This will not work unless mdump_months is non null
    [ -z "$mdump_months" ] && \
      bail "Must set mdump_months when rtd_in_series = on"

    # This will not work when diagnostics are part of the remote job
    [ $pdump_with_diag -eq 1 ] && \
      bail "Cannot set rtd_in_series = on and pdump_with_diag = on simultaneously"

    # This will not work when rtrans is part of the remote job
    [ $pdump_with_rtrans -eq 1 ] && \
      bail "Cannot set rtd_in_series = on and pdump_with_rtrans = on simultaneously"
  else
    # Otherwise submit the remote job to the front end
    rem_mach=${pdump_front_end:=pollux}
  fi

  if [ $pdump_with_mdump -eq 1 ]; then
    # Create a temporary directory in ~/tmp to hold submission files
    # (host+stamp make the name unique per machine and invocation)
    host=`hostname` || bail "Problem in hostname"
    psubdir=${JHOME:-$HOME}/tmp/tmp_pdump_${host}_$stamp
    mkdir -p $psubdir || bail "Cannot create $psubdir"

    # Submit this job to run on the front end
    # The job file is copied into psubdir (assumed shared/visible on
    # rem_mach) and submitted there via rsub over ssh.
    this_remjob="pdump_${fid}_job"
    cp $this_remjob $psubdir
    ssh $rem_mach "cd ${psubdir}; rsub mdest=$rem_mach $this_remjob" 2>&1 || \
      bail "***ERROR*** Remote submission failed: rsub mdest=$rem_mach $this_remjob\n"

    # Clean up the temporary directory
    # (only reached on success; bail exits on a failed submission above)
    rm -fr $psubdir
  fi

  if [ $pdump_with_rtd -eq 1 -a $rtd_in_model_string -eq 1 ]; then

    ######### Insert rtd job(s) into the model string ###########

    # run time diagnostics variables
    # NOTE(review): these are explicitly emptied here, and the [ -n ]
    # filter below only writes non-empty variables to cccjob_defs -- so
    # none of these can reach cccjob_defs unless the job-preparation
    # machinery substitutes values into these assignments when the deck
    # is created.  Confirm against the deck processor.
    year_rdiag_start=''
    year_rtdiag_start=''
    yearfirst=''
    PhysA=''
    PhysO=''
    CarbA=''
    CarbO=''
    CarbL=''
    resolution=''
    pldir=''
    rtd_diag_deck=''
    rtd_plot_deck=''
    rtd_path=''
    pdump_rtd_plot_freq=''

    if [ $pdump_end_mon -eq 12 ]; then
      # Execute run time diagnostics at the end of every year

      # Create a string containing the rtd diag job and possibly the rtd plot job

      # Variables forwarded (when non-empty) to the generated rtd job
      varlist='runid
               uxxx
               crawork
               noprint
               year_rdiag_start
               year_rtdiag_start
               yearfirst
               PhysA
               PhysO
               CarbA
               CarbO
               CarbL
               resolution
               pldir
               rtd_diag_deck
               rtd_plot_deck
               rtd_path
               JHOME
               JHOME_DATA
               JHOME_RUN
               CCCJOB_ROOT'

      # Create a file containing variable definitions used by the rtd job

      rm -f cccjob_defs
      touch cccjob_defs
      for var in $varlist; do
        eval val=\$$var
        # If this variable is defined add it to the list
        # (values are written single-quoted: var='value')
        [ -n "$val" ] && echo ${var}=\'$val\' >> cccjob_defs
      done

      # Create the rtd job string

      # rtd_with_plot controls the addition of a plotting job to the job string
      # rtd_plot_freq is the frequency in years at which these plots will be generated
      rtd_plot_freq=${pdump_rtd_plot_freq:=2}
      # plot_rtd_year = pdump_end_year mod rtd_plot_freq; zero means this
      # end year falls on a plot boundary
      plot_rtd_year=`echo $pdump_end_year $rtd_plot_freq|awk '{n=$1%$2;printf "%d",n}' -`
      [ -z "$plot_rtd_year" ] && \
        bail "Invalid pdump_end_year = $pdump_end_year or rtd_plot_freq = $rtd_plot_freq"
      # Plot on plot-frequency boundaries (but not in the run's first
      # year), and always at the final year of the run
      if [ \( $plot_rtd_year -eq 0 -a $pdump_end_year -ne $run_start_year \) \
           -o $pdump_end_year -eq $run_stop_year ]; then
        # Execute the run time diagnostics and run the plot job unless
        # the user has explicitly set pdump_rtd_with_plot "off"
        rtd_with_plot=${pdump_rtd_with_plot:=1}
      else
        # Execute the run time diagnostics without any plotting
        rtd_with_plot=0
      fi

      if [ $rtd_with_plot -eq 1 ]; then
        JOBDESC="rtdiag:s rtdplot:s"
      else
        JOBDESC="rtdiag:s"
      fi

      # Generate the rtd job string into a uniquely named temporary file
      job_string_to_insert="INSERT_JOB_STRING_${runid}_$stamp"
      rm -f $job_string_to_insert
      touch $job_string_to_insert
      $CCCJOB_ENV cccjob --out=$job_string_to_insert --job="$JOBDESC" \
             --start="${pdump_start_year}" --stop="${pdump_end_year}" cccjob_defs

      # Insert the job just created into the existing string from which this job came
      # The variable "crawork" must be defined/exported in the current environment
      splice_job_string $job_string_to_insert >>$error_out 2>&1 ||\
        bail "Problem in splice_job_string"
      rm -f $error_out
      rm -f $job_string_to_insert

    else
      echo "Run time diagnostics is not invoked unless month=12"
    fi
  fi

  # successful completion
  exit 0

  # ---Stop_submit_ignore_code----

end_of_script

 . endjcl.cdk

#end_of_job
