#!/bin/sh
#=======================================================================
# Dump history files to tape and run diagnostics, pooling --- pxdiag ---
# create time series and dump diagnostic/pooled/time series files to cfs
# These tasks are done in parallel with a model run.
# $Id: pxdiag_jobdef 670 2012-04-27 19:59:15Z acrnrls $
#=======================================================================
#
# Larry Solheim  May,2011
#
# Files dumped/diagnosed will be of the form
#    ${pxdiag_uxxx}_${runid}_${year}_m${mon}_$suffix
# for all months from previous_year, previous_month to current_year,
# current_month or from current_year, current_month to next_year,
# next_month depending on which of previous_(year|month) or
# next_(year|month) are set.
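#
# For example (illustrative values only), with pxdiag_uxxx=mc, runid=job000,
# year=1995, mon=01 and suffix=gs the corresponding file name would be
#    mc_job000_1995_m01_gs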
#
# The variables current_year, current_month, previous_year and
# previous_month are set when the job string is created.
#
# A variable named pxdiag_suffix_list may also be set to modify the list
# of files that will be transferred/dumped. See below for details.
#
# When transferring time series files to a remote machine the user
# must set up public key authentication on the remote machine
# so that transport from Dorval to the remote machine does not require
# a password. This means: create a public/private key pair on alef
# using ssh-keygen (if necessary), then copy the user's public key from
# alef and append it to ~user/.ssh/authorized_keys on the remote machine.
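#
# A minimal illustrative command sequence (user and host names below are
# placeholders, not part of this script):
#    ssh-keygen -t rsa             # on alef, only if no key pair exists yet
#    cat ~/.ssh/id_rsa.pub | ssh user@remote.machine \
#        'cat >> ~/.ssh/authorized_keys'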
#=======================================================================
#
#     keyword :: pxdiag
# description :: parallel dump/diagnose/pool/ts of history files
#        hide :: yes
#
 set -a
 . betapath2

 username=acrnxxx; user=MYNAME; crawork=pxdiag_job_

 nextjob=on
 noprint=on
 debug=off

 gptime=3600; stime=3600; dtime=100; memory1=900mb
 runid=job000; jobname=pxdiag; time=$stime ; memory=$memory1;
 nqsprfx="${runid}_"

 # Temporary directory where this script will run
 CCRNTMP=$CCRNTMP

 # RUNPATH on execution machine
 RUNPATH=$RUNPATH

 # Alternate path to a directory where .queue will be found
 JHOME=''

 if [ -n "$JHOME" -a x"$JHOME" != x"$HOME" ]; then
   # Allow optional reset of DATAPATH/RUNPATH
   JHOME_DATA=''
   DATAPATH=${JHOME_DATA:=$DATAPATH}
   RUNPATH=${JHOME_DATA:=$RUNPATH}
   # Allow optional reset of CCRNTMP
   JHOME_RUN=''
   CCRNTMP=${JHOME_RUN:=$CCRNTMP}
 fi

 # RUNPATH on back end
 BERUNPATH=$BERUNPATH

 . comjcl.cdk
 cat > Execute_Script <<'end_of_script'

  # ---Start_submit_ignore_code----

  set -a

  #DBG Force noprint on for jobs created via cccjob below
  #DBG eval noprint\=on

  # Define a unique stamp for use in file names etc
  stamp=`date "+%j%H%M%S"$$`

  # Use -e option (enable interpretation of backslash escapes) if recognized by echo
  if [ "X`echo -e`" = "X-e" ]; then
    echo_e() { echo ${1+"$@"}; }
  else
    echo_e() { echo -e ${1+"$@"}; }
  fi

  # bail is a simple error exit routine
  # Note: we write the error directly to a file in ~/.queue so that this
  #       info is not lost if/when stdout is not returned
  error_out="${JHOME:-$HOME}/.queue/error_pxdiag_${runid}_$stamp"
  [ ! -z "$error_out" ] && rm -f $error_out
  bail(){
    echo_e `date`" --- pxdiag: $*"
    echo_e `date`" --- pxdiag: $*" >>$error_out
    exit 1
  }

  ToF(){
    #   usage: ToF var_name
    # purpose: Possibly reset the value of var_name to "0" (false) or "1" (true)
    #          If var_name is null or has a value of "off" or "no" then reset to "0"
    #          If var_name has a value of "on" or "yes" then reset to "1"
    #          Otherwise return with var_name unchanged
    [ -z "$1" ] && bail "ToF requires a variable name as an argument"
    eval ToF_var\=\$$1
    XXX=`echo $ToF_var|sed 's/ //g'`
    eval ToF_var\=$XXX
    if [ -n "$ToF_var" ]; then
      if   [ "$ToF_var" = 'on'  ]; then eval ToF_var\=1
      elif [ "$ToF_var" = 'off' ]; then eval ToF_var\=0
      elif [ "$ToF_var" = 'yes' ]; then eval ToF_var\=1
      elif [ "$ToF_var" = 'no'  ]; then eval ToF_var\=0
      else
        eval ToF_var\=\$$1
      fi
    else
      eval ToF_var\=0
    fi
    eval $1=$ToF_var
  }
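
  # Illustrative usage of ToF (variable name is an example only):
  #   flag=yes;   ToF flag   # flag becomes 1
  #   flag=off;   ToF flag   # flag becomes 0
  #   flag=maybe; ToF flag   # flag is left unchanged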

  # stand_alone_craworkval, if set, will determine if this string is to be run in parallel
  # with a model job or run as a stand alone string of jobs that are run in series
  stand_alone_craworkval=''

  # model_version will be used to determine which diagnostic decks to use
  modver=${modver:=gcm15h}
  model_version=$modver

  runid=job000
  uxxx=mc
  mdump_uxxx=$uxxx
  pdiag_uxxx=$mdump_uxxx
  pxdiag_uxxx=$pdiag_uxxx

  # Use the second character from pxdiag_uxxx to determine the type of
  # pooling and/or the prefix for diagnostic and time series files
  ch2=`echo $pxdiag_uxxx|awk '{print substr($1,2,1)}' -`
  [ -z "$ch2" ] && bail "Invalid pxdiag_uxxx = $pxdiag_uxxx"
  case $ch2 in
    a|m|c|r|d|f) do_nothing=0 ;; # These are valid
    *) bail "Unable to determine pooling type from pxdiag_uxxx = $pxdiag_uxxx" ;;
  esac

  # Is this a coupled model run or an agcm only model run
  if [ \( x"$ch2" = xc \) -o \( x"$ch2" = xf \) ]; then
    coupled=on
  else
    coupled=off
  fi
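  # e.g. with the default pxdiag_uxxx=mc set above, ch2=c and coupled=on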

  # Indicate how cccjob should be invoked
  # Setting CCCJOB_ROOT will allow a job specific version of cccjob to be used
  CCCJOB_ROOT=''
  if [ -z "$CCCJOB_ROOT" ]; then
    CCCJOB_ENV=''
  else
    eval CCCJOB_ENV=\'env CCCJOB_ROOT\=$CCCJOB_ROOT\'
  fi

  pdiag_with_cnfs=''
  with_cnfs=${pdiag_with_cnfs:=off}

  # PhysA = on/off   use physical atmosphere variables
  # PhysO = on/off   use physical ocean variables
  # CarbA = on/off   use atmosphere carbon variables
  # CarbO = on/off   use ocean carbon variables
  # CarbL = on/off   use land carbon variables
  PhysA=on
  PhysO=on
  CarbA=on
  CarbO=on
  CarbL=on
  pass_phys_carb=off
  ToF pass_phys_carb

  # When dumprs_sublist = T use dump_sublist rather than dump_list
  # to dump restarts at the end of each time series chunk
  pdiag_dumprs_sublist=''
  dumprs_sublist=${pdiag_dumprs_sublist:=on}
  ToF dumprs_sublist

  # When pdiag_with_dumprs = T then restarts will be dumped even when
  # history files are not dumped (this is the default behaviour)
  pdiag_with_dumprs=on
  ToF pdiag_with_dumprs

  # These variables must be available here so that they may be set according
  # to a user supplied diag_type below but may also be set directly by the
  # user here when diag_type is not defined (or even if it is defined)
  pdiag_with_cplhist=''
  pdiag_with_cplrs=''
  pdiag_with_dd=''
  pdiag_with_dp=''
  pdiag_with_cc=''
  pdiag_with_ie=''
  pdiag_with_gp6=''
  pdiag_with_xp6=''
  pdiag_with_td=''
  pdiag_with_ds=''
  pdiag_with_dsd=''
  pdiag_with_dsd3D=''
  pdiag_with_dcosp=''
  pdiag_with_mcosp=''
  pdiag_with_cp=''
  pdiag_with_gz=''
  pdiag_with_radforce=''
  pdiag_with_rad_flux_profs=''
  pdiag_transfer_all_dd=''

  # Pass diag2ts_prefix_list on to spltdiag if defined here
  diag2ts_prefix_list=''

  # Pass diag2ts_suffix_list on to spltdiag if defined here
  diag2ts_suffix_list=''

  # Determine if the user has requested a special type of diagnostics
  pdiag_type=''
  diag_type=${pdiag_type:=''}
  if [ -n "$diag_type" ]; then
    # Ensure lower case for diag_type
    diag_type=`echo $diag_type|tr '[A-Z]' '[a-z]'`
    case $diag_type in
      spbc_dd_dp_ds_rf_cosp)
          eval PhysO\=off
          eval CarbO\=off
          pdiag_with_gz=0
          pdiag_with_dd=1
          pdiag_with_dp=1
          pdiag_with_ds=1
          pdiag_with_dsd=1
          pdiag_with_dcosp=1
          pdiag_with_mcosp=1
          pdiag_with_rad_flux_profs=1
          pdiag_transfer_all_dd=0
          ;;
      spbc_dd_dp_ds_rf)
          eval PhysO\=off
          eval CarbO\=off
          pdiag_with_gz=0
          pdiag_with_dd=1
          pdiag_with_dp=1
          pdiag_with_ds=1
          pdiag_with_dsd=1
          pdiag_with_dcosp=0
          pdiag_with_mcosp=0
          pdiag_with_rad_flux_profs=1
          pdiag_transfer_all_dd=0
          ;;
      spbc_dd_dp_rf)
          eval PhysO\=off
          eval CarbO\=off
          pdiag_with_gz=0
          pdiag_with_dd=1
          pdiag_with_dp=1
          pdiag_with_ds=0
          pdiag_with_dsd=0
          pdiag_with_dcosp=0
          pdiag_with_mcosp=0
          pdiag_with_rad_flux_profs=1
          pdiag_transfer_all_dd=0
          ;;
      spbc_dd_dp_rf_mcosp)
          eval PhysO\=off
          eval CarbO\=off
          pdiag_with_gz=0
          pdiag_with_dd=1
          pdiag_with_dp=1
          pdiag_with_ds=0
          pdiag_with_dsd=0
          pdiag_with_dcosp=0
          pdiag_with_mcosp=1
          pdiag_with_rad_flux_profs=1
          pdiag_transfer_all_dd=0
          ;;
      spbc_dd_dp)
          eval PhysO\=off
          eval CarbO\=off
          pdiag_with_gz=0
          pdiag_with_dd=1
          pdiag_with_dp=1
          pdiag_with_ds=0
          pdiag_with_dsd=0
          pdiag_with_dcosp=0
          pdiag_with_mcosp=0
          pdiag_with_rad_flux_profs=0
          pdiag_transfer_all_dd=0
          ;;
      spbc_rf)
          eval PhysO\=off
          eval CarbO\=off
          pdiag_with_gz=0
          pdiag_with_dd=0
          pdiag_with_dp=0
          pdiag_with_ds=0
          pdiag_with_dsd=0
          pdiag_with_dcosp=0
          pdiag_with_mcosp=0
          pdiag_with_rad_flux_profs=1
          pdiag_transfer_all_dd=0
          ;;
      spbc)
          eval PhysO\=off
          eval CarbO\=off
          pdiag_with_gz=0
          pdiag_with_dd=0
          pdiag_with_dp=0
          pdiag_with_ds=0
          pdiag_with_dsd=0
          pdiag_with_dcosp=0
          pdiag_with_mcosp=0
          pdiag_with_rad_flux_profs=0
          pdiag_transfer_all_dd=0
          ;;
      cmam20)
          pdiag_with_cc=1
          pdiag_with_gp6=1
          pdiag_with_xp6=1
          pdiag_with_td=1
          diag2ts_prefix_list="dm"
          diag2ts_suffix_list="gp6"

          if [ $pdiag_with_xp6 -eq 1 ]; then
            diag2ts_suffix_list="$diag2ts_suffix_list xp6"
          fi

          if [ $pdiag_with_cc -eq 1 ]; then
            diag2ts_suffix_list="$diag2ts_suffix_list cc"
          fi

          if [ $pdiag_with_td -eq 1 ]; then
            diag2ts_prefix_list="$diag2ts_prefix_list:mm"
            diag2ts_suffix_list="$diag2ts_suffix_list:td"
          fi

          eval PhysO\=off
          eval CarbA\=off
          eval CarbO\=off
          eval CarbL\=off
          pdiag_with_gz=0
          pdiag_with_dd=0
          pdiag_with_dp=0
          pdiag_with_ds=0
          pdiag_with_dsd=0
          pdiag_with_dcosp=0
          pdiag_with_mcosp=0
          pdiag_with_rad_flux_profs=0
          pdiag_transfer_all_dd=0
          ;;
      ccmi)
          pdiag_with_gp=1
          pdiag_with_xp=1
          pdiag_with_dd=1
          pdiag_with_ie=1
          pdiag_with_cc=0
          pdiag_with_td=0
          diag2ts_prefix_list="dm"
          diag2ts_suffix_list="gp xp dd ie"
          if [ $pdiag_with_cc -eq 1 ]; then
            diag2ts_suffix_list="$diag2ts_suffix_list cc"
          fi
          eval PhysO\=off
          eval CarbA\=off
          eval CarbO\=off
          eval CarbL\=off
          pdiag_with_gz=0
          pdiag_with_dp=0
          pdiag_with_ds=0
          pdiag_with_dsd=0
          pdiag_with_dcosp=0
          pdiag_with_mcosp=0
          pdiag_with_rad_flux_profs=0
          pdiag_transfer_all_dd=0
          ;;
      *) bail "Invalid diag_type = $diag_type"
    esac
  fi

  # with_cplhist flags inclusion of "*_cplhist.nc" files
  with_cplhist=${pdiag_with_cplhist:=off}
  ToF with_cplhist

  # with_cplrs flags inclusion of "*_cplrs.tar" files
  with_cplrs=${pdiag_with_cplrs:=off}
  ToF with_cplrs

  # with_cp flags inclusion of "*_cp" files
  with_cp=${pdiag_with_cp:=$coupled}
  ToF with_cp

  # with_cs flags inclusion of "*_cs" files
  with_cs=$with_cp

  # with_gz flags inclusion of "*_gz" files
  with_gz=${pdiag_with_gz:=$coupled}
  ToF with_gz

  # with_ob flags inclusion of "*_ob" files
  # Assume no "ob" file when there is no "gz" file or the model is not coupled
  with_ob=${pdiag_with_gz:=$coupled}
  ToF with_ob

  # with_os flags inclusion of "*_os" files
  # Assume no "os" file when there is no "gz" file or the model is not coupled
  with_os=${pdiag_with_gz:=$coupled}
  ToF with_os

  # with_td flags inclusion of "*_td" files
  with_td=${pdiag_with_td:=off}
  ToF with_td

  # Radiative forcing specific diagnostics
  with_radforce=${pdiag_with_radforce:=off}
  ToF with_radforce
  with_rad_flux_profs=${pdiag_with_rad_flux_profs:=off}
  ToF with_rad_flux_profs

  # Delete diagnostic files, or not
  pdiag_delete_diag=on
  ToF pdiag_delete_diag

  # These variables are set when the job string is created
  previous_year=NotSet
  previous_month=NotSet

  current_year=NotSet
  current_month=NotSet

  next_year=NotSet
  next_month=NotSet

  run_start_year=NotSet
  run_start_month=NotSet
  run_stop_year=NotSet
  run_stop_month=NotSet

  # The variables pxdiag_prefix_list and pxdiag_suffix_list are strings containing
  # embedded colons and whitespace which are interpreted as list delimiters.
  # These strings may be thought of as 2 dimensional arrays, the rows of these
  # arrays are colon (:) separated strings and each row is divided into columns
  # by separating on white space.
  # These variables are used by make_file_name_list along with runid and year/mon
  # information from the *_year and *_month variables to generate file names.

  # In make_file_name_list the prefix_list and suffix_list strings are first
  # separated into colon delimited lists (rows). There must be an equal number of
  # rows in each of prefix_list and suffix_list because these rows will be
  # used in pairs.
  # Each pair of rows (one row from prefix_list and one row from suffix_list)
  # is separated into a white space separated list. Each element of these
  # white space separated lists is a single prefix or suffix (possibly modified
  # by appending a "+" followed by a comma separated list of integers in the
  # range 1-12). No white space is allowed within a single prefix or suffix.
  # These individual (pre|suf)fixes are then iterated over for each year and
  # month and for each pair of rows in prefix_list and suffix_list to form the
  # desired set of file names, each of which is of the form
  #
  # ${prefix}_${runid}_${year}_m${mon}_${suffix}
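
  # For example (illustrative values only), with runid=job000,
  #   prefix_list="dc:mc"
  #   suffix_list="gp xp:gs ss"
  # the first row pairs prefix "dc" with suffixes "gp" and "xp" and the
  # second row pairs prefix "mc" with suffixes "gs" and "ss", yielding names
  # such as dc_job000_1995_m01_gp and mc_job000_1995_m01_gs for each
  # year/month in the range.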

  # Any prefix or suffix in these lists may be modified by appending a +
  # followed by a comma separated list of numbers (no white space is allowed
  # within this modifier). Each number within the modifier list will correspond
  # to a month (1-12) for which a file with this suffix is to be included.
  # If the modifier exists for a particular suffix then only those months
  # indicated in the modifier will be added to the file list.
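
  # For example, a suffix entry of "rs+12" (as used in the default lists
  # below) includes the "rs" file for month 12 only, e.g.
  #   mc_job000_1995_m12_rs
  # while months 01-11 are skipped for that suffix (illustrative names).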

  # If the above form of file name is inappropriate then the user may
  # provide a template or templates to produce arbitrary file names.
  # These templates are defined in the variable pxdiag_prefix_list.
  # Any individual prefix will be treated as a file name template if it
  # begins with a "%" character. The template will consist of everything
  # after the "%" character and up to the next colon or white space.
  # It can be composed of anything but must ultimately (after variable
  # substitution, etc) result in a valid file name. When a template
  # is encountered, it is used as the entire file name (i.e. the "normal" file
  # name form is disregarded, as are the corresponding suffixes). However, it
  # is subject to the same iteration procedure as a normal prefix and
  # does undergo variable substitution. Variables that are defined for
  # substitution include year, mon, runid, uxxx, start_year, start_mon,
  # stop_year, stop_mon, all of the *_year and *_month variables defined
  # above as well as any user supplied variable definitions passed to
  # make_file_name_list as a command line option of the form var=val.
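
  # For example, a hypothetical template entry such as
  #   %${uxxx}_${runid}_${year}_m${mon}_extra
  # would, after variable substitution, produce names like
  #   mc_job000_1995_m01_extra
  # for each year/month in the range, and the corresponding suffixes are ignored.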

  case $ch2 in
    a) # AGCM only
      mdump_suffix_list='gs ss rs+12 ab+12 an+12'
      mload_suffix_list='gs ss'
      if [ $with_td -eq 1 ]; then
        mdump_suffix_list="$mdump_suffix_list td"
        mload_suffix_list="$mload_suffix_list td"
      fi
      ;;
    m) # MAM
      mdump_suffix_list='gs ss td rs+12 ab+12 an+12'
      mload_suffix_list='gs ss td'
      ;;
    c) # Coupled
      if [ x"$coupled" = xon -a $with_gz -eq 1 ]; then
        # Fully coupled
        mdump_suffix_list="cm gz gs ss os+12 cs+12 rs+12 ab+12 ob+12 an+12"
        mload_suffix_list="cm gz gs ss"
      elif [ x"$coupled" = xon -a $with_gz -eq 0 ]; then
        # Reading specified boundary conditions through the coupler
        mdump_suffix_list='gs ss rs+12 ab+12 an+12'
        mload_suffix_list='gs ss'
        if [ $with_cs -eq 1 ]; then
          mdump_suffix_list="$mdump_suffix_list cm cs+12"
          mload_suffix_list="$mload_suffix_list cm"
        fi
        if [ $with_ob -eq 1 ]; then
          mdump_suffix_list="$mdump_suffix_list ob+12"
        fi
        if [ $with_cplhist -eq 1 ]; then
          mdump_suffix_list="$mdump_suffix_list cplhist.nc"
        fi
        if [ $with_cplrs -eq 1 ]; then
          mdump_suffix_list="$mdump_suffix_list cplrs.tar+12"
        fi
      else
        mdump_suffix_list='cm gz gs ss os+12 cs+12 rs+12 ab+12 ob+12 an+12'
        mload_suffix_list='cm gz gs ss'
      fi
      if [ $with_td -eq 1 ]; then
        mdump_suffix_list="$mdump_suffix_list td"
        mload_suffix_list="$mload_suffix_list td"
      fi
      ;;
    f) # Forecast
      if [ x"$coupled" = xon -a $with_gz -eq 1 ]; then
        # Fully coupled
        mdump_suffix_list='cm gz gs ss os+12 cs+12 rs+12 ab+12 ob+12 an+12'
        mload_suffix_list='cm gz gs ss'
      elif [ x"$coupled" = xon -a $with_gz -eq 0 ]; then
        # Reading specified boundary conditions through the coupler
        mdump_suffix_list='cm gs ss cs+12 rs+12 ab+12 ob+12 an+12'
        mload_suffix_list='cm gs ss'
      elif [ x"$coupled" = xoff ]; then
        # AGCM only
        mdump_suffix_list='gs ss rs+12 ab+12 an+12'
        mload_suffix_list='gs ss'
      else
        mdump_suffix_list='cm gz gs ss os+12 cs+12 rs+12 ab+12 ob+12 an+12'
        mload_suffix_list='cm gz gs ss'
      fi
      ;;
    r) # RCM  *** UNTESTED ***
      if [ x"$coupled" = xon -a $with_gz -eq 1 ]; then
        mdump_suffix_list='cm gz gs ss os+12 cs+12 rs+12 ab+12 ob+12 an+12'
        mload_suffix_list='cm gz gs ss'
      elif [ x"$coupled" = xon -a $with_gz -eq 0 ]; then
        mdump_suffix_list='cm gs ss cs+12 rs+12 ab+12 ob+12 an+12'
        mload_suffix_list='cm gs ss'
      else
        mdump_suffix_list='cm gz gs ss os+12 cs+12 rs+12 ab+12 ob+12 an+12'
        mload_suffix_list='cm gz gs ss'
      fi
      bail "pxdiag_uxxx = $pxdiag_uxxx is currently not supported"
      ;;
    d) # Data  *** UNTESTED ***
      mdump_suffix_list='cm gz gs ss os+12 cs+12 rs+12 ab+12 ob+12 an+12'
      mload_suffix_list='cm gz gs ss'
      bail "pxdiag_uxxx = $pxdiag_uxxx is currently not supported"
      ;;
    *) bail "Unable to determine pooling type from pxdiag_uxxx = $pxdiag_uxxx" ;;
  esac

#xxx  if [ x"$coupled" = xon -a $with_gz -eq 1 ]; then
#xxx    mdump_suffix_list='cm gz gs ss os+12 cs+12 rs+12 ab+12 ob+12 an+12'
#xxx    mload_suffix_list='cm gz gs ss'
#xxx  elif [ x"$coupled" = xon -a $with_gz -eq 0 ]; then
#xxx    mdump_suffix_list='cm gs ss cs+12 rs+12 ab+12 ob+12 an+12'
#xxx    mload_suffix_list='cm gs ss'
#xxx  elif [ x"$ch2" = xa ]; then
#xxx    mdump_suffix_list='gs ss rs+12 ab+12 an+12'
#xxx    mload_suffix_list='gs ss'
#xxx  else
#xxx    mdump_suffix_list='cm gz gs ss os+12 cs+12 rs+12 ab+12 ob+12 an+12'
#xxx    mload_suffix_list='cm gz gs ss'
#xxx  fi

  pdiag_suffix_list=''
  pxdiag_suffix_list=${pdiag_suffix_list:="$mdump_suffix_list"}
#xxx  pxdiag_suffix_list="$mdump_suffix_list"
  suffix_list="${pxdiag_suffix_list:-gs ss gz cm ab+12 ob+12 an+12 rs+12 cs+12 os+12}"

  pdiag_prefix_list=''
  pxdiag_prefix_list=${pdiag_prefix_list:="$mdump_prefix_list"}
#xxx  mdump_prefix_list=''
#xxx  pxdiag_prefix_list="$mdump_prefix_list"
  prefix_list="${pxdiag_prefix_list:-$pxdiag_uxxx}"

  # if set pdiag_diag_suffix_list will contain suffixes of all diagnostic files
  # that are to be processed below
  pdiag_diag_suffix_list=''

  # Create a file containing a list of file names that may then be
  # "sourced" in the current environment to define the variables
  # file1, file2,..., file$join, join. These variables are used by
  # tdumper to compile the list of files to be archived.
  join=0
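
  # For example, a file list sourced here for two files might contain
  # (illustrative values only):
  #   file1=mc_job000_1995_m01_gs
  #   file2=mc_job000_1995_m01_ss
  #   join=2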

  # make_file_name_list uses the variables current_year, current_month,
  # previous_year, previous_month, next_year and next_month to
  # determine start and stop dates for file name creation.
  # It also uses runid, prefix_list and suffix_list from the current
  # environment to build these file names.

  # Allow user supplied command line options for make_file_name_list
  # The following invocation of make_file_name_list will not allow multi-list
  # output so if any command line option is supplied that will turn on
  # multi-list output (e.g. --months_max=... --size_max=... --number_max=..)
  # then this script will abort.
  pxdiag_file_list_opts=''
  fopts="${pxdiag_file_list_opts:-}"

  pxdiag_mon_offset=''
  if [ -n "$pxdiag_mon_offset" ]; then
    # Set a user supplied month offset
    eval fopts=\"$fopts --mon_offset\=$pxdiag_mon_offset\"
  fi

  # Create a temporary file containing the file list
  tmp_file_list="${JHOME:-$HOME}/.queue/pxdiag_file_list_${runid}_${stamp}"
  make_file_name_list $fopts --nomulti_list $tmp_file_list >>$error_out 2>&1 ||\
    bail "Problem in make_file_name_list"
  rm -f $error_out

  [ ! -s "$tmp_file_list" ] && bail "Unable to create file list"

  # A file list was created ...source it
  : ; . $tmp_file_list

  # Delete the file that contains the file list
  rm -f $tmp_file_list

  # At this point file1, file2,... and join are defined in the current
  # environment as well as certain other variables such as start_year,
  # start_mon, stop_year and stop_mon which correspond to the start and
  # stop dates for the file names that were created.
  ym_range="${start_year}m${start_mon}_${stop_year}m${stop_mon}"
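  # e.g. start_year=1995, start_mon=01, stop_year=1995, stop_mon=12
  # (illustrative values) would give ym_range=1995m01_1995m12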

  # Define module specific start/stop dates
  pxdiag_start_year=$start_year
  pxdiag_start_mon=$start_mon
  pxdiag_stop_year=$stop_year
  pxdiag_stop_mon=$stop_mon

  nfile=$join

  # Create a file containing the list of file names to process
  nsent=0
  rm -f file_list
  touch file_list
  while [ $nsent -lt $nfile ]; do
    nsent=`expr $nsent + 1`
    eval local_file=\$file$nsent
    [ -z "$local_file" ] && bail "Invalid file name in file$nsent"
    # Create this list in a format that may be inserted directly into
    # a delete job
    echo "  file${nsent}=$local_file" >> file_list
  done

  # Append the number of files to file_list
  echo "  join=$nfile" >> file_list

  echo "pxdiag: file_list"
  cat file_list

  # Insert a user supplied list of files that are to be deleted after
  # they have been dumped to tape. These file names must be assigned to
  # the shell variables fdel01,fdel02,... and the shell variable nfdel
  # must be set to the number in this list. If nfdel=0 then there will
  # be no delete job created below.
  nfdel=0
  # <<INSERT_PXDIAG>>

  rm -f fdel_list
  touch fdel_list
  if [ $nfdel -gt 0 ]; then
    # Create a file containing the list of files to be deleted from the back end
    nn=0
    while [ $nn -lt $nfdel ]; do
      nn=`expr $nn + 1`
      eval local_fdel=\$fdel$nn
      [ -z "$local_fdel" ] && bail "Invalid file name in fdel$nn"
      # Create this list in a format that may be inserted directly into
      # a delete job
      echo "  file${nn}=$local_fdel" >> fdel_list
    done
    # Append the number of files to fdel_list
    echo "  join=$nfdel" >> fdel_list

    echo "pxdiag: fdel_list"
    cat fdel_list
  fi

  # Set a variable containing the name of the back end machine to which
  # the delete job will be sent
  bemach=''
  pdiag_back_end=${bemach:=''}
  back_end_mach=${pdiag_back_end:='hadar'}

  # gateway is the name of the machine to which files
  # are sent prior to being moved off site
  pdiag_front_end=''
  pxdiag_gateway=${pdiag_front_end:=''}
  gateway=${pxdiag_gateway:='pollux'}

  gateway_is_back_end=0
  case $gateway in
                   c[0-9]*) gateway_is_back_end=1 ;;
    zeta|saiph|spica|hadar) gateway_is_back_end=1 ;;
               za|sa|sp|ha) gateway_is_back_end=1 ;;
  esac

  # Get the name of the machine running this script
  this_mach=`uname -n|awk -F\. '{print \$1}' -`
  case $this_mach in
    c1*) this_mach=spica ;;
    c2*) this_mach=hadar ;;
    c3*) this_mach=rigel ;;
    c4*) this_mach=maia  ;;
    c5*) this_mach=naos  ;;
    c6*) this_mach=saiph ;;
    c7*) this_mach=zeta  ;;
#    ib*) this_mach=ib    ;;
  alef*) this_mach=alef  ;;
   erg*) this_mach=erg   ;;
   ib3*) this_mach=pollux ;;
  esac

  # running_on_gateway, if set, indicates that this script
  # is running on the front end
  running_on_gateway=0
  if [ "$this_mach" = "$gateway" ]; then
    running_on_gateway=1
    echo "pxdiag is running on $gateway"
  fi

  # pdiag_lock is the name of the lock file associated with pdiag.
  # This should only be set if this job string was created by the module pdiag
  pdiag_lock=''

  # Extra ssh args (use -v for debugging connections)
  pxdiag_ssh_args=''
  ssh_args=${pxdiag_ssh_args:-''}

  # Once all the files have been transferred, create a job to dump them
  # to tape, a job to transfer them to a remote machine and a job to run
  # diagnostics (assuming history files are being dumped).

  # if set then mdump_months is used below to configure a serial mdump string
  # that will be comprised of a sequence of tdumper jobs, each of which will
  # dump mdump_months months of history files. It also signifies that no
  # history files should be deleted from the front end.
  mdump_months=''

  # if set then ddump_months is used to determine the frequency at which
  # diagnostic files are dumped.
  # This has implications for when the diag files are dumped. If not set then
  # diag files are dumped each time this script is invoked and for all years
  # from start_(year/mon) to stop_(year/mon).
  # If set the diag files are dumped only at the end of a pooling/time series
  # interval (every chunk_size months) and for all years in the chunk, with
  # ddump_months months per arc file.
  pdiag_ddump_months=''
  ddump_months=${pdiag_ddump_months:=''}

  # Define a partial file name for the diag job to be created below
  pxdiag_id=pxdiag_${runid}_$ym_range

  # Define the name of the file to contain the job string created below
  pxdiag_job=${pxdiag_id}_job

  # Define the full path name for a lock file corresponding to the job string created below

  if [ $running_on_gateway -eq 1 ]; then
    # If this script is running on the gateway machine
    # then use $HOME for rem_dir
    rem_home=${JHOME:-$HOME}
  else
    # If this script is not running on the gateway machine
    # then set rem_dir to the users home dir on the gateway machine
    rem_home=`ssh $gateway 'echo $HOME'` 2>&1 || \
      bail "***ERROR*** Unable to determine remote home directory.\n"
  fi
  pxdiag_lock="${rem_home}/.queue/.crawork/lock_${pxdiag_id}_$stamp"
  pxdiag_string="${rem_home}/.queue/.crawork/${pxdiag_id}_${stamp}_string"

  # Create the lock files unless this is a serial run
  if [ -z "$stand_alone_craworkval" ]; then

    if [ $running_on_gateway -eq 1 ]; then
      # If this script is running on the gateway machine
      # then create the lock files on this machine
      cp -f file_list $pxdiag_lock
    else
      # If this script is not running on the gateway machine
      # then create the lock files on the gateway machine
      scp file_list ${gateway}:$pxdiag_lock 2>&1 || \
          bail "***ERROR*** Unable to create lock file $pxdiag_lock\n"
    fi

    # This eval is necessary to prevent the shebang line from being
    # blindly replaced with a different shell by submit3
    eval shebang=\#\!/\bin/\sh
    cat > del_part1 <<EOF
$shebang
  set -a
  . betapath2
  jobname=del_list; crawork=delete_job; username=acrnxxx; user=XXX;

  # Alternate path to a directory where .queue will be found
  JHOME=''

  if [ -n "\$JHOME" -a x"\$JHOME" != x"\$HOME" ]; then
    # Allow optional reset of DATAPATH/RUNPATH
    JHOME_DATA=''
    DATAPATH=\${JHOME_DATA:=\$DATAPATH}
    RUNPATH=\${JHOME_DATA:=\$RUNPATH}
    # Allow optional reset of CCRNTMP
    JHOME_RUN=''
    CCRNTMP=\${JHOME_RUN:=\$CCRNTMP}
  fi

  noprint=on
  nextjob=on

  #  * ........................... Parmsub Parameters ....................
  lopgm="lopgm"; stime="3600"; memory1="24mb";

  # Define the name of the pdiag_lock file, if any
  pdiag_lock=$pdiag_lock

  # Initialize join in case the file list is not included
  join=0

EOF

    cat > del_part2_mdump <<EOF
  # Remove the pxdiag lock file
  rm -f $pxdiag_lock
  # ---Start_submit_ignore_code----
  sleep 120
  # ---Stop_submit_ignore_code----

EOF

    cat > del_part4 <<'EOF'

  #  * ............................ Condef Parameters ............................
  noprint=on
  nextjob=on

  #  * ............................. Deck Definition .............................
  . comjcl.cdk
  cat > Execute_Script <<'end_of_exec_script'
    nn=0
    while [ $nn -lt $join ]; do
      nn=`expr $nn + 1`
      eval FILE=\$file$nn
      access $FILE $FILE nocp na
      delete $FILE na
    done

    # Remove the pdiag lock file if it exists
    [ ! -z "$pdiag_lock" ] && rm -f $pdiag_lock

end_of_exec_script
EOF
    echo "  . endjcl.cdk" >> del_part4
    echo "#end_of_job"    >> del_part4

    if [ -n "$mdump_months" ]; then
      cat del_part1 del_part2_mdump           del_part4 > rm_lock_job
    else
      cat del_part1 del_part2_mdump file_list del_part4 > rm_lock_job
    fi

  fi # if [ -z "$stand_alone_craworkval" ]; then

  # Require cccjob
  which cccjob || bail "cccjob is not in your path"

  ######### Create the job string ###########

    # These variables may be set when the pxdiag job string is created
    # If a variable is set then it will be used in the mdump job

    mdump_uxxx=''
    mdump_suffix_list="$pxdiag_suffix_list"
    mdump_cfsuser=''
    mdump_CCRNTMP=''
    pxdiag_CCRNTMP=${mdump_CCRNTMP:=$CCRNTMP}
    mdump_RUNPATH=''
    pxdiag_RUNPATH=${mdump_RUNPATH:=$RUNPATH}
    mdump_qsublog=''
    mdump_with_lock_file=''
    mdump_sv=''
    check_cfs_arcfile=''
    nontwrkchk=''
    cfsuser=''
    masterdir=off
    shortermdir=on
    nolist=''
    pxdel_lock_check=''
    with_lsarc=''

    # year_offset is used by some diagnostic decks
    year_offset=0

    # If next_block_lock is defined it will be the name of a lock file that was created
    # by the "block" module. pxdiag should remove this lock file after it finishes
    # processing at the end of each time series chunk
    next_block_lock=''

    # If pxdel_check_last_mon = on then only lock files with a date range ending with
    # the current year and month will be checked for existence.
    pxdel_check_last_mon=''

    bemach=''

    # besc = on causes a tdumper job to run on the back end (spica/hadar)
    pxdiag_besc=''
    besc=${pxdiag_besc:=''}

    # This must be done after pxdiag_CCRNTMP has been derived from mdump_CCRNTMP above
    if [ x"$with_cnfs" = xon ]; then
      # Hard code the name of the temporary dir used on cnfs for use with mdump jobs
      # This will likely be revised in the future
      mdump_CCRNTMP=/cnfs/dev/crb/linux/cnfs_ccrn01/data/utmp
    fi

    mload_uxxx=${mdump_uxxx:-''}
    mload_cfsuser=${mdump_cfsuser:-''}
    mload_CCRNTMP=${pxdiag_CCRNTMP:-''}
    mload_RUNPATH=${pxdiag_RUNPATH:-''}
    mload_sv=''

    mdelete_uxxx=${mdump_uxxx:-''}
    mdelete_suffix_list="$pxdiag_suffix_list"
    mdelete_CCRNTMP=${pxdiag_CCRNTMP:-''}
    mdelete_RUNPATH=${pxdiag_RUNPATH:-''}
    mdelete_leave_last_mon=off

    othr2fe_uxxx=${mdump_uxxx:-''}
    othr2fe_suffix_list="$pxdiag_suffix_list"
    othr2fe_CCRNTMP=${pxdiag_CCRNTMP:-''}
    othr2fe_RUNPATH=${pxdiag_RUNPATH:-''}

    diag_CCRNTMP=${pxdiag_CCRNTMP:-''}
    diag_RUNPATH=${pxdiag_RUNPATH:-''}

    # load_hist will determine how history files get onto the front end
    # to be used by the diagnostics when diag_job has a non-null value
    # load_hist = off  ==>  Save history files on the front end as they are dumped
    # load_hist = on   ==>  Load history files from the cfs
    load_hist=off
    [ -n "$stand_alone_craworkval" ] && load_hist=on
    XXX=`echo $load_hist|sed 's/ //g'`
    eval load_hist\=$XXX
    [ "$load_hist" = 'on'  ] && eval load_hist\=1
    [ "$load_hist" = 'off' ] && eval load_hist\=0
    [ "$load_hist" = 'yes' ] && eval load_hist\=1
    [ "$load_hist" = 'no'  ] && eval load_hist\=0

    # Chunk size (in months) for pooling, dumping diags and creating time series
    pdiag_chunk_size=''
    chunk_size=${pdiag_chunk_size:=120}

    # first_chunk_size is the length (in months) of the first chunk in the run
    # This is the chunk that starts at run_start_year/run_start_month
    if [ -z "$pdiag_chunk_size" ]; then
      # If the user has not explicitly set chunk_size then
      # then first_chunk_size defaults to chunk_size + 1 year
      pdiag_first_chunk_size=`expr $chunk_size + 12`
    else
      # If the user has explicitly set chunk_size then
      # first_chunk_size defaults to chunk_size
      # In either case the user may also set first_chunk_size explicitly
      pdiag_first_chunk_size=$chunk_size
    fi
    first_chunk_size=$pdiag_first_chunk_size

    # Do not allow first_chunk_size to be zero
    # The script below assumes that it will always be greater than zero
    [ $first_chunk_size -eq 0 ] && eval first_chunk_size\=$chunk_size

    # Initialize is_chunk_time, a boolean flag used to determine when the extra
    # decks (pooling, time series, delete diag files, etc) are to be run
    is_chunk_time=0

    # Initialize is_first_chunk, a boolean flag used to identify the first time
    # the extra decks are run
    is_first_chunk=0

    # Determine the number of months in the entire job
    months_in_run=`echo $run_start_year $run_start_month $run_stop_year $run_stop_month|\
                    awk '{if ($1<=0 || $3<=0 || $3<$1) {print "0"; exit}
                          if ($2<1 || $2>12 || $4<1 || $4>12) {print "0"; exit}
                          if ($1 == $3) {m=$4-$2+1}
                          else {m=($3-$1-1)*12+13-$2+$4}; printf "%d",m}' -`
    [ $months_in_run -le 0 ] && \
      bail "Invalid months_in_run = $months_in_run ...run_start_year = $run_start_year run_start_month = $run_start_month run_stop_year = $run_stop_year run_stop_mon = $run_stop_mon"
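
    # For example (illustrative dates), a run from 1995/03 through 1996/02
    # gives months_in_run = (1996-1995-1)*12 + 13 - 3 + 2 = 12 months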

    # Check that first_chunk_size is no larger than the entire run
    [ $first_chunk_size -gt $months_in_run ] && \
      bail "The first chunk size $first_chunk_size is greater than the entire run $months_in_run"

    # Determine if this is the first job in the string
    # Note that in general this job will not end after chunk_size months and
    # therefore will not run the extra pooling, etc decks. Look for is_first_chunk
    # below for a flag that will indicate when the first set of extra decks will run
    is_first_job=`echo $run_start_year $run_start_month $start_year $start_mon|\
      awk '{if (($1==$3 && $2>=$4) || $3<$1) {print "1"} else {print "0"}}' -`

    # Determine if this is the last job in the string
    is_last_job=`echo $run_stop_year $run_stop_month $stop_year $stop_mon|\
      awk '{if (($1==$3 && $2<=$4) || $3>$1) {print "1"} else {print "0"}}' -`

    # This flag determines whether or not time series are to be created
    pdiag_with_time_series=''
    with_time_series=${pdiag_with_time_series:=on}
    XXX=`echo $with_time_series|sed 's/ //g'`
    eval with_time_series\=$XXX
    [ "$with_time_series" = 'on'  ] && eval with_time_series\=1
    [ "$with_time_series" = 'off' ] && eval with_time_series\=0
    [ "$with_time_series" = 'yes' ] && eval with_time_series\=1
    [ "$with_time_series" = 'no'  ] && eval with_time_series\=0

    # remusr is the login name of the user on the remote machine
    pdiag_remusr=''
    remusr=${pdiag_remusr:=`whoami`}

    # remserver is the fully qualified domain name of the remote machine
    pdiag_remserver=''
    remserver=${pdiag_remserver:='lxsrv.cccma.ec.gc.ca'}

    # If set RMTRUNPATH will be a directory on the remote machine into which
    # files will be copied after being transferred from Dorval
    pdiag_RMTRUNPATH=''
    RMTRUNPATH=${pdiag_RMTRUNPATH:=''}

    # This flag determines whether or not time series are to be copied
    # to a remote machine (currently lxsrv at UVic, meaning this should
    # only be used when running on a machine in Dorval)
    # Setting pdiag_RMTRUNPATH implies pdiag_transfer_time_series = on, therefore
    # it is not necessary to set pdiag_transfer_time_series when pdiag_RMTRUNPATH
    # is set. If pdiag_transfer_time_series is set explicitly, it will determine
    # whether or not the transfer should take place.
    pdiag_transfer_time_series=''

    if [ -z "$RMTRUNPATH" ]; then
      # If the user has not set pdiag_RMTRUNPATH then transfer_time_series will default
      # to "off", but may be set explicitly by setting pdiag_transfer_time_series
      transfer_time_series=${pdiag_transfer_time_series:=off}
    else
      # If the user has set pdiag_RMTRUNPATH then transfer_time_series will default
      # to "on", but may be set explicitly by setting pdiag_transfer_time_series
      transfer_time_series=${pdiag_transfer_time_series:=on}
    fi

    # In all cases, if with_time_series is false then transfer_time_series is false
    [ $with_time_series -eq 0 ] && eval transfer_time_series\=off
    XXX=`echo $transfer_time_series|sed 's/ //g'`
    eval transfer_time_series\=$XXX
    [ "$transfer_time_series" = 'on'  ] && eval transfer_time_series\=1
    [ "$transfer_time_series" = 'off' ] && eval transfer_time_series\=0
    [ "$transfer_time_series" = 'yes' ] && eval transfer_time_series\=1
    [ "$transfer_time_series" = 'no'  ] && eval transfer_time_series\=0

    if [ $transfer_time_series -eq 1 ]; then
      # If RMTRUNPATH is set then ensure that it exists on the remote machine and
      # contains a "time" and a "pool" subdirectory. If not, create these dirs.
      if [ -n "$RMTRUNPATH" ]; then
        # Look for the dir RMTRUNPATH on the remote machine
        fail=0
        ssh $this_mach ssh $remserver test -d $RMTRUNPATH 2>/dev/null || fail=1
        if [ $fail -eq 1 ]; then
          # The dir does not exist
          # Make sure there is no regular file by that name on the remote machine
          fail=0
          ssh $this_mach ssh $remserver test -f $RMTRUNPATH 2>/dev/null || fail=1
          [ $fail -eq 0 ] && bail "Regular file $RMTRUNPATH exists on $remserver"

          # Create the RMTRUNPATH dir and ensure the permissions are correct
          fail=0
          ssh $this_mach ssh $remserver "mkdir -m 770 $RMTRUNPATH" 2>&1 || fail=1
          [ $fail -eq 1 ] && bail "Unable to create $RMTRUNPATH on $remserver"
        fi

        # Look for RMTRUNPATH/time on the remote machine
        fail=0
        ssh $this_mach ssh $remserver test -d $RMTRUNPATH/time 2>/dev/null || fail=1
        if [ $fail -eq 1 ]; then
          # Create the RMTRUNPATH/time dir
          fail=0
          ssh $this_mach ssh $remserver "mkdir -m 770 $RMTRUNPATH/time" 2>&1 || fail=1
          [ $fail -eq 1 ] && bail "Unable to create $RMTRUNPATH/time on $remserver"
        fi

        # Look for RMTRUNPATH/pool on the remote machine
        fail=0
        ssh $this_mach ssh $remserver test -d $RMTRUNPATH/pool 2>/dev/null || fail=1
        if [ $fail -eq 1 ]; then
          # Create the RMTRUNPATH/pool dir
          fail=0
          ssh $this_mach ssh $remserver "mkdir -m 770 $RMTRUNPATH/pool" 2>&1 || fail=1
          [ $fail -eq 1 ] && bail "Unable to create $RMTRUNPATH/pool on $remserver"
        fi
      fi
    fi

    # Normally only a subset of "_dd" files are transferred
    # Setting transfer_all_dd true will cause all "_dd" files to be transferred
    transfer_all_dd=${pdiag_transfer_all_dd=off}

    # This flag will turn on or turn off dumping of history files
    pdiag_dump_hist=on
    ToF pdiag_dump_hist

    # This flag will turn on or turn off dumping of diagnostic files
    pdiag_dump_diag=on
    ToF pdiag_dump_diag

    # This flag determines whether or not history files are to be dumped to cfs
    # If they are not dumped they are simply copied from the back end to the front end
    dump_or_copy_hist=${pdiag_dump_or_copy_hist:=on}
    ToF dump_or_copy_hist

    # When history files are not dumped but simply copied to the front end
    # del_hist_after_copy will flag deletion of the history files from the back
    # end after they are copied
    del_hist_after_copy=off
    ToF del_hist_after_copy

    # This flag determines whether or not pooling/time series etc will be run
    # at the end of a run when the last "chunk" is shorter than the standard chunk
    pdiag_with_partial_chunk=''
    with_partial_chunk=${pdiag_with_partial_chunk:=on}
    ToF with_partial_chunk

    # pdiag_with_pool allows the user to run or not run pooling
    pdiag_with_pool=''
    with_pool=${pdiag_with_pool:=on}
    ToF with_pool

    # Set the prefix for diagnostic files
    diag_uxxx="d$ch2"

    # Set the prefix for history files
    model_uxxx="m$ch2"

    # with_dd flags inclusion of "*_dd" files
    # "_dd" files contain (sub) daily 2D variables
    with_dd=${pdiag_with_dd:=on}
    ToF with_dd

    # with_dp flags inclusion of "*_dp" files
    # "_dp" files contain (sub) daily 3D variables on pressure levels
    with_dp=${pdiag_with_dp:=on}
    ToF with_dp

    # with_cc flags inclusion of "*_cc" files
    with_cc=${pdiag_with_cc:=off}
    ToF with_cc

    # with_ie flags inclusion of "*_ie" files
    with_ie=${pdiag_with_ie:=off}
    ToF with_ie

    # with_gp6 flags inclusion of "*_gp6" files
    with_gp6=${pdiag_with_gp6:=off}
    ToF with_gp6

    # with_xp6 flags inclusion of "*_xp6" files
    with_xp6=${pdiag_with_xp6:=off}
    ToF with_xp6

    # The value of the parameter daily_cmip5_tiers found in the diagnostic deck
    # determines whether or not "*_dd" and/or "*_dp" files are created
    # daily_cmip5_tiers contains "1"  =>  "*_dd" files are created
    # daily_cmip5_tiers contains "2"  =>  "*_dp" files are created
    if [ $with_dd -eq 1 -a $with_dp -eq 1 ]; then
      daily_cmip5_tiers='1 2'
    elif [ $with_dd -eq 1 ]; then
      daily_cmip5_tiers='1'
    elif [ $with_dp -eq 1 ]; then
      daily_cmip5_tiers='2'
    else
      daily_cmip5_tiers='0'
    fi

    # with_ds flags inclusion of "*_ds" files
    with_ds=${pdiag_with_ds:=off}
    ToF with_ds

    pdiag_gssave=''

    # The parameter gssave found in the diagnostic deck determines
    # whether or not the "*_ds" files are created
    # "_ds" files contain (sub)daily 3D variables on model levels
    # For CMAM20 diagnostics gssave is used in gpintstat2_cmam20.dk and, apparently,
    # is always "on". So why use it ...who knows.
    if [ $with_ds -eq 1 -o x"$diag_type" = xcmam20 -o x"$diag_type" = xccmi ]; then
      gssave=on
    else
      gssave=${pdiag_gssave:=off}
    fi

    # with_dsd flags inclusion of additional CFMIP daily fields
    # Files with the "dsd" suffix are created in daily_cmip5_cfmip_v2_dsd3D
    with_dsd=${pdiag_with_dsd:=off}
    ToF with_dsd

    # dsd3D is used in daily_cmip5_cfmip_v2_dsd3D
    # to flag saving of daily 3D fields
    dsd3D=${pdiag_with_dsd3D:=off}
    ToF dsd3D

    # with_dcosp flags inclusion of "*_dcosp" files (daily COSP)
    # Files with the "dcosp" suffix are created in daily_cmip5_cosp_v2
    with_dcosp=${pdiag_with_dcosp:=off}
    ToF with_dcosp

    # with_mcosp flags inclusion of monthly COSP fields found in the gp file
    # (monthly COSP output is enabled via pdiag_cosp_sim below)
    with_mcosp=${pdiag_with_mcosp:=off}
    ToF with_mcosp

    # If diag_job is set then it must be the name of a diagnostic job that
    # is found on the user's path (or in a default location, see below)
    # If set then the diagnostics will run after history files are dumped
    diag_job=''

    if [ -n "$diag_job" ]; then
      # The user has supplied the name of a diagnostic job, meaning that
      # diagnostics are to be run in the same job string as the dumps

      # Define diag_path which will be the full pathname to diag_job
      diag_path=''

      # If diag_job is a valid pathname then use it as is
      if [ -s "$diag_job" ]; then
        diag_path=$diag_job
      fi

      # If diag_job is not a valid pathname then look on the user's path
      if [ -z "$diag_path" ]; then
        [ -z "$PATH" ] && bail "PATH is not set"
        for pdir in `echo $PATH|sed 's/:/ /g'`; do
          if [ -s "$pdir/$diag_job" ]; then
            diag_path=$pdir/$diag_job
            break
          fi
        done
      fi

      # If not found on the user's path then look in the cccjob jobdefs dir
      if [ -z "$diag_path" ]; then
        if [ -s $HOME/cccjobs/.cccjobrc ]; then
          # If the cccjob startup file exists then attempt to
          # get a definition for CCCJOB_ROOT from that file
          . $HOME/cccjobs/.cccjobrc || :
        fi
        if [ -n "$CCCJOB_ROOT" ]; then
          def_path="$CCCJOB_ROOT/lib/jobdefs"
        else
          def_path="$CCRNSRC/cccjob_dir/pub/lib/jobdefs"
        fi
        if [ -s "$def_path/${diag_job}_jobdef" ]; then
          # Get a local copy of the diagnostic job and name it diag_job
          # This local copy will (possibly) be modified by commenting
          # or uncommenting certain decks in the file
          cp $def_path/${diag_job}_jobdef $diag_job
          diag_path=$diag_job

          tmp_diag_job=tmp_${diag_job}_$stamp

          # Determine the set of diagnostic decks to use based on the model version
          # Any of these deck names may be overwritten by user supplied values
          # Set default deck names for any version prior to gcm16
          # These are the decks that were used for AR5
          delmodinfo_deck=delmodinfo.dk
          gpintstat_deck=gpintstat2.dk
          gsstats_deck=gsstats2.dk
          xstats_deck=xstats5.dk
          xtrachem_deck=xtrachem.dk
          xtraconv_deck=xtraconv.dk
          cosp_deck=cosp_sim.dk
          cmstats_deck=cmstats2.dk
          cldmic_deck=''
          cfmip_monthly_deck=cfmip_monthly_rad_force.dk
          daily_cmip5_deck=daily_cmip5.dk
          daily_cmip5_cfmip_deck=daily_cmip5_cfmip_v2_dsd3D.dk
          daily_cmip5_cosp_deck=daily_cmip5_cosp_v2.dk
          xtradust_bulk_deck=''
          aodpth_volc_deck=''
          aerorf_deck=''
          case $model_version in
            gcm17)
              gsstats_deck=gsstats5.dk
              cosp_deck=cosp3.dk
              cldmic_deck=cldmic.dk
              # cfmip_monthly_deck=cfmip_monthly_extra_fields_gcm16.dk
              cfmip_monthly_deck=cfmip_monthly_xtra_fields.dk
              xtradust_bulk_deck=xtradust_bulk.dk
              aodpth_volc_deck=aodpth_volc1.dk
              aerorf_deck=aerorf.dk
              ;;
            gcm16)
              gsstats_deck=gsstats4.dk
              cosp_deck=cosp3.dk
              cldmic_deck=cldmic.dk
              cfmip_monthly_deck=cfmip_monthly_extra_fields_gcm16.dk
              aodpth_volc_deck=aodpth_volc.dk
              ;;
            gcm15[bcdefgh]) ;; # These are valid model versions
            gcm13[bcde])    ;; # These are valid model versions
            *) bail "Invalid model_version = $model_version" ;;
          esac

          # Interpolate to pressure levels + PMSL
          pdiag_gpintstat=on
          ToF pdiag_gpintstat
          if [ $pdiag_gpintstat -eq 1 ]; then
            sed 's/^[# ]*\. *gpintstat[0-9]*.dk/  . '$gpintstat_deck'/' $diag_job > $tmp_diag_job
            mv $tmp_diag_job $diag_job
          elif [ $pdiag_gpintstat -eq 0 ]; then
            sed 's/^ *\. *gpintstat[0-9]*.dk.*$//' $diag_job > $tmp_diag_job
            mv $tmp_diag_job $diag_job
          fi

          # Model physics statistics
          pdiag_gsstats=on
          ToF pdiag_gsstats
          if [ $pdiag_gsstats -eq 1 ]; then
            sed 's/^[# ]*\. *gsstats[0-9]*.dk/  . '$gsstats_deck'/' $diag_job > $tmp_diag_job
            mv $tmp_diag_job $diag_job
          elif [ $pdiag_gsstats -eq 0 ]; then
            sed 's/^ *\. *gsstats[0-9]*.dk.*$//' $diag_job > $tmp_diag_job
            mv $tmp_diag_job $diag_job
          fi

          # Tracer statistics
          pdiag_xstats=on
          ToF pdiag_xstats
          if [ $pdiag_xstats -eq 1 ]; then
            sed 's/^[# ]*\. *xstats[0-9]*.dk/  . '$xstats_deck'/' $diag_job > $tmp_diag_job
            mv $tmp_diag_job $diag_job
          elif [ $pdiag_xstats -eq 0 ]; then
            sed 's/^ *\. *xstats[0-9]*.dk.*$//' $diag_job > $tmp_diag_job
            mv $tmp_diag_job $diag_job
          fi

          # xtrachem diagnostics
          pdiag_xtrachem=on
          ToF pdiag_xtrachem
          if [ $pdiag_xtrachem -eq 1 ]; then
            sed 's/^[# ]*\. *xtrachem[0-9]*.dk/  . '$xtrachem_deck'/' $diag_job > $tmp_diag_job
            mv $tmp_diag_job $diag_job
          elif [ $pdiag_xtrachem -eq 0 ]; then
            sed 's/^ *\. *xtrachem[0-9]*.dk.*$//' $diag_job > $tmp_diag_job
            mv $tmp_diag_job $diag_job
          fi

          # xtraconv diagnostics
          pdiag_xtraconv=on
          ToF pdiag_xtraconv
          if [ $pdiag_xtraconv -eq 1 ]; then
            sed 's/^[# ]*\. *xtraconv[0-9]*.dk/  . '$xtraconv_deck'/' $diag_job > $tmp_diag_job
            mv $tmp_diag_job $diag_job
          elif [ $pdiag_xtraconv -eq 0 ]; then
            sed 's/^ *\. *xtraconv[0-9]*.dk.*$//' $diag_job > $tmp_diag_job
            mv $tmp_diag_job $diag_job
          fi

          # COSP diagnostics
          # This deck adds a number of COSP variables from the "gp" and "xp" files
          if [ $with_mcosp -eq 1 ]; then
            pdiag_cosp_sim=on
          else
            pdiag_cosp_sim=off
          fi
          ToF pdiag_cosp_sim
          if [ $pdiag_cosp_sim -eq 1 ]; then
            sed 's/^[# ]*\. *cosp_sim[0-9]*.dk/  . '$cosp_deck'/' $diag_job > $tmp_diag_job
            mv $tmp_diag_job $diag_job
          elif [ $pdiag_cosp_sim -eq 0 ]; then
            sed 's/^ *\. *cosp_sim[0-9]*.dk.*$//' $diag_job > $tmp_diag_job
            mv $tmp_diag_job $diag_job
          fi

          # Cloud microphysics
          if [ -z "$cldmic_deck" ]; then
            pdiag_cldmic=off
          else
            pdiag_cldmic=on
          fi
## TODO ## disable cldmic for the time being
          pdiag_cldmic=off
          ToF pdiag_cldmic
          if [ $pdiag_cldmic -eq 1 ]; then
            sed 's/^ *[# ]*\. *cldmic[0-9]*.dk/  . '$cldmic_deck'/' $diag_job > $tmp_diag_job
            mv $tmp_diag_job $diag_job
          else
            sed 's/^ *\. *cldmic[0-9]*.dk.*$//' $diag_job > $tmp_diag_job
            mv $tmp_diag_job $diag_job
          fi

          # CFMIP monthly radiative forcing diagnostics
          # This deck adds a number of radiative forcing variables to the "gp" and "xp" files
          if [ $with_radforce -eq 1 -o $with_rad_flux_profs -eq 1 ]; then
            pdiag_cfmip_monthly_rad_force=on
          else
            pdiag_cfmip_monthly_rad_force=off
          fi
          ToF pdiag_cfmip_monthly_rad_force
          if [ $pdiag_cfmip_monthly_rad_force -eq 1 ]; then
            sed 's/^[# ]*\. *cfmip_monthly_rad_force[0-9]*.dk/  . '$cfmip_monthly_deck'/' $diag_job > $tmp_diag_job
            mv $tmp_diag_job $diag_job
          elif [ $pdiag_cfmip_monthly_rad_force -eq 0 ]; then
            sed 's/^ *\. *cfmip_monthly_rad_force[0-9]*.dk.*$//' $diag_job > $tmp_diag_job
            mv $tmp_diag_job $diag_job
          fi

          # xtra dust bulk
          if [ -z "$xtradust_bulk_deck" ]; then
            pdiag_xtradust_bulk=off
          else
            pdiag_xtradust_bulk=on
          fi
## TODO ## disable xtradust_bulk for the time being
          pdiag_xtradust_bulk=off
          ToF pdiag_xtradust_bulk
          if [ $pdiag_xtradust_bulk -eq 1 ]; then
            sed 's/^[# ]*\. *xtradust_bulk[0-9]*.dk/  . '$xtradust_bulk_deck'/' $diag_job > $tmp_diag_job
            mv $tmp_diag_job $diag_job
          else
            sed 's/^ *\. *xtradust_bulk[0-9]*.dk.*$//' $diag_job > $tmp_diag_job
            mv $tmp_diag_job $diag_job
          fi

          # Aerosol radiative properties
          if [ -z "$aodpth_volc_deck" ]; then
            pdiag_aodpth_volc=off
          else
            pdiag_aodpth_volc=on
          fi
## TODO ## disable aodpth_volc for the time being
          pdiag_aodpth_volc=off
          ToF pdiag_aodpth_volc
          if [ $pdiag_aodpth_volc -eq 1 ]; then
            sed 's/^[# ]*\. *aodpth_volc[0-9]*.dk/  . '$aodpth_volc_deck'/' $diag_job > $tmp_diag_job
            mv $tmp_diag_job $diag_job
          else
            sed 's/^ *\. *aodpth_volc[0-9]*.dk.*$//' $diag_job > $tmp_diag_job
            mv $tmp_diag_job $diag_job
          fi

          # aerorf
          if [ -z "$aerorf_deck" ]; then
            pdiag_aerorf=off
          else
            pdiag_aerorf=on
          fi
## TODO ## disable aerorf for the time being
          pdiag_aerorf=off
          ToF pdiag_aerorf
          if [ $pdiag_aerorf -eq 1 ]; then
            sed 's/^[# ]*\. *aerorf[0-9]*.dk/  . '$aerorf_deck'/' $diag_job > $tmp_diag_job
            mv $tmp_diag_job $diag_job
          else
            sed 's/^ *\. *aerorf[0-9]*.dk.*$//' $diag_job > $tmp_diag_job
            mv $tmp_diag_job $diag_job
          fi

          # CMIP5 daily fields (dd, dp and ds files)
          if [ $with_dd -eq 1 -o $with_dp -eq 1 -o $with_ds -eq 1 ]; then
            pdiag_daily_cmip5=on
          else
            pdiag_daily_cmip5=off
          fi
          ToF pdiag_daily_cmip5
          if [ $pdiag_daily_cmip5 -eq 1 ]; then
            sed 's/^[# ]*\. *daily_cmip5[0-9]*.dk/  . '$daily_cmip5_deck'/' $diag_job > $tmp_diag_job
            mv $tmp_diag_job $diag_job
          elif [ $pdiag_daily_cmip5 -eq 0 ]; then
            sed 's/^ *\. *daily_cmip5[0-9]*.dk.*$//' $diag_job > $tmp_diag_job
            mv $tmp_diag_job $diag_job
          fi

          # Additional CMIP5 daily fields
          # daily_cmip5_cfmip_v2_dsd3D appends to the "_dd" file when daily_cmip5_tiers
          # contains "1" and creates/appends the "_dsd" file
          if [ $with_dsd -eq 1 ]; then
            pdiag_daily_cmip5_cfmip_v2_dsd3D=on
          else
            pdiag_daily_cmip5_cfmip_v2_dsd3D=off
          fi
          ToF pdiag_daily_cmip5_cfmip_v2_dsd3D
          if [ $pdiag_daily_cmip5_cfmip_v2_dsd3D -eq 1 ]; then
            sed 's/^[# ]*\. *daily_cmip5_cfmip_v2_dsd3D[0-9]*.dk/  . '$daily_cmip5_cfmip_deck'/' $diag_job > $tmp_diag_job
            mv $tmp_diag_job $diag_job
          elif [ $pdiag_daily_cmip5_cfmip_v2_dsd3D -eq 0 ]; then
            sed 's/^ *\. *daily_cmip5_cfmip_v2_dsd3D[0-9]*.dk.*$//' $diag_job > $tmp_diag_job
            mv $tmp_diag_job $diag_job
          fi

          # COSP daily diagnostics
          if [ $with_dcosp -eq 1 ]; then
            pdiag_daily_cmip5_cosp_v2=on
          else
            pdiag_daily_cmip5_cosp_v2=off
          fi
          ToF pdiag_daily_cmip5_cosp_v2
          if [ $pdiag_daily_cmip5_cosp_v2 -eq 1 ]; then
            sed 's/^[# ]*\. *daily_cmip5_cosp[0-9]*.dk/  . '$daily_cmip5_cosp_deck'/' $diag_job > $tmp_diag_job
            mv $tmp_diag_job $diag_job
          elif [ $pdiag_daily_cmip5_cosp_v2 -eq 0 ]; then
            sed 's/^ *\. *daily_cmip5_cosp_v2[0-9]*.dk.*$//' $diag_job > $tmp_diag_job
            mv $tmp_diag_job $diag_job
          fi

          # Create cp files
          if [ $with_cp -eq 1 ]; then
            pdiag_cmstats=on
          else
            pdiag_cmstats=off
          fi
          ToF pdiag_cmstats
          if [ $pdiag_cmstats -eq 1 ]; then
            # Add cmstats to the diagnostic job
            sed 's/^[# ]*\. *cmstats[0-9]*.dk/  . '$cmstats_deck'/' $diag_job > $tmp_diag_job
            mv $tmp_diag_job $diag_job
          elif [ $pdiag_cmstats -eq 0 ]; then
            # Remove cmstats from the diagnostic job
            sed 's/^ *\. *cmstats[0-9]*.dk.*$//' $diag_job > $tmp_diag_job
            mv $tmp_diag_job $diag_job
          fi

        fi
      fi

      # If not found above then look in a (possibly user supplied) dir
      diag_dir="/users/tor/acrn/esm/PRODUCTION/CCCJOB_TEMPLATE/diag_scripts"
      if [ -z "$diag_path" ]; then
        [   -z "$diag_dir" ] && bail "diag_dir is undefined."
        [ ! -d "$diag_dir" ] && bail "diag_dir = $diag_dir is not a directory."
        [ ! -x "$diag_dir" ] && bail "Cannot read diag_dir = $diag_dir"
        diag_path="$diag_dir/$diag_job"
      fi

      # Ensure this is a valid path name
      [   -z "$diag_path" ] && bail "$diag_job is missing or empty"
      [ ! -s "$diag_path" ] && bail "$diag_path is missing or empty"
    fi

    # Determine the number of months in the current job
    months_in_this_job=`echo $start_year $start_mon $stop_year $stop_mon|\
                    awk '{if ($1<=0 || $3<=0 || $3<$1) {print "0"; exit}
                          if ($2<1 || $2>12 || $4<1 || $4>12) {print "0"; exit}
                          if ($1 == $3) {m=$4-$2+1}
                          else {m=($3-$1-1)*12+13-$2+$4}; printf "%d",m}' -`
    [ $months_in_this_job -le 0 ] && \
      bail "Invalid months_in_this_job = $months_in_this_job ...start_year = $start_year start_mon = $start_mon stop_year = $stop_year stop_mon = $stop_mon"

    # Determine the number of months from the start
    # of the run to the end of the current job
    months_since_run_start=`echo $run_start_year $run_start_month $stop_year $stop_mon|\
                    awk '{if ($1<=0 || $3<=0 || $3<$1) {print "0"; exit}
                          if ($2<1 || $2>12 || $4<1 || $4>12) {print "0"; exit}
                          if ($1 == $3) {m=$4-$2+1}
                          else {m=($3-$1-1)*12+13-$2+$4}; printf "%d",m}' -`
    [ $months_since_run_start -le 0 ] && \
      bail "Invalid months_since_run_start = $months_since_run_start ...run_start_year = $run_start_year run_start_month = $run_start_month stop_year = $stop_year stop_mon = $stop_mon"

    # Determine the number of months from the start of the current job
    # to the end of the run
    months_until_run_stop=`echo $start_year $start_mon $run_stop_year $run_stop_month|\
                      awk '{if ($1<=0 || $3<=0 || $3<$1) {print "0"; exit}
                            if ($2<1 || $2>12 || $4<1 || $4>12) {print "0"; exit}
                            if ($1 == $3) {m=$4-$2+1}
                            else {m=($3-$1-1)*12+13-$2+$4}; printf "%d",m}' -`
    [ $months_until_run_stop -le 0 ] && \
      bail "Invalid months_until_run_stop = $months_until_run_stop ...start_year = $start_year start_mon = $start_mon run_stop_year = $run_stop_year run_stop_month = $run_stop_month"

    # If chunk_size divides evenly into months_since_run_start (accounting for a
    # possibly different first chunk size) or this is the last job in the run
    # then it is time to run spltdiag and pooling and to delete the diagnostic files
    is_chunk_time=`echo $months_since_run_start $first_chunk_size $chunk_size | awk '
                  {if ($1<$2)  {print "0"; exit};
                   if ($1==$2) {print "1"; exit};
                   m=$1-$2; if (m%$3==0) {print "1"} else {print "0"}}' -`
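    # Illustrative example: with first_chunk_size=12 and chunk_size=60,
    # months_since_run_start=72 gives (72-12)%60=0 so is_chunk_time=1,
    # whereas months_since_run_start=84 gives remainder 12 so is_chunk_time=0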

    # Determine the number of months from the end of the previous chunk to the end of
    # the current job based on the number of months since run start and chunk size(s)
    months_since_prev_chunk=`echo $months_since_run_start $first_chunk_size $chunk_size | awk '
                            {if ($1<=$2) {printf "%d",$1; exit};
                            m=$1-$2; mm=m%$3; if(mm==0){mm=$3}; printf "%d",mm}' -`
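    # Continuing the example above: months_since_run_start=72 gives
    # months_since_prev_chunk=60 (a full chunk just ended), while
    # months_since_run_start=84 gives months_since_prev_chunk=12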

    # If the number of months since run_start_(year|month) is equal to
    # the first chunk size then this must be the first chunk
    if [ $months_since_run_start -eq $first_chunk_size ]; then
      # This is the first time the extra decks will be run
      is_first_chunk=1
      # Reset chunk_size. This may or may not change its value.
      eval chunk_size\=$first_chunk_size
    fi

    if [ $is_last_job -eq 1 ]; then
      # Determine if the extra decks (pooling, time series etc) are to be run
      # after the last job in the string

      if [ $months_since_prev_chunk -lt $chunk_size ]; then
        # If this is not a full pooling interval then the user may control whether
        # or not the extra decks are to be run at the end of the job string
        if [ $with_partial_chunk -eq 0 ]; then
          # Do not run the extra decks
          is_chunk_time=0
        else
          # Run the extra decks even though this is a short interval
          is_chunk_time=1
        fi
      else
        # Run extra decks (pooling, time series etc) on the last job of the run
        is_chunk_time=1
      fi

      # Set chunk_size so that it is exactly the number of months since
      # last time pooling etc were run, or the start of the run if
      # pooling etc has never been done for this run
      chunk_size=$months_since_prev_chunk
    fi

    # chunk_start_year and chunk_start_mon are $chunk_size
    # months before stop_year and stop_mon
    chunk_start_year=`echo $chunk_size $stop_year $stop_mon|awk '
      {y1 = $2 - int($1/12); m1 = $3 - $1%12 + 1;
       if (m1>12) {m1 = m1 - 12; y1 += 1}
       if (m1<1)  {m1 = m1 + 12; y1 -= 1}
       printf "%3.3d",y1}' -`
    chunk_start_mon=`echo $chunk_size $stop_year $stop_mon|awk '
      {y1 = $2 - int($1/12); m1 = $3 - $1%12 + 1;
       if (m1>12) {m1 = m1 - 12; y1 += 1}
       if (m1<1)  {m1 = m1 + 12; y1 -= 1}
       printf "%2.2d",m1}' -`
    chunk_stop_year=$stop_year
    chunk_stop_mon=$stop_mon
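    # Illustrative example: chunk_size=6 with stop 011:03 gives
    # chunk_start_year=010 and chunk_start_mon=10, i.e. the chunk spans 010:10..011:03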

    # When diagnostics and time series are created for a coupled model run
    # do not delete gz files until the time series are complete
    if [ $with_time_series -eq 1 -a x"$coupled" = xon ]; then
      # eval is required to hide this assignment from cccjob substitution
      xxx=`echo $mdelete_suffix_list|sed 's/\g\z//'`
      eval mdelete_suffix_list\=\"$xxx\"
    fi

    # When diagnostics and time series are created for cmam20 runs
    # do not delete td files until the time series are complete
    if [ $with_time_series -eq 1 -a x"$pdiag_type" = xcmam20 ]; then
      # eval is required to hide this assignment from cccjob substitution
      xxx=`echo $mdelete_suffix_list|sed 's/\t\d//'`
      eval mdelete_suffix_list\=\"$xxx\"
    fi

    #######################################################
    ###################### Section 1 ######################
    #######################################################
    # This section is done for every invocation of pxdump
    #
    # Create a job string to do the following
    # - Dump history files from the back end to cfs
    # - Save history files on the front end
    # - Create diagnostic files on the front end
    # - Remove history files from the front end
    # - Dump diagnostic files to cfs
    # - Delete history files from the back end
    # - Clean up lock files etc
    #######################################################

    if [ -n "$mdump_months" ]; then
      # If mdump_months is defined then dump files in chunks of mdump_months

      # Prepare to dump history files from the back end to cfs
      DUMPHIST=`echo "mdump:-${mdump_months}m"|sed 's/ //g'`
      # Do not define an arclabel for this dump, let mdump do this
      mdump_arclabel=''
      mdump_rmdskcpy=off
      mdump_svsave=off
      mdump_sv=off
      mdump_with_lsarc=off
      mdump_dpalist=on

      if [ $pdiag_dump_hist -eq 0 ]; then
        # Never dump history files to cfs
        # History files will always be deleted once they are no longer needed
        DUMPHIST=''
      fi

      # Load the same files from cfs onto the front end
      LOADHIST=`echo "mload:-${mdump_months}m"|sed 's/ //g'`
      if [ $gateway_is_back_end -eq 1 ]; then
        # Load these files onto the back end
        mload_sv=off
      else
        # Load these files onto the front end
        mload_sv=on
      fi

      # Copy the same files from the back end onto the front end
      COPYHIST=`echo "othr2fe:-${mdump_months}m"|sed 's/ //g'`

      if [ $gateway_is_back_end -eq 1 ]; then
        # Do not delete files from the "front" end when running diags on the back end
        DELHIST=''
      else
        # Delete the same files from the front end
        DELHIST=`echo "mdelete:${mdump_months}m"|sed 's/ //g'`
      fi
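      # Illustrative example of the job description tokens built above,
      # assuming mdump_months=12:
      #   DUMPHIST="mdump:-12m"    LOADHIST="mload:-12m"
      #   COPYHIST="othr2fe:-12m"  DELHIST="mdelete:12m"
      # (DELHIST is only defined when the gateway is not the back end)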

      # Run the diagnostics
      RUNDIAG=`echo "${diag_path}:m"|sed 's/ //g'`
      pdiag_dummy_diag=off
      ToF pdiag_dummy_diag
      if [ $pdiag_dummy_diag -eq 1 ]; then
        RUNDIAG='mkdiag:m'
        pdiag_mkdiag_clone=dc_t_pd17_1850
        mkdiag_clone=${pdiag_mkdiag_clone:=''}
      fi

      # Delete all files in fdel_list from the back end
      [ ! -s fdel_list ] && bail "fdel_list is missing or empty"
      PXDELJOB='pxdel=fdel_list:s'

      # Define a string containing a job description for dumping
      # diagnostic files at the end of the current job
      if [ -z "$ddump_months" ]; then
        # When ddump_months has not been set, dump diag files here
        mdump_diag_uxxx="d$ch2"
        if [ $months_in_this_job -gt 12 ]; then
          DUMPDIAG=`echo "mdump_diag:s:12m"|sed 's/ //g'`
          mdump_diag_arclabel=""
        else
          DUMPDIAG=`echo "mdump_diag:s"|sed 's/ //g'`
          mdump_diag_arclabel="${mdump_diag_uxxx}_${runid}_${ym_range}_diag"
        fi
        # Allow the user to supply the suffix list for all diag files that are to be dumped
        pdiag_mdump_diag_suffix_list=${pdiag_diag_suffix_list:-''}
        if [ -z "$pdiag_mdump_diag_suffix_list" ]; then
          mdump_diag_suffix_list=`echo $mdump_suffix_list|awk '
                  {for (i=1; i<=NF; i++) {if ($i ~ /^gs$/) {printf "gp xp "};}}' -`
          # Check for the existence of a "cm" file
          has_cm=`echo $mdump_suffix_list|awk '
                  {for (i=1; i<=NF; i++) {if ($i ~ /^cm$/) {printf "cm"}}}' -`
          if [ x"$has_cm" = "xcm" ]; then
            # There is a cm file and therefore, usually, a cp file unless the user has provided
            # their own diag deck which does not contain the comdeck that creates the cp file
            [ $with_cp -eq 1 ]  && mdump_diag_suffix_list="$mdump_diag_suffix_list cp"
          fi
          [ $with_dd    -eq 1 ] && mdump_diag_suffix_list="$mdump_diag_suffix_list dd"
          [ $with_dp    -eq 1 ] && mdump_diag_suffix_list="$mdump_diag_suffix_list dp"
          [ $with_dsd   -eq 1 ] && mdump_diag_suffix_list="$mdump_diag_suffix_list dsd"
          [ $with_dcosp -eq 1 ] && mdump_diag_suffix_list="$mdump_diag_suffix_list dcosp"
          [ $with_cc    -eq 1 ] && mdump_diag_suffix_list="$mdump_diag_suffix_list cc"
          [ $with_ie    -eq 1 ] && mdump_diag_suffix_list="$mdump_diag_suffix_list ie"
          [ $with_xp6   -eq 1 ] && mdump_diag_suffix_list="$mdump_diag_suffix_list xp6"
          # [ $with_gp6   -eq 1 ] && mdump_diag_suffix_list="$mdump_diag_suffix_list gp6"
        else
          # If the user has set pdiag_mdump_diag_suffix_list then use its value
          # No checking is done in this case
          mdump_diag_suffix_list="$pdiag_mdump_diag_suffix_list"
        fi
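        # Illustrative example: if mdump_suffix_list contains "gs" and "cm" and
        # with_cp=1 and with_dd=1, the list built above ends up as roughly
        #   mdump_diag_suffix_list="gp xp cp dd"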
        if [ $gateway_is_back_end -eq 1 ]; then
          # Dump these files from the back end
          mdump_diag_sv=off
        else
          # Dump these files from the front end
          eval mdump_diag_sv\=on
        fi
        mdump_diag_with_lsarc=off
        # Ensure that the copy of the diagnostic files on the cfs disk buffer
        # gets deleted as soon as it has been written to tape
        mdump_diag_rmdskcpy=on
      else
        # Dump diag files below, at the end of every pooling interval
        DUMPDIAG=''
        mdump_diag_suffix_list=''
        mdump_diag_uxxx=''
        mdump_diag_arclabel=''
        mdump_diag_sv=''
        mdump_diag_rmdskcpy=''
        mdump_diag_with_lsarc=''
      fi

      if [ $pdiag_dump_diag -eq 0 ]; then
        # Never dump diagnostic files to cfs
        DUMPDIAG=''
        mdump_diag_suffix_list=''
        mdump_diag_uxxx=''
        mdump_diag_arclabel=''
        mdump_diag_sv=''
        mdump_diag_rmdskcpy=''
        mdump_diag_with_lsarc=''
      fi

      if [ $gateway_is_back_end -eq 1 ]; then
        # Force sv = off
        eval sv\=off
      else
        # Set sv null so that it does not override mload_sv or mdump_sv
        eval sv\=''
      fi

      # Remove lock files created above (only for parallel runs)
      RMLOCK="rm_lock_job:s"
      [ -n "$stand_alone_craworkval" ] && RMLOCK=''

      # Determine the job description

      if [ -n "$diag_job" ]; then
        # The user has supplied the name of a diagnostic job
        # so diagnostics will be included in this job string

        if [ $load_hist -eq 1 ]; then
          # Load history files onto the front end from cfs and run diagnostics
          # before deleting history files from the front end
          JOBDESC="$LOADHIST $RUNDIAG $DELHIST $DUMPDIAG $RMLOCK"
        else
          if [ $dump_or_copy_hist -eq 1 ]; then
            # Dump history files from the back end (sv = off) and save them on the front
            # end (svsave = on) then run the diagnostics and delete history files from
            # both the front end and the back end
            JOBDESC="$DUMPHIST $RUNDIAG $DELHIST $DUMPDIAG $RMLOCK $PXDELJOB"
            # Dump history files from the back end
            mdump_sv=off
            if [ $gateway_is_back_end -eq 1 ]; then
              # Do not save history files on the front end at the same time
              mdump_svsave=off
            else
              # Also save these history files on the front end at the same time
              mdump_svsave=on
            fi
            # Ensure that the copy of the history files on the cfs disk buffer
            # gets deleted as soon as it has been written to tape
            mdump_rmdskcpy=on
            # Allow svsave = on when using cnfs as RUNPATH or CCRNTMP
            mdump_cnfsallow=on
            # cache DATAPATH information in a local file for access, save or delete
            mdump_dpalist=on
          else
            # Copy history files from the back end and run diagnostics
            # before deleting from the front end but not from the back end
            if [ $del_hist_after_copy -eq 1 ]; then
              # Delete history files from the back end after they are copied
              JOBDESC="$COPYHIST $RUNDIAG $DELHIST $DUMPDIAG $RMLOCK $PXDELJOB"
            else
              # Do not delete history files from the back end
              JOBDESC="$COPYHIST $RUNDIAG $DELHIST $DUMPDIAG $RMLOCK"
            fi
          fi
        fi
      else
        # Do not run diagnostics, only dump and delete history files
        JOBDESC="$DUMPHIST $RMLOCK $PXDELJOB"
      fi
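      # Illustrative example of a resulting JOBDESC, assuming mdump_months=12,
      # dump_or_copy_hist=1, a user supplied diag_job and months_in_this_job<=12:
      #   "mdump:-12m <diag_path>:m mdelete:12m mdump_diag:s rm_lock_job:s pxdel=fdel_list:s"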

    else

      # by default dump and delete all files at the end of the run period
      # Define an arclabel for this tape dump
      mdump_arclabel="${pxdiag_uxxx}_${runid}_$ym_range"

      # sv on means dump from front end disks
      if [ $gateway_is_back_end -eq 1 ]; then
        # sv should be off when gateway is the back end
        eval sv\=off
      else
        # sv should always be on when gateway is the front end
        eval sv\=on
      fi
      JOBDESC="mdump:s $RMLOCK"

      if [ $pdiag_dump_hist -eq 0 ]; then
        # Never dump history files to cfs
        [ -z "$RMLOCK" ] && bail "Cannot create the section 1 jobs string. RMLOCK is missing"
        JOBDESC="$RMLOCK"
      fi

      # Ensure that the copy of the history files on the cfs disk buffer
      # gets deleted as soon as it has been written to tape
      mdump_rmdskcpy=on

    fi

    varlist='uxxx
             runid
             noprint
             mdump_uxxx
             mdump_suffix_list
             mdump_cfsuser
             mdump_CCRNTMP
             mdump_RUNPATH
             mdump_qsublog
             mdump_with_lock_file
             mdump_sv
             mdump_svsave
             mdump_rmdskcpy
             mdump_cnfsallow
             mdump_dpalist
             mdump_with_lsarc
             mdump_diag_suffix_list
             mdump_diag_uxxx
             mdump_diag_arclabel
             mdump_diag_sv
             mdump_diag_rmdskcpy
             mdump_diag_with_lsarc
             check_cfs_arcfile
             nontwrkchk
             bemach
             cfsuser
             masterdir
             shortermdir
             nolist
             sv
             besc
             mdump_arclabel
             back_end_mach'

    varlist2='mload_uxxx
              mload_suffix_list
              mload_cfsuser
              mload_CCRNTMP
              mload_RUNPATH
              mload_sv
              mdelete_uxxx
              mdelete_suffix_list
              mdelete_CCRNTMP
              mdelete_RUNPATH
              mdelete_leave_last_mon
              othr2fe_uxxx
              othr2fe_suffix_list
              othr2fe_CCRNTMP
              othr2fe_RUNPATH
              run_start_year
              run_start_month
              run_stop_year
              run_stop_month
              mkdiag_clone
              CCCJOB_ROOT
              JHOME
              JHOME_DATA
              JHOME_RUN
              BERUNPATH'

    diagvars='diag_CCRNTMP
              diag_RUNPATH
              diag_uxxx
              model_uxxx
              model_version
              gptime
              stime
              mtime
              gssave
              dsd3D
              daily_cmip5_tiers
              year_offset
              PhysA
              PhysO
              CarbA
              CarbO
              CarbL
              pxdel_check_last_mon
              pxdel_lock_check'

    # Create a file containing variable definitions to pass to cccjob

    # This eval will protect this crawork def from cccjob substitution
    craworkval=${pxdiag_id}_$stamp
    [ -n "$stand_alone_craworkval" ] && craworkval=$stand_alone_craworkval
    eval craworkdef\=crawork\=$craworkval
    cat > mdump_defs <<EOF
$craworkdef
EOF
    for var in $varlist; do
      eval val=\$$var
      # If this variable is defined add it to the list
      [ -n "$val" ] && echo ${var}=\'$val\' >> mdump_defs
    done
    for var in $varlist2; do
      eval val=\$$var
      # If this variable is defined add it to the list
      [ -n "$val" ] && echo ${var}=\'$val\' >> mdump_defs
    done
    for var in $diagvars; do
      eval val=\$$var
      # If this variable is defined add it to the list
      [ -n "$val" ] && echo ${var}=\'$val\' >> mdump_defs
    done

    # Define start/stop year/month cccjob options for this invocation
    curr_year=`echo $pxdiag_start_year|awk '{printf "%d",$1}' -`
    curr_mon=`echo $pxdiag_start_mon|awk '{printf "%d",$1}' -`
    nxt_year=`echo $pxdiag_stop_year|awk '{printf "%d",$1}' -`
    nxt_mon=`echo $pxdiag_stop_mon|awk '{printf "%d",$1}' -`
    start_opt="--start_time=${curr_year}:$curr_mon"
    stop_opt="--stop_time=${nxt_year}:$nxt_mon"
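    # Illustrative example: pxdiag_start_year=010, pxdiag_start_mon=01,
    # pxdiag_stop_year=010, pxdiag_stop_mon=12 gives
    #   start_opt="--start_time=10:1"  stop_opt="--stop_time=10:12"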

    # Write the job string to dump history files and create/dump diagnostics
    # This is done every time pxdiag is invoked
    $CCCJOB_ENV cccjob --out=$pxdiag_job --job="$JOBDESC" $start_opt $stop_opt mdump_defs

    ###############################################################
    ########################## Section 2 ##########################
    ###############################################################
    # This section is done at the end of every chunk_size months
    #
    # Create a job string to do the following
    # - Optionally run pooling
    # - Optionally create time series from diagnostic files
    # - Delete diagnostic files from the front end
    # - Clean up lock files etc
    ###############################################################

    if [ $is_chunk_time -eq 1 ]; then
      # Define a string that will run pooling, deleting diagnostic files
      # and creating time series then append this string to the
      # "$pxdiag_job" string created above

      # Define a range string for the current time series chunk
      chunk_ym_range="${chunk_start_year}m${chunk_start_mon}_${chunk_stop_year}m${chunk_stop_mon}"
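      # Illustrative example: a chunk spanning 010:01..014:12 gives
      # chunk_ym_range="010m01_014m12"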

      # Define variables specific to ondisk.
      # Ondisk will cause the string to abort if any requisite files are missing
      # The files required are monthly diagnostic files for the full chunk_size
      # number of months and possibly the "gz" history files if this is a coupled
      # model and time series are requested
      ondisk_prefix_list="d$ch2"
      if [ -z "$pdiag_diag_suffix_list" ]; then
        # If the user has not supplied a diag suffix list then set it here
        ondisk_suffix_list="gp xp"
        [ $with_cc    -eq 1 ] && ondisk_suffix_list="$ondisk_suffix_list cc"
        [ $with_ie    -eq 1 ] && ondisk_suffix_list="$ondisk_suffix_list ie"
        [ $with_gp6   -eq 1 ] && ondisk_suffix_list="$ondisk_suffix_list gp6"
        [ $with_xp6   -eq 1 ] && ondisk_suffix_list="$ondisk_suffix_list xp6"
        if [ $with_td    -eq 1 ]; then
          ondisk_prefix_list="$ondisk_prefix_list :m$ch2"
          ondisk_suffix_list="$ondisk_suffix_list :td"
        fi
      else
        # If the user has supplied a diag suffix list then use it for ondisk_suffix_list
        ondisk_suffix_list=$pdiag_diag_suffix_list
      fi
      ondisk_comment="run: $runid  range: $chunk_ym_range"
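      # Illustrative example (assuming ch2=c): the default lists above become
      #   ondisk_prefix_list="dc"      ondisk_suffix_list="gp xp"
      # or, when with_td=1,
      #   ondisk_prefix_list="dc :mc"  ondisk_suffix_list="gp xp :td"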

      if [ -n "$ddump_months" ]; then
        # When ddump_months is defined, dump diag files here
        mdump_diag_uxxx="d$ch2"
        if [ $chunk_size -gt $ddump_months ]; then
          DUMPDIAG=`echo "mdump_diag:${chunk_size}m:${ddump_months}m"|sed 's/ //g'`
        else
          DUMPDIAG=`echo "mdump_diag:${chunk_size}m"|sed 's/ //g'`
        fi
        mdump_diag_arclabel=""
        # Allow the user to supply the suffix list for all diag files that are to be dumped
        pdiag_mdump_diag_suffix_list=${pdiag_diag_suffix_list:-''}
        if [ -z "$pdiag_mdump_diag_suffix_list" ]; then
          mdump_diag_suffix_list=`echo $mdump_suffix_list|awk '
                  {for (i=1; i<=NF; i++) {if ($i ~ /^gs$/) {printf "gp xp "}}}' -`
          # Check for the existence of a "cm" file
          has_cm=`echo $mdump_suffix_list|awk '
                  {for (i=1; i<=NF; i++) {if ($i ~ /^cm$/) {printf "cm"}}}' -`
          if [ x"$has_cm" = "xcm" ]; then
            # There is a cm file and therefore, usually, a cp file unless the user has provided
            # their own diag deck which does not contain the comdeck that creates the cp file
            [ $with_cp -eq 1 ]  && mdump_diag_suffix_list="$mdump_diag_suffix_list cp"
          fi
          [ $with_dd    -eq 1 ] && mdump_diag_suffix_list="$mdump_diag_suffix_list dd"
          [ $with_dp    -eq 1 ] && mdump_diag_suffix_list="$mdump_diag_suffix_list dp"
          [ $with_dsd   -eq 1 ] && mdump_diag_suffix_list="$mdump_diag_suffix_list dsd"
          [ $with_dcosp -eq 1 ] && mdump_diag_suffix_list="$mdump_diag_suffix_list dcosp"
          [ $with_cc    -eq 1 ] && mdump_diag_suffix_list="$mdump_diag_suffix_list cc"
          [ $with_ie    -eq 1 ] && mdump_diag_suffix_list="$mdump_diag_suffix_list ie"
          [ $with_xp6   -eq 1 ] && mdump_diag_suffix_list="$mdump_diag_suffix_list xp6"
          # [ $with_gp6   -eq 1 ] && mdump_diag_suffix_list="$mdump_diag_suffix_list gp6"
        else
          # If the user has set pdiag_mdump_diag_suffix_list then use its value
          # No checking is done in this case
          mdump_diag_suffix_list="$pdiag_mdump_diag_suffix_list"
        fi
        if [ $gateway_is_back_end -eq 1 ]; then
          mdump_diag_sv=off
        else
          eval mdump_diag_sv\=on
        fi
        mdump_diag_with_lsarc=off
        # Ensure that the copy of the diagnostic files on the cfs disk buffer
        # gets deleted as soon as it has been written to tape
        mdump_diag_rmdskcpy=on
      else
        # When ddump_months is not set, diag files are dumped above
        DUMPDIAG=''
        mdump_diag_suffix_list=''
        mdump_diag_uxxx=''
        mdump_diag_arclabel=''
        mdump_diag_sv=''
        mdump_diag_rmdskcpy=''
        mdump_diag_with_lsarc=''
      fi

      if [ $pdiag_dump_diag -eq 0 ]; then
        # Never dump diagnostic files to cfs
        DUMPDIAG=''
        mdump_diag_suffix_list=''
        mdump_diag_uxxx=''
        mdump_diag_arclabel=''
        mdump_diag_sv=''
        mdump_diag_rmdskcpy=''
        mdump_diag_with_lsarc=''
      fi

      if [ $pdiag_dump_hist -eq 0 -a $pdiag_with_dumprs -eq 1 ]; then
        # When history files are not dumped to cfs above, we need to save restarts
        # at the end of every time series chunk
        dumprs_suffix_list="rs ab an"
        if [ x"$coupled" = xon ]; then
          # Add ocean restart files
          [ $with_cs -eq 1 ] && dumprs_suffix_list="$dumprs_suffix_list cs"
          [ $with_os -eq 1 ] && dumprs_suffix_list="$dumprs_suffix_list os"
          [ $with_ob -eq 1 ] && dumprs_suffix_list="$dumprs_suffix_list ob"
          [ $with_cplrs -eq 1 ] && dumprs_suffix_list="$dumprs_suffix_list cplrs.tar"
        fi
        rs_flist_cfs=tmp_rs_flist_cfs_$stamp
        rs_flist_del=tmp_rs_flist_del_$stamp
        rm -f $rs_flist_cfs
        touch $rs_flist_cfs
        rm -f $rs_flist_del
        touch $rs_flist_del

        # Populate rs_flist_cfs with lists of files to be dumped
        dumprs_curr_year=`echo $chunk_start_year|awk '{printf "%3.3d",$1-1}' -`
        while [ $dumprs_curr_year -lt $chunk_stop_year ]; do
          dumprs_curr_year=`echo $dumprs_curr_year|awk '{printf "%3.3d",$1+1}' -`
          for sfx in $dumprs_suffix_list; do
            # Using chunk_stop_mon assumes that when chunk_start_year and chunk_stop_year
            # are different, chunk_stop_mon is always 12
            if [ $chunk_start_year -eq $chunk_stop_year ]; then
              dumprs_curr_mon=$chunk_stop_mon
            else
              dumprs_curr_mon=12
            fi
            local_rs="m${ch2}_${runid}_${dumprs_curr_year}_m${dumprs_curr_mon}_$sfx"
            echo "$local_rs" >> $rs_flist_cfs
          done
        done
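        # Illustrative example (assuming ch2=c and runid=abc): for a chunk ending
        # in 014:12 the loop above adds one line per year and suffix, e.g.
        #   mc_abc_010_m12_rs
        #   mc_abc_010_m12_ab
        #   ...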

        # Populate rs_flist_del with lists of files to be deleted
        delrs_curr_year=`echo $chunk_start_year|awk '{printf "%3.3d",$1-1}' -`

        # Add the coupler executable to the delete list but not to the dump list
        # since the coupler executable is always saved in the coupler restart archive
        [ x"$coupled" = xon -a $with_cplrs -eq 1 ] && dumprs_suffix_list="$dumprs_suffix_list cpl.exe"

        # Add the restarts for Dec of the year prior to chunk_start_year
        # to the list of files to be deleted
        # If these restarts do not exist then the delete will be silently ignored
        for sfx in $dumprs_suffix_list; do
          local_rs="m${ch2}_${runid}_${delrs_curr_year}_m12_$sfx"
          echo "$local_rs" >> $rs_flist_del
        done

        # Add restarts for every month in the current chunk except the last month
        while [ $delrs_curr_year -lt $chunk_stop_year ]; do
          delrs_curr_year=`echo $delrs_curr_year|awk '{printf "%3.3d",$1+1}' -`
          for delrs_curr_mon in "01" "02" "03" "04" "05" "06" "07" "08" "09" "10" "11" "12"; do
            # Never delete the last year/month of the time series chunk
            [ $delrs_curr_year -eq $chunk_stop_year -a $delrs_curr_mon -eq $chunk_stop_mon ] && break
            for sfx in $dumprs_suffix_list; do
              # Add an entry for the job script
              echo "m${ch2}_${runid}_${delrs_curr_year}_m${delrs_curr_mon}__script" >> $rs_flist_del
              local_rs="m${ch2}_${runid}_${delrs_curr_year}_m${delrs_curr_mon}_$sfx"
              # Only add file names for existing files
              found=0
              test -e "$DATAPATH/${local_rs}.001" && found=1
              test -e "$DATAPATH/${local_rs}.002" && found=1
              test -e "$DATAPATH/${local_rs}.003" && found=1
              test -e "$DATAPATH/${local_rs}.004" && found=1
              test -e "$DATAPATH/${local_rs}.005" && found=1
              test $found -eq 1 || continue
              echo "$local_rs" >> $rs_flist_del
            done
          done
        done

        # Insert a dump list job when rs_flist_cfs is not empty
        DUMPRS="dump_list_rs=${rs_flist_cfs}:${chunk_size}m"
        [ $dumprs_sublist -eq 1 ] && DUMPRS="dump_sublist_rs=${rs_flist_cfs}:${chunk_size}m"
        # If rs_flist_cfs is empty then do not insert the dump_list job
        [ ! -s "$rs_flist_cfs" ] && DUMPRS=''
        if [ $gateway_is_back_end -eq 1 ]; then
          dump_list_rs_sv=off
          dump_sublist_rs_sv=off
        else
          eval dump_list_rs_sv\=on
          eval dump_sublist_rs_sv\=on
        fi
        dump_list_rs_arclabel="m${ch2}_${runid}_${chunk_ym_range}_restart"
        dump_sublist_rs_arclabel="m${ch2}_${runid}_${chunk_ym_range}_restart"

        # Add a delete job to DUMPRS
        if [ -n "$DUMPRS" -a -s "$rs_flist_del" ]; then
          # If the DUMPRS job was inserted and the delete list is not empty
          # then add a delete list job
          DUMPRS="$DUMPRS del_list_rs=${rs_flist_del}:${chunk_size}m"
        fi
        
      else
        DUMPRS=''
        dump_list_rs_sv=''
        dump_list_rs_arclabel=''
        dump_sublist_rs_sv=''
        dump_sublist_rs_arclabel=''
      fi

      # Define a string containing a job description for time series
      if [ $with_time_series -eq 1 ]; then
        TIMESERIES=`echo "${ch2}diag2ts:${chunk_size}m"|sed 's/ //g'`
        if [ x"$coupled" = xon -a $with_gz -eq 1 ]; then
          # When time series are created for a coupled model run,
          # we must also delete gz after the time series are created
          DELGZ=`echo "mdelete_gz:${chunk_size}m"|sed 's/ //g'`
          mdelete_gz_suffix_list="gz"
          mdelete_gz_uxxx="m$ch2"
          # Modify these to tell ondisk that the gz files are also required
          ondisk_prefix_list="d${ch2}:m${ch2}"
          ondisk_suffix_list="gp xp:gz"
        else
          DELGZ=''
          mdelete_gz_suffix_list=''
          mdelete_gz_uxxx=''
        fi
        if [ x"$pdiag_type" = xcmam20 ]; then
          # When time series are created for a cmam20 run,
          # we must also delete td after the time series are created
          DELTD=`echo "mdelete_td:${chunk_size}m"|sed 's/ //g'`
          mdelete_td_suffix_list="td"
          mdelete_td_uxxx="m$ch2"
          # use_input_ibuf2 = off means use YYYYMM format for ibuf2 in output time series files
          # use_input_ibuf2 =  on means use the ibuf2 value found in the corresponding input file
          use_input_ibuf2=on
        elif [ $with_td -eq 1 ]; then
          # Also delete td after the time series are created
          DELTD=`echo "mdelete_td:${chunk_size}m"|sed 's/ //g'`
          mdelete_td_suffix_list="td"
          mdelete_td_uxxx="m$ch2"
        else
          DELTD=''
          mdelete_td_suffix_list=''
          mdelete_td_uxxx=''
          use_input_ibuf2=''
        fi
      else
        TIMESERIES=''
        DELGZ=''
        mdelete_gz_suffix_list=''
        mdelete_gz_uxxx=''
        DELTD=''
        mdelete_td_suffix_list=''
        mdelete_td_uxxx=''
      fi

      # Set options for time series files
      diag2ts_load_diag=off
      diag2ts_delete_diag=off
      diag2ts_CCRNTMP=${pxdiag_CCRNTMP:-''}
      diag2ts_RUNPATH=${pxdiag_RUNPATH:-''}
      diag2ts_RMTRUNPATH=${RMTRUNPATH:=''}
      diag2ts_transfer_time_series=${transfer_time_series:=''}
      diag2ts_transfer_all_dd=${transfer_all_dd:=''}
      diag2ts_remusr=${remusr:=''}
      diag2ts_remserver=${remserver:=''}
      diag2ts_rem_save=''
      # Inclusion of additional ocean diagnostic decks that use the time series created from
      # the diagnostic files (gp, xp, cp) may be controlled via diag2ts_with_gztsdiag.
      # diag2ts_with_gztsdiag is true by default
      diag2ts_with_gztsdiag=''
      # Alternate names for the 2 decks that are run when diag2ts_with_gztsdiag is true
      # may be supplied in diag2ts_gztsdiag_deck1 and diag2ts_gztsdiag_deck2.
      # If either of these deck names is "no" or "off" then that deck will not be used.
      # If a deck name is supplied then it must begin with "gztsdiag..."
      diag2ts_gztsdiag_deck1=''
      diag2ts_gztsdiag_deck2=''
      diag2ts_with_gz=$with_gz
      diag2ts_with_cp=$with_cp
      diag2ts_with_dd=$with_dd
      diag2ts_with_dp=$with_dp
      diag2ts_with_ds=off
      diag2ts_with_dsd=$with_dsd
      diag2ts_with_dcosp=$with_dcosp
      diag2ts_with_mcosp=$with_mcosp
      diag2ts_gssave=$gssave
      diag2ts_with_cfmip=''
      diag2ts_with_lsarc=$with_lsarc

      # A flag that may be used to turn off deletion of time series files after they
      # have been created and put on cfs
      diag2ts_delete_time_series=''

      # A flag that may be used to turn off dumping time series files to cfs after they
      # have been created
      diag2ts_dump_time_series=''

      # A flag to toggle creation of a single spltdiag job for each suffix (e.g. gp, dd, ..)
      # that is associated with diag files used to create time series 
      # This will also cause a separate tdumper job to be created for each suffix,
      # when time series files are dumped to cfs
      diag2ts_one_job_per_suffix=''

      # if [ ! x"$pdiag_type" = xcmam20 ]; then
      #   if [ $with_td -eq 1 ]; then
      #     diag2ts_prefix_list="${diag2ts_prefix_list}:m$ch2"
      #     diag2ts_suffix_list="${diag2ts_suffix_list}:td"
      #   fi
      # fi

      # These variables are used to determine if and where spltdiag will copy
      # time series files to a remote machine
      # spltdiag_trans = on|off ...on means transfer time series to the remote machine
      spltdiag_trans=''
      # spltdiag_remserver is the name of the remote host used when spltdiag_trans = on
      spltdiag_remserver=''
      # spltdiag_remdir is the name of an existing directory on the remote host into which
      # the time series files will be copied when spltdiag_trans = on
      spltdiag_remdir=''

      # Allow user to define a list of specific variables (ie superlabels) that will
      # limit the number of files that spltdiag creates
      spltdiag_vars=''

      if [ $is_first_chunk -eq 1 ]; then
        # If this is the first chunk of the run then assume that the Dec diagnostic
        # files from the year before the start of this chunk are not on disk
        force_first_djf=0

        # Also set pool_ignore_first_year = on unless the user has explicitly set
        # pdiag_pool_ignore_first_year, in which case its value will be used
        pdiag_pool_ignore_first_year=''
        pool_ignore_first_year=${pdiag_pool_ignore_first_year:=on}

        # Never ignore the first year if the first chunk size is 1 year or less
        [ $first_chunk_size -le 12 ] && eval pool_ignore_first_year\=off
      else
        # For any chunk that is not the first chunk of the run, assume that the first
        # DJF is complete and that multi year pools will contain all years in this chunk
        force_first_djf=1
        pool_ignore_first_year=off
      fi

#DBG
#DBG echo "is_first_chunk=$is_first_chunk  force_first_djf=$force_first_djf  pool_ignore_first_year=$pool_ignore_first_year" >>$error_out

#      chunk_ym_range="${chunk_start_year}m${chunk_start_mon}_${chunk_stop_year}m${chunk_stop_mon}"

      # Optionally include pooling under user control
      pool_flist=tmp_pool_flist_$stamp
      dump_list_pool_sv=''
      dump_list_pool_arclabel=''
      dump_list_pool_rmdskcpy=''
      if [ $with_pool -eq 1 ]; then
        # Determine chunk size in years to use with [ca]pool, which
        # will only accept a pooling interval in years
        # This implies that chunk_size (in months) must correspond
        # to an integer number of years
        chunk_size_ok=`echo $chunk_size|awk '{if($1%12==0){print "1"}else{print "0"}}' -`
        [ $chunk_size_ok -eq 0 ] && \
          bail "Chunk size $chunk_size months must be an integer number of years."
        chunk_size_y=`echo $chunk_size|awk '{printf "%d",$1/12}' -`
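        # Illustrative example: chunk_size=60 gives chunk_size_y=5, so the pooling
        # job below is inserted as "<ch2>pool:5y" (e.g. cpool:5y when ch2=c)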
        if [ $chunk_size_y -gt 1 ]; then
          POOLING=`echo "${ch2}pool:${chunk_size_y}y"|sed 's/ //g'`
        else
          # The ?pool alias will not allow a pooling interval of 1 year or less
          if [ $force_first_djf -eq 1 ]; then
            # The first DJF should be on disk ... pool all seasons
            POOLING="pool_sea:DMJS pool_ann:${chunk_size}m"
            rm -f $pool_flist
            cat <<EOF >$pool_flist
p${ch2}_${runid}_${start_year}_djf_gp
p${ch2}_${runid}_${start_year}_djf_xp
p${ch2}_${runid}_${start_year}_jja_gp
p${ch2}_${runid}_${start_year}_jja_xp
p${ch2}_${runid}_${start_year}_mam_gp
p${ch2}_${runid}_${start_year}_mam_xp
p${ch2}_${runid}_${start_year}_son_gp
p${ch2}_${runid}_${start_year}_son_xp
p${ch2}_${runid}_${start_year}_ann_gp
p${ch2}_${runid}_${start_year}_ann_xp
EOF
          else
            # Assume the first DJF is not on disk
            POOLING="pool_sea:MJS pool_ann:${chunk_size}m"
            rm -f $pool_flist
            cat <<EOF >$pool_flist
p${ch2}_${runid}_${start_year}_jja_gp
p${ch2}_${runid}_${start_year}_jja_xp
p${ch2}_${runid}_${start_year}_mam_gp
p${ch2}_${runid}_${start_year}_mam_xp
p${ch2}_${runid}_${start_year}_son_gp
p${ch2}_${runid}_${start_year}_son_xp
p${ch2}_${runid}_${start_year}_ann_gp
p${ch2}_${runid}_${start_year}_ann_xp
EOF
          fi
          DUMPOOL="dump_list_pool=${pool_flist}:${chunk_size}m"
          if [ $gateway_is_back_end -eq 1 ]; then
            dump_list_pool_sv=off
          else
            dump_list_pool_sv=on
          fi
          dump_list_pool_arclabel="p${ch2}_${runid}_${chunk_ym_range}_pooled"

          # Ensure that the copy of the pooled files on the cfs disk buffer
          # gets deleted as soon as it has been written to tape
          dump_list_pool_rmdskcpy=on

          DELPOOL="del_list_pool=${pool_flist}:${chunk_size}m"
          POOLING="$POOLING $DUMPOOL $DELPOOL"
          # pool_uxxx must be set explicitly for the case of pooling 1 year or less
          pool_uxxx="p$ch2"
        fi
      else
        POOLING=''
      fi

      # These variables are used by the [ca]pool alias
      load_diag=off
      dump_diag=off
      delete_diag=off
      pdiag_dump_pool=''
      dump_pooled=${pdiag_dump_pool:-on}
      pdiag_delete_pool=''
      delete_pooled=${pdiag_delete_pool:-on}
      psdelete_leave_last_pool=off
      transfer_pooled=${transfer_time_series:=''}
      pool_RMTRUNPATH=${RMTRUNPATH:=''}
      pool_CCRNTMP=${pxdiag_CCRNTMP:-''}
      pool_RUNPATH=${pxdiag_RUNPATH:-''}
      pool_with_cp=$with_cp

      # Define cccjob variables for the pooling/time series string
      varlist='uxxx
               runid
               noprint
               cfsuser
               masterdir
               shortermdir
               nolist
               sv
               besc
               diag_uxxx
               pool_uxxx
               mdelete_gz_suffix_list
               mdelete_gz_uxxx
               mdelete_td_suffix_list
               mdelete_td_uxxx
               spltdiag_trans
               spltdiag_remserver
               spltdiag_remdir
               spltdiag_vars
               diag2ts_prefix_list
               diag2ts_suffix_list
               diag2ts_load_diag
               diag2ts_delete_diag
               diag2ts_transfer_time_series
               diag2ts_transfer_all_dd
               diag2ts_CCRNTMP
               diag2ts_RUNPATH
               diag2ts_RMTRUNPATH
               diag2ts_remusr
               diag2ts_remserver
               diag2ts_rem_save
               diag2ts_with_gztsdiag
               diag2ts_gztsdiag_deck1
               diag2ts_gztsdiag_deck2
               diag2ts_with_gz
               diag2ts_with_cp
               diag2ts_with_dd
               diag2ts_with_dp
               diag2ts_gssave
               diag2ts_with_cfmip
               diag2ts_with_ds
               diag2ts_with_dsd
               diag2ts_with_dcosp
               diag2ts_with_mcosp
               diag2ts_with_lsarc
               diag2ts_delete_time_series
               diag2ts_dump_time_series
               diag2ts_one_job_per_suffix'

      varlist2='load_diag
                dump_diag
                delete_diag
                dump_pooled
                delete_pooled
                psdelete_leave_last_pool
                transfer_pooled
                force_first_djf
                dump_list_pool_sv
                dump_list_pool_arclabel
                dump_list_pool_rmdskcpy
                dump_list_rs_sv
                dump_list_rs_arclabel
                dump_sublist_rs_sv
                dump_sublist_rs_arclabel
                gssave
                dsd3D
                daily_cmip5_tiers
                pool_CCRNTMP
                pool_RUNPATH
                pool_RMTRUNPATH
                pool_ignore_first_year
                pool_with_cp
                coupled
                bemach
                mdump_diag_suffix_list
                mdump_diag_uxxx
                mdump_diag_arclabel
                mdump_diag_sv
                mdump_diag_rmdskcpy
                mdump_diag_with_lsarc
                run_start_year
                run_start_month
                run_stop_year
                run_stop_month
                use_input_ibuf2
                with_lsarc
                CCCJOB_ROOT
                JHOME
                JHOME_DATA
                JHOME_RUN
                BERUNPATH'

      # Ensure we use the same crawork definition as above
      cat > chunk_defs <<EOF
$craworkdef
EOF
      for var in $varlist; do
        eval val=\$$var
        # If this variable is defined add it to the list
        [ -n "$val" ] && echo ${var}=\'$val\' >> chunk_defs
      done
      for var in $varlist2; do
        eval val=\$$var
        # If this variable is defined add it to the list
        [ -n "$val" ] && echo ${var}=\'$val\' >> chunk_defs
      done

      if [ $pass_phys_carb -eq 1 ]; then
        for var in PhysA PhysO CarbA CarbO CarbL pass_phys_carb; do
          eval val=\$$var
          # If this variable is defined add it to the list
          [ -n "$val" ] && echo ${var}=\'$val\' >> chunk_defs
        done
      fi

      # pdiag_with_ondisk can be set to "off" if ondisk is not required
      pdiag_with_ondisk=''
      with_ondisk=${pdiag_with_ondisk:=on}
      ToF with_ondisk
      if [ $with_ondisk -eq 1 ]; then
        ##################################################################################
        # Define cccjob variables for the ondisk string
        ##################################################################################
        varlist='uxxx
                 runid
                 noprint
                 ondisk_prefix_list
                 ondisk_suffix_list
                 ondisk_comment
                 run_start_year
                 run_start_month
                 run_stop_year
                 run_stop_month
                 CCCJOB_ROOT
                 JHOME
                 JHOME_DATA
                 JHOME_RUN
                 BERUNPATH'

        # Ensure we use the same crawork definition as above
        cat > ondisk_defs <<EOF
$craworkdef
EOF
        for var in $varlist; do
          eval val=\$$var
          # If this variable is defined add it to the list
          [ -n "$val" ] && echo ${var}=\'$val\' >> ondisk_defs
        done

        # Create the ondisk job in a separate string so that different start/stop
        # dates may be used. This is desirable when pooling is done and the
        # diagnostic files for the month before chunk_start_year/chunk_start_mon
        # are required
        if [ $force_first_djf -eq 0 -o $with_pool -eq 0 ]; then
          # Either this is the first chunk of the run or pooling will not be done
          # Do not check that the diagnostic files for the month before
          # chunk_start_year/chunk_start_mon are on disk
          ondisk_start_year=$chunk_start_year
          ondisk_start_mon=$chunk_start_mon
          ONDISK=`echo "ondisk:-${chunk_size}m"|sed 's/ //g'`
        else
          # This is not the first chunk of the run
          # Check that the diagnostic files for the month before
          # chunk_start_year/chunk_start_mon are on disk
          if [ $chunk_start_mon -eq 1 ]; then
            ondisk_start_year=`expr $chunk_start_year - 1`
            ondisk_start_mon=12
          else
            ondisk_start_year=$chunk_start_year
            ondisk_start_mon=`expr $chunk_start_mon - 1`
          fi
          # Increase the insertion interval by 1 to account for the extra month
          ondisk_interval=`expr $chunk_size + 1`
          ONDISK=`echo "ondisk:-${ondisk_interval}m"|sed 's/ //g'`
        fi
        ondisk_start=`echo "--start=${ondisk_start_year}:$ondisk_start_mon"|sed 's/ //g'`
        ondisk_stop=`echo "--stop=${chunk_stop_year}:$chunk_stop_mon"|sed 's/ //g'`

        # Create the ondisk job string
        $CCCJOB_ENV cccjob --out=ondisk_job --job="$ONDISK" \
                           $ondisk_start $ondisk_stop ondisk_defs

        # Append this job to the end of the job created above
        cat $pxdiag_job ondisk_job > new_job
        mv new_job $pxdiag_job
      fi

      ##################################################################################
      # Define the remaining job
      ##################################################################################
      JOBDESC="$DUMPDIAG $DUMPRS $TIMESERIES $DELGZ $DELTD $POOLING"

      JOBD=`echo $JOBDESC|sed 's/ //g'`
      if [ -n "$JOBD" ]; then
        # Only create this when there is something in the job description
        # This will be empty when neither time series nor pooling are done

        # The time series/pooling string will start the month after the
        # previous time series/pooling string finished and stop at the
        # end of the current interval (as defined by stop_year/stop_mon)
        chunk_start=`echo "--start=${chunk_start_year}:$chunk_start_mon"|sed 's/ //g'`
        chunk_stop=`echo "--stop=${chunk_stop_year}:$chunk_stop_mon"|sed 's/ //g'`

        # Create the job string
        $CCCJOB_ENV cccjob --out=chunk_job --job="$JOBDESC" \
                           $chunk_start $chunk_stop chunk_defs

        # Append this job to the end of the job created above
        cat $pxdiag_job chunk_job > new_job
        mv new_job $pxdiag_job
      fi

      if [ $pdiag_delete_diag -eq 1 ]; then
        ##################################################################################
        # Define a string containing a job description for deleting diag files
        # This job will never delete diag files for the last month of the pool interval.
        # A separate job is created below to delete the last month of diag files, but only
        # when no other jobs, running in parallel with this job, require these files.
        ##################################################################################
        DELDIAG=`echo "mdelete_diag:s"|sed 's/ //g'`
        if [ -z "$stand_alone_craworkval" ]; then
          # Parallel diagnostics are running
          # (ie stand_alone_craworkval is null or not set)
          mdelete_diag_leave_last_mon=off
        else
          # Diagnostics are run serially in a single string
          # (ie stand_alone_craworkval is set to a valid string)
          mdelete_diag_leave_last_mon=on
        fi
        mdelete_diag_uxxx="d$ch2"
        mdelete_diag_suffix_list=`echo $mdump_suffix_list|awk '
          {for (i=1; i<=NF; i++)
             {if ($i ~ /^cm$/) {printf "cp "};
              if ($i ~ /^gs$/) {printf "gp xp "};}}' -`

        # Add more suffixes as required
        [ $with_dd    -eq 1 ] && mdelete_diag_suffix_list="$mdelete_diag_suffix_list dd"
        [ $with_dp    -eq 1 ] && mdelete_diag_suffix_list="$mdelete_diag_suffix_list dp"
        [ $with_ds    -eq 1 ] && mdelete_diag_suffix_list="$mdelete_diag_suffix_list ds"
        [ $with_dsd   -eq 1 ] && mdelete_diag_suffix_list="$mdelete_diag_suffix_list dsd"
        [ $with_dcosp -eq 1 ] && mdelete_diag_suffix_list="$mdelete_diag_suffix_list dcosp"
        [ $with_cc    -eq 1 ] && mdelete_diag_suffix_list="$mdelete_diag_suffix_list cc"
        [ $with_ie    -eq 1 ] && mdelete_diag_suffix_list="$mdelete_diag_suffix_list ie"
        [ $with_gp6   -eq 1 ] && mdelete_diag_suffix_list="$mdelete_diag_suffix_list gp6"
        [ $with_xp6   -eq 1 ] && mdelete_diag_suffix_list="$mdelete_diag_suffix_list xp6"
        if [ ! x"$pdiag_type" = xcmam20 ]; then
          [ $with_td  -eq 1 ] && mdelete_diag_suffix_list="$mdelete_diag_suffix_list td"
        fi

        # Define cccjob variables for the delete_diag string
        varlist='uxxx
                 runid
                 noprint
                 mdelete_diag_suffix_list
                 mdelete_diag_uxxx
                 mdelete_diag_leave_last_mon
                 run_start_year
                 run_start_month
                 run_stop_year
                 run_stop_month
                 JHOME
                 JHOME_DATA
                 JHOME_RUN
                 CCCJOB_ROOT'

        # Ensure we use the same crawork definition as above
        cat > deldiag_defs <<EOF
$craworkdef
EOF
        for var in $varlist; do
          eval val=\$$var
          # If this variable is defined add it to the list
          [ -n "$val" ] && echo ${var}=\'$val\' >> deldiag_defs
        done

        # Create the deldiag job in a separate string so that different start/stop
        # dates may be used. This will allow optional deletion of the last month of
        # the current pooling interval
        deldiag_start_year=$chunk_start_year
        deldiag_start_mon=$chunk_start_mon
        if [ $with_pool -eq 1 -a -z "$stand_alone_craworkval" ]; then
          # When pooling is done and parallel diagnostics are running
          # (ie stand_alone_craworkval is null or not set), do not delete diag
          # files for the last month of the current pooling interval
          if [ $chunk_stop_mon -eq 1 ]; then
            deldiag_stop_year=`expr $chunk_stop_year - 1`
            deldiag_stop_mon=12
          else
            deldiag_stop_year=$chunk_stop_year
            deldiag_stop_mon=`expr $chunk_stop_mon - 1`
          fi
        else
          # When pooling is not done, delete all diag files for the current chunk
          deldiag_stop_year=$chunk_stop_year
          deldiag_stop_mon=$chunk_stop_mon
        fi
        deldiag_start=`echo "--start=${deldiag_start_year}:$deldiag_start_mon"|sed 's/ //g'`
        deldiag_stop=`echo "--stop=${deldiag_stop_year}:$deldiag_stop_mon"|sed 's/ //g'`

        # Create the deldiag job string
        $CCCJOB_ENV cccjob --out=deldiag_job --job="$DELDIAG" \
                           $deldiag_start $deldiag_stop deldiag_defs

        # Append this job to the end of the job created above
        cat $pxdiag_job deldiag_job > new_job
        mv new_job $pxdiag_job
      fi

      ##################################################################################
      # Create a job to clean up lock files and delete the diagnostic files for the last
      # month of the current pooling interval and/or the last month of the previous
      # pooling interval. Two jobs in sequence will both require a common month of
      # diagnostic files. This will be the last month of the first job in this sequence
      # of 2 jobs. The first job uses them for monthly/annual pooling while the second
      # job uses them for pooling of the first season (e.g. DJF if starting on a
      # calendar year boundary).
      # This extra job will only be required when pooling is done and parallel
      # diagnostics are running (ie stand_alone_craworkval is null or not set).
      ##################################################################################
      if [ $with_pool -eq 1 -a -z "$stand_alone_craworkval" ]; then

        # Define a string to be used as part of the lock file name
        # The first year/month is one month before chunk_start_year/chunk_start_mon
        # and the second year/month is chunk_stop_year/chunk_stop_mon
        if [ $chunk_start_mon -eq 1 ]; then
          clean_diag_start_year=`echo $chunk_start_year | awk '{printf "%3.3d",$1-1}' -`
          clean_diag_start_mon=12
        else
          clean_diag_start_year=$chunk_start_year
          clean_diag_start_mon=`echo $chunk_start_mon | awk '{printf "%2.2d",$1-1}' -`
        fi
        clean_diag_stop_year=$chunk_stop_year
        clean_diag_stop_mon=$chunk_stop_mon
        strng1="d${ch2}_${runid}_${clean_diag_start_year}_m${clean_diag_start_mon}"
        strng2="d${ch2}_${runid}_${clean_diag_stop_year}_m${clean_diag_stop_mon}"
        lock1="${JHOME:-$HOME}/.queue/.crawork/lock_$strng1"
        lock2="${JHOME:-$HOME}/.queue/.crawork/lock_$strng2"
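        # Illustrative example (assuming ch2=c and runid=abc): a chunk spanning
        # 011:01..015:12 gives strng1="dc_abc_010_m12" and strng2="dc_abc_015_m12",
        # with lock files lock_dc_abc_010_m12 and lock_dc_abc_015_m12
        # under ${JHOME:-$HOME}/.queue/.crawork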

        clean_diag_suffix_list=`echo $mdump_suffix_list|awk '
          {for (i=1; i<=NF; i++)
             {if ($i ~ /^cm$/) {printf "cp "};
              if ($i ~ /^gs$/) {printf "gp xp "};}}' -`
        [ $with_dd    -eq 1 ] && clean_diag_suffix_list="$clean_diag_suffix_list dd"
        [ $with_dp    -eq 1 ] && clean_diag_suffix_list="$clean_diag_suffix_list dp"
        [ $with_ds    -eq 1 ] && clean_diag_suffix_list="$clean_diag_suffix_list ds"
        [ $with_dsd   -eq 1 ] && clean_diag_suffix_list="$clean_diag_suffix_list dsd"
        [ $with_dcosp -eq 1 ] && clean_diag_suffix_list="$clean_diag_suffix_list dcosp"
        [ $with_cc    -eq 1 ] && clean_diag_suffix_list="$clean_diag_suffix_list cc"
        [ $with_ie    -eq 1 ] && clean_diag_suffix_list="$clean_diag_suffix_list ie"
        [ $with_gp6   -eq 1 ] && clean_diag_suffix_list="$clean_diag_suffix_list gp6"
        [ $with_xp6   -eq 1 ] && clean_diag_suffix_list="$clean_diag_suffix_list xp6"
        if [ ! x"$pdiag_type" = xcmam20 ]; then
          [ $with_td  -eq 1 ] && clean_diag_suffix_list="$clean_diag_suffix_list td"
        fi

        # Create the lock files unless they already exist
        if [ ! -s "$lock1" ]; then
          touch $lock1 || bail "Unable to create lock file $lock1"
          if [ $is_first_chunk -eq 1 ]; then
            # If this is the first time the extra decks are called then assume
            # that the diagnostic files for the month before the start of the run
            # do not exist; create the lock1 file but do not insert any file names
            # Append a line used as a delimiter to indicate the end of file names
            echo "#END" >> $lock1
            # Append the number "1" to the first lock1 file so that the lock file
            # will get deleted by the first job to run the clean_diag job.
            # In this case there will be only 1 job that will attempt to access lock1
            echo "1" >> $lock1
          else
            # Write file names for the first month to lock1
            for sfx in $clean_diag_suffix_list; do
              echo "${strng1}_$sfx" >> $lock1
            done
            # Append a line used as a delimiter to indicate the end of file names
            echo "#END" >> $lock1
            # Append the number "2" to each lock file
            # This will be used as a counter that will be decremented each time
            # a job finishes and runs the final job (created below)
            echo "2" >> $lock1
          fi
        fi
        if [ ! -s "$lock2" ]; then
          touch $lock2 || bail "Unable to create lock file $lock2"
          # Write file names for the second month to lock2
          for sfx in $clean_diag_suffix_list; do
            echo "${strng2}_$sfx" >> $lock2
          done
          # Append a line used as a delimiter to indicate the end of file names
          echo "#END" >> $lock2
          # Append the number "2" to each lock file
          # This will be used as a counter that will be decremented each time
          # a job finishes and runs the final job (created below)
          echo "2" >> $lock2
        fi
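        # Illustrative example of a lock file written above (assuming ch2=c and
        # runid=abc): file names first, then the #END delimiter, then the counter
        #   dc_abc_015_m12_gp
        #   dc_abc_015_m12_xp
        #   #END
        #   2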
        
        # This eval is necessary to prevent the shebang line from being
        # blindly replaced with a different shell by submit3
        eval shebang=\#\!/\bin/\sh
        echo "$shebang" > clean_diag
        cat >> clean_diag << 'END_OF_CLEAN_DIAG'

  set -a
  . betapath2

  #  * ........................... Parmsub Parameters ....................
  jobname=clean_diag; crawork=clean_diag_job
  username=acrnxxx; user=XXX;
  lopgm="lopgm"; stime="3600"; memory1="600mb";

  #  * ............................ Condef Parameters ............................
  noprint=on
  nextjob=on

  # Alternate path to a directory where .queue/.crawork will be found
  JHOME=''

  if [ -n "$JHOME" -a x"$JHOME" != x"$HOME" ]; then
    # Allow optional reset of DATAPATH/RUNPATH
    JHOME_DATA=''
    DATAPATH=${JHOME_DATA:=$DATAPATH}
    RUNPATH=${JHOME_DATA:=$RUNPATH}
    # Allow optional reset of CCRNTMP
    JHOME_RUN=''
    CCRNTMP=${JHOME_RUN:=$CCRNTMP}
  fi

  #  * ............................. Deck Definition .............................
  . comjcl.cdk
  cat > Execute_Script <<'end_of_exec_script'

END_OF_CLEAN_DIAG

        # Add a line to tell the submission scripts to not source the following lines
        echo "    # ---Start_submit_ignore_code----" >> clean_diag

        # Define the lock file names in the generated script, embedding them
        # as literal strings that contain no shell variables
        echo "    # Define the name of the lock files"  >> clean_diag
        echo "    lock1=$lock1" >> clean_diag
        echo "    lock2=$lock2" >> clean_diag

        cat >> clean_diag << 'END_OF_CLEAN_DIAG'

    bail(){
      echo `date`" --- clean_diag: $*"
      exit 1
    }

    # Read the counter from the end of each lock file
    if [ -s "$lock1" ]; then
      count1=`tail -1 $lock1`
      [ -z "$count1" ] && bail "Error reading lock file $lock1"
    else
      count1=0
    fi

    if [ -s "$lock2" ]; then
      count2=`tail -1 $lock2`
      [ -z "$count2" ] && bail "Error reading lock file $lock2"
    else
      count2=0
    fi
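    # The last line of each lock file is a countdown shared by the two jobs
    # that reference it:
    #   count = 2 : the other job has not run yet; append "1" and leave the
    #               listed files in place
    #   count = 1 : this is the last job to arrive; delete the files named in
    #               the lock file, then remove the lock file itself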

    if [ $count1 -eq 2 ]; then
      # If count = 2 then simply append a line containing the number "1"
      echo "1" >> $lock1 || bail "Unable to append to lock file $lock1"
    elif [ $count1 -eq 1 ]; then
      # If count = 1 then delete the files named in lock1
      for F in `cat $lock1`; do
        F=`echo $F|sed 's/ //g'`
        [ -z "$F" ] && continue
        [ $F = "#END" ] && break
        access $F $F nocp na
        delete $F na
      done
      # Then remove the lock file
      rm -f $lock1
    fi

    if [ $count2 -eq 2 ]; then
      # If count = 2 then simply append a line containing the number "1"
      echo "1" >> $lock2 || bail "Unable to append to lock file $lock2"
    elif [ $count2 -eq 1 ]; then
      # If count = 1 then delete the files named in lock2
      for F in `cat $lock2`; do
        F=`echo $F|sed 's/ //g'`
        [ -z "$F" ] && continue
        [ $F = "#END" ] && break
        access $F $F nocp na
        delete $F na
      done
      # Then remove the lock file
      rm -f $lock2
    fi

END_OF_CLEAN_DIAG

        # This is the end delimiter corresponding to Start_submit_ignore_code above
        echo "    # ---Stop_submit_ignore_code----" >> clean_diag

        cat >> clean_diag << 'END_OF_CLEAN_DIAG'

end_of_exec_script

  . endjcl.cdk

END_OF_CLEAN_DIAG

        # This line will get interpreted as the end of the current job by the submission
        # scripts unless it is hidden from them via the following redirection
        echo "#end_of_job" >> clean_diag

        #DBG temporarily reset noprint
        #DBG eval noprint\=off

        # Define cccjob variables for the clean_diag string
        varlist='uxxx
                 runid
                 noprint
                 JHOME
                 JHOME_DATA
                 JHOME_RUN
                 CCCJOB_ROOT'

        # Ensure we use the same crawork definition as above
        cat > clean_diag_defs <<EOF
$craworkdef
EOF
        for var in $varlist; do
          eval val=\$$var
          # If this variable is defined add it to the list
          [ -n "$val" ] && echo ${var}=\'$val\' >> clean_diag_defs
        done

        clean_diag_start=`echo "--start=${clean_diag_start_year}:$clean_diag_start_mon"|sed 's/ //g'`
        clean_diag_stop=`echo "--stop=${clean_diag_stop_year}:$clean_diag_stop_mon"|sed 's/ //g'`
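        # e.g. with clean_diag_start_year=1960 and clean_diag_start_mon=01
        # (hypothetical values) this yields "--start=1960:01"; the sed simply
        # strips any embedded blanks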

        # Create the clean_diag job string
        $CCCJOB_ENV cccjob --out=clean_diag_job --job="clean_diag:s" \
                           $clean_diag_start $clean_diag_stop clean_diag_defs

        # Append this job to the end of the job created above
        cat $pxdiag_job clean_diag_job > new_job
        mv new_job $pxdiag_job

      fi

      ##################################################################################
      # Create a job to remove any next_block lock file that may exist
      ##################################################################################
      if [ -n "$next_block_lock" -a -z "$stand_alone_craworkval" ]; then
        # If next_block_lock is defined it will be the name of a lock file that was created
        # by the "block" module. pxdiag should remove this lock file, if it exists, after
        # it finishes processing at the end of each time series chunk

        if [ -f $next_block_lock ]; then
          # The lock file exists as a regular file
          eval shebang=\#\!/\bin/\sh
          echo "$shebang" > rm_block_lock
          cat >> rm_block_lock << 'END_RM_BLOCK_LOCK'

  set -a
  . betapath2

  #  * ........................... Parmsub Parameters ....................
  jobname=rm_block_lock; crawork=rm_block_lock_job
  username=acrnxxx; user=XXX;
  lopgm="lopgm"; stime="3600"; memory1="600mb";

  #  * ............................ Condef Parameters ............................
  noprint=on
  nextjob=on

  # Alternate path to a directory where .queue/.crawork will be found
  JHOME=''

  if [ -n "$JHOME" -a x"$JHOME" != x"$HOME" ]; then
    # Allow optional reset of DATAPATH/RUNPATH
    JHOME_DATA=''
    DATAPATH=${JHOME_DATA:=$DATAPATH}
    RUNPATH=${JHOME_DATA:=$RUNPATH}
    # Allow optional reset of CCRNTMP
    JHOME_RUN=''
    CCRNTMP=${JHOME_RUN:=$CCRNTMP}

  fi

  #  * ............................. Deck Definition .............................
  . comjcl.cdk
  cat > Execute_Script <<'end_of_exec_script'

  # ---Start_submit_ignore_code----

END_RM_BLOCK_LOCK

          echo "  if [ -f $next_block_lock ]; then"  >> rm_block_lock
          echo "    rm -f $next_block_lock"          >> rm_block_lock
          echo "    echo removed $next_block_lock"   >> rm_block_lock
          echo "  fi"                                >> rm_block_lock

          cat >> rm_block_lock << 'END_RM_BLOCK_LOCK'

  # Ensure a normal (0) exit status
  exit

  # ---Stop_submit_ignore_code----

end_of_exec_script

  . endjcl.cdk

END_RM_BLOCK_LOCK

          echo "#end_of_job" >> rm_block_lock

          # Define cccjob variables for the rm_block_lock string
          varlist='uxxx
                   runid
                   noprint
                   JHOME
                   JHOME_DATA
                   JHOME_RUN
                   CCCJOB_ROOT'

          # Ensure we use the same crawork definition as above
          cat > rm_block_lock_defs <<EOF
$craworkdef
EOF
          for var in $varlist; do
            eval val=\$$var
            # If this variable is defined add it to the list
            [ -n "$val" ] && echo ${var}=\'$val\' >> rm_block_lock_defs
          done

          rm_block_lock_start=`echo "${chunk_start_year}:$chunk_start_mon"|sed 's/ //g'`
          rm_block_lock_stop=`echo "${chunk_stop_year}:$chunk_stop_mon"|sed 's/ //g'`

          # Create the rm_block_lock job string
          $CCCJOB_ENV cccjob --out=rm_block_lock_job --job="rm_block_lock:s" \
                        --start=$rm_block_lock_start --stop=$rm_block_lock_stop \
                        rm_block_lock_defs

          # Append this job to the end of the job created above
          cat $pxdiag_job rm_block_lock_job > new_job
          mv new_job $pxdiag_job
        fi
      fi

    fi

  ########################################################
  ###################### Section 3 #######################
  ########################################################
  # This section is done for every invocation of pxdiag
  #
  # - Either submit the job created above to the front end
  #   or append it to an existing job string
  ########################################################

  DBG_submit=on
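  # DBG_submit=on is the normal mode; set it to "off" to skip submission and
  # instead copy the finished job string into the crawork dir for inspection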
  if [ x"$DBG_submit" = xoff ]; then
    # This is a debugging run, do not submit but copy the job to the crawork dir
    DBG_job=${pxdiag_job}_$stamp
    cp $pxdiag_job ${JHOME:-$HOME}/.queue/.crawork/$DBG_job
    bail "Copied submission job to ${JHOME:-$HOME}/.queue/.crawork/$DBG_job"
  fi

  if [ -z "$stand_alone_craworkval" ]; then
    # Submit the job string just created to run on the front end
    if [ $running_on_gateway -eq 1 ]; then
      echo "pxdiag: morigin=$morigin"
      rsub -x mdest=$gateway $pxdiag_job

    else
      # Create a temporary directory in ~/tmp to hold submission files
      host=`hostname` || bail "Problem in hostname"
      psubdir=${JHOME:-$HOME}/tmp/tmp_pxdiag_${host}_$stamp
      mkdir -p $psubdir || bail "Cannot create $psubdir"

      this_remjob=$pxdiag_job
      cp $this_remjob $psubdir
      ssh $ssh_args $gateway "cd ${psubdir}; rsub mdest=$gateway $this_remjob" 2>&1 || \
          bail "***ERROR*** Remote submission failed for $this_remjob\n"

      # Clean up the temporary directory
      rm -fr $psubdir
    fi
  else
    # Append the job string just created to an existing job string
    # whose name is determined from stand_alone_craworkval

    # Check for existence of prior pdiag lock file

    # persistd is the dir to which these lock files were written
    persistd=`echo $pdiag_lock|sed 's/^\(.*\)\/.*$/\1/'`
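    # (this sed strips the final path component, effectively `dirname $pdiag_lock`)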

    ncheck=0
    # Note: the loop guard allows up to 10 iterations, but the abort check
    # below will bail out after 3 failed attempts
    while [ $ncheck -lt 10 ]; do
      ncheck=`expr $ncheck + 1`
      pdiag_lock_found=`(ls -1 ${persistd}/lock_pdiag_${runid}_* || : ) 2>/dev/null`
      if [ -z "$pdiag_lock_found" ]; then
        # There are no pdiag lock files in persistd
        # Assume that lock files are not being used
        lock_status=0
      else
        # At least 1 pdiag lock file was found in persistd
        # These lock file names are of the form
        #     lock_pdiag_${runid}_${start_date}_${stop_date}_$stamp
        # where stamp is a string that does not contain any underscore
        # and (start|stop)_date are both of the form YYYYmMM
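        #     e.g. lock_pdiag_job000_1950m01_1950m12_123120456789 (hypothetical)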
        lock_status=1
        nlock=0
        for curr_dir in $pdiag_lock_found; do
          # "ls" will return a sorted list so if the current pdiag lock file
          # is not the first in the list then prior lock files exist
          nlock=`expr $nlock + 1`
          if [ x"$curr_dir" = x"$pdiag_lock" ]; then
            if [ $nlock -eq 1 ]; then
              # The current lock file is first in the list
              lock_status=0
            fi
            # Ignore lock files that are newer than the current lock file
            break
          fi
        done
      fi

      if [ $lock_status -eq 0 ]; then
        # There are no pdiag lock files older than the current one
        break
      else
        # Prior lock files exist
        if [ $ncheck -ge 3 ]; then
          # Too many checks
          echo " "
          echo "pdiag lock files exist prior to $pdiag_lock"
          echo " "
          bail "pdiag lock files exist:\n$pdiag_lock_found"
        else
          # Try again in 2 minutes
          sleep 120
        fi
      fi
    done

    # Ensure that the crawork string exists and if not create it
    cwstring="${JHOME:-$HOME}/.queue/.crawork/${stand_alone_craworkval}_string"
    [ ! -s "$cwstring" ] && touch $cwstring

    # Append the current job to this crawork string
    eval craworkdef\=crawork\=$stand_alone_craworkval
    splice_job_string --position=-1 $pxdiag_job $craworkdef

    # Remove the pdiag lock file created by the invoking instance of pdiag
    [ ! -z "$pdiag_lock" ] && rm -f $pdiag_lock
  fi

  # successful completion
  exit 0

  # ---Stop_submit_ignore_code----

end_of_script

 . endjcl.cdk

#end_of_job
