diff --git a/.github/workflows/build-and-deploy.yml b/.github/workflows/build-and-deploy.yml
new file mode 100644
index 0000000..da26b98
--- /dev/null
+++ b/.github/workflows/build-and-deploy.yml
@@ -0,0 +1,49 @@
+name: Build and deploy
+
+on:
+  push:
+    branches:
+      - develop
+      - main
+      - feature/dg-98-build-and-configure-pipeline
+
+permissions:
+  contents: read
+  packages: write
+
+jobs:
+  php-lint:
+    name: PHP Lint
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Setup Node.js
+        uses: actions/setup-node@v4
+      - name: Set env.BRANCH
+        run: |
+          echo "BRANCH=$(echo $GITHUB_REF | cut -d'/' -f 3)" >> $GITHUB_ENV
+      - name: Build Node
+        run: ./orch/build_node.sh
+      - name: Install PHP
+        run: bash ./scripts/pipeline/deb-php-install.sh
+      - name: Install Linters and Sniffers
+        run: |
+          composer global config --no-plugins allow-plugins.dealerdirect/phpcodesniffer-composer-installer false
+          composer global require --dev drupal/coder php-parallel-lint/php-parallel-lint squizlabs/php_codesniffer=*
+          COMPOSER_DIR=$(composer -n config --global home)
+          $COMPOSER_DIR/vendor/bin/phpcs --config-set installed_paths $COMPOSER_DIR/vendor/drupal/coder/coder_sniffer,$COMPOSER_DIR/vendor/sirbrillig/phpcs-variable-analysis,$COMPOSER_DIR/vendor/slevomat/coding-standard
+          mkdir -p /tmp/results
+          touch /tmp/results/php-lint.log
+          touch /tmp/results/php-cs.log
+          touch /tmp/results/theme-lint.log
+      - name: PHP Lint
+        run: |
+          COMPOSER_DIR=$(composer -n config --global home)
+          $COMPOSER_DIR/vendor/bin/parallel-lint -e php,module,inc,install,test,profile,theme ./digital-gov-drupal
+      - name: PHP CodeSniff (Ignore warnings)
+        run: |
+          COMPOSER_DIR=$(composer -n config --global home)
+          $COMPOSER_DIR/vendor/bin/phpcs --standard=./digital-gov-drupal/.phpcs.xml.dist -v --warning-severity=0 ./digital-gov-drupal
+
\ No newline at end of file
diff --git a/.phpcs.xml.dist b/.phpcs.xml.dist
new file mode 100644
index 0000000..a33f7b8
--- /dev/null
+++ b/.phpcs.xml.dist
@@ -0,0 +1,16 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ruleset name="DigitalGov">
+  <description>PHP_CodeSniffer standards overrides.</description>
+
+  <rule ref="Drupal"/>
+  <rule ref="DrupalPractice"/>
+
+  <file>web/modules/custom</file>
+  <file>web/themes/custom</file>
+
+  <exclude-pattern>*/vendor/*</exclude-pattern>
+  <exclude-pattern>*/node_modules/*</exclude-pattern>
+  <exclude-pattern>.github/</exclude-pattern>
+  <exclude-pattern>web/modules/custom/tome</exclude-pattern>
+  <exclude-pattern>web/modules/custom/samlauth</exclude-pattern>
+</ruleset>
diff --git a/scripts/bash_exports.sh b/scripts/bash_exports.sh
new file mode 100755
index 0000000..d56d71f
--- /dev/null
+++ b/scripts/bash_exports.sh
@@ -0,0 +1,43 @@
+#! /bin/bash
+
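+## VCAP_SERVICES and VCAP_APPLICATION are JSON blobs Cloud Foundry injects into the
+## container environment; jq pulls the individual service credentials out of them.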
+export SECRETS=$(echo $VCAP_SERVICES | jq -r '.["user-provided"][] | select(.name == "secrets") | .credentials')
+export SECAUTHSECRETS=$(echo $VCAP_SERVICES | jq -r '.["user-provided"][] | select(.name == "secauthsecrets") | .credentials')
+
+export APP_NAME=$(echo $VCAP_APPLICATION | jq -r '.name')
+export APP_ROOT=$(dirname "$0")
+export APP_ID=$(echo "$VCAP_APPLICATION" | jq -r '.application_id')
+
+export DB_NAME=$(echo $VCAP_SERVICES | jq -r '.["aws-rds"][] | .credentials.db_name')
+export DB_USER=$(echo $VCAP_SERVICES | jq -r '.["aws-rds"][] | .credentials.username')
+export DB_PW=$(echo $VCAP_SERVICES | jq -r '.["aws-rds"][] | .credentials.password')
+export DB_HOST=$(echo $VCAP_SERVICES | jq -r '.["aws-rds"][] | .credentials.host')
+export DB_PORT=$(echo $VCAP_SERVICES | jq -r '.["aws-rds"][] | .credentials.port')
+
+export ADMIN_EMAIL=$(echo $SECRETS | jq -r '.ADMIN_EMAIL')
+
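+## The space name ends in the environment (e.g. "myproject-dev"), so keep the last '-'-separated token.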
+export ENV=$(echo "$VCAP_APPLICATION" | jq -r '.space_name' | rev | cut -d- -f1 | rev)
+
+export S3_BUCKET=$(echo "$VCAP_SERVICES" | jq -r '.["s3"][]? | select(.name == "storage") | .credentials.bucket')
+export S3_ENDPOINT=$(echo "$VCAP_SERVICES" | jq -r '.["s3"][]? | select(.name == "storage") | .credentials.fips_endpoint')
+
+export SPACE=$(echo $VCAP_APPLICATION | jq -r '.["space_name"]')
+export WWW_HOST=${WWW_HOST:-$(echo $VCAP_APPLICATION | jq -r '.["application_uris"][]' | grep 'beta\|www' | tr '\n' ' ')}
+export CMS_HOST=${CMS_HOST:-$(echo $VCAP_APPLICATION | jq -r '.["application_uris"][]' | grep cms | tr '\n' ' ')}
+if [ -z "$WWW_HOST" ]; then
+ export WWW_HOST="*.app.cloud.gov"
+elif [ -z "$CMS_HOST" ]; then
+ export CMS_HOST=$(echo $VCAP_APPLICATION | jq -r '.["application_uris"][]' | head -n 1)
+fi
+
+export S3_ROOT_WEB=${S3_ROOT_WEB:-/web}
+export S3_ROOT_CMS=${S3_ROOT_CMS:-/cms/public}
+export S3_HOST=${S3_HOST:-$S3_BUCKET.$S3_ENDPOINT}
+export S3_PROXY_WEB=${S3_PROXY_WEB:-$S3_HOST$S3_ROOT_WEB}
+export S3_PROXY_CMS=${S3_PROXY_CMS:-$S3_HOST$S3_ROOT_CMS}
+export S3_PROXY_PATH_CMS=${S3_PROXY_PATH_CMS:-/s3/files}
+
+export DNS_SERVER=${DNS_SERVER:-$(grep -i '^nameserver' /etc/resolv.conf|head -n1|cut -d ' ' -f2)}
diff --git a/scripts/bootstrap.sh b/scripts/bootstrap.sh
new file mode 100755
index 0000000..e0a86d2
--- /dev/null
+++ b/scripts/bootstrap.sh
@@ -0,0 +1,79 @@
+#!/bin/bash
+set -uo pipefail
+
+## Fail fast when the Cloud Foundry environment is missing.
+if [ -z "${VCAP_SERVICES:-}" ]; then
+  echo "VCAP_SERVICES must be set in the environment: aborting bootstrap";
+  exit 1;
+fi
+
+## Export proxy servers.
+export http_proxy=$(echo ${VCAP_SERVICES} | jq -r '."user-provided"[].credentials.proxy_uri')
+export https_proxy=$(echo ${VCAP_SERVICES} | jq -r '."user-provided"[].credentials.proxy_uri')
+
+export home="/home/vcap"
+export app_path="${home}/app"
+export apt_path="${home}/deps/0/apt"
+
+echo $VCAP_SERVICES | jq -r '."user-provided"[].credentials.ca_certificate' | base64 -d > ${app_path}/ca_certificate.pem
+echo $VCAP_SERVICES | jq -r '."user-provided"[].credentials.ca_key' | base64 -d > ${app_path}/ca_key.pem
+
+chmod 600 ${app_path}/ca_certificate.pem
+chmod 600 ${app_path}/ca_key.pem
+
+## NewRelic configuration
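+## The apt buildpack supplies the New Relic agent; replace the app-bundled files with symlinks to it.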
+export newrelic_apt="${apt_path}/usr/lib/newrelic-php5"
+export newrelic_app="${app_path}/newrelic/"
+
+rm -rf ${newrelic_app}/agent
+ln -s ${newrelic_apt}/agent ${newrelic_app}/agent
+
+rm -f ${newrelic_app}/daemon/newrelic-daemon.x64
+ln -s ${apt_path}/usr/bin/newrelic-daemon ${newrelic_app}/daemon/newrelic-daemon.x64
+
+rm -f ${app_path}/newrelic/scripts/newrelic-iutil.x64
+ln -s ${newrelic_apt}/scripts/newrelic-iutil.x64 ${newrelic_app}/scripts/newrelic-iutil.x64
+
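+## gov-collector.newrelic.com is New Relic's FedRAMP-compliant collector endpoint.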
+echo 'newrelic.daemon.collector_host=gov-collector.newrelic.com' >> ${app_path}/php/etc/php.ini
+
+source ${app_path}/scripts/bash_exports.sh
+
+if [ ! -f ./container_start_timestamp ]; then
+ touch ./container_start_timestamp
+ chmod a+r ./container_start_timestamp
+ echo "$(date +'%s')" > ./container_start_timestamp
+fi
+
+dirs=( "${HOME}/private" "${HOME}/web/sites/default/files" )
+
+for dir in "${dirs[@]}"; do
+  if [ ! -d "${dir}" ]; then
+    echo "Creating ${dir} directory ... "
+    mkdir "${dir}"
+    chown vcap. "${dir}"
+  fi
+done
+
+## Update ~/.bashrc to set the proxy and $PATH when someone logs in.
+[ -z "$(grep PATH ${home}/.bashrc 2>/dev/null)" ] && \
+ touch ${home}/.bashrc && \
+ echo "export http_proxy=${http_proxy}" >> ${home}/.bashrc && \
+ echo "export https_proxy=${https_proxy}" >> ${home}/.bashrc && \
+ echo "alias nano=\"${home}/deps/0/apt/bin/nano\"" >> ${home}/.bashrc && \
+ echo "PATH=$PATH:/home/vcap/app/php/bin:/home/vcap/app/vendor/drush/drush" >> /home/vcap/.bashrc
+
+source ${home}/.bashrc
+
+echo "Installing awscli..."
+{
+ curl -S "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "/tmp/awscliv2.zip"
+ unzip -qq /tmp/awscliv2.zip -d /tmp/
+ /tmp/aws/install --bin-dir ${home}/deps/0/bin --install-dir ${home}/deps/0/usr/local/aws-cli
+ rm -rf /tmp/awscliv2.zip /tmp/aws
+} >/dev/null 2>&1
+
+# if [ "${CF_INSTANCE_INDEX:-''}" == "0" ]; then
+# ${app_path}/scripts/post-deploy
+# fi
\ No newline at end of file
diff --git a/scripts/download_backup.sh b/scripts/download_backup.sh
new file mode 100755
index 0000000..e37b713
--- /dev/null
+++ b/scripts/download_backup.sh
@@ -0,0 +1,116 @@
+#!/bin/bash
+
+set -e
+
+if [ "$(uname -s)" = "Darwin" ]; then
+ if ! hash brew 2>/dev/null ; then
+ echo "Please install Homebrew:
+ /bin/bash -c \"\$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)\""
+ echo
+ echo "NOTE: You will need sudoer permission."
+ echo "Linux: https://linuxize.com/post/how-to-add-user-to-sudoers-in-ubuntu/"
+ echo "MacOS: https://osxdaily.com/2014/02/06/add-user-sudoers-file-mac/"
+ exit 1
+ fi
+
+ if ! hash gdate 2>/dev/null ; then
+ echo "Please install GNU coreutils:
+ Homebrew:
+ brew install coreutils"
+ exit 1
+ fi
+fi
+
+if ! hash cf 2>/dev/null ; then
+ echo "Please install cf version 8:
+ Linux: https://docs.cloudfoundry.org/cf-cli/install-go-cli.html
+ Homebrew:
+ brew tap cloudfoundry/tap
+ brew install cf-cli@8"
+ exit 1
+elif [[ "$(cf --version)" != *"cf version 8."* ]]; then
+ echo "Please install cf version 8:
+ Linux: https://docs.cloudfoundry.org/cf-cli/install-go-cli.html
+ Homebrew:
+ brew uninstall cf-cli
+ brew tap cloudfoundry/tap
+ brew install cf-cli@8"
+ exit 1
+fi
+
+if ! hash jq 2>/dev/null ; then
+ echo "Please install jq:
+ Linux: https://jqlang.github.io/jq/download/
+ Homebrew:
+ brew install jq"
+ exit 1
+fi
+
+# change which date command is used based on host OS
+date_command=''
+
+if [ "$(uname -s)" == "Darwin" ]; then
+ date_command=gdate
+else
+ date_command=date
+fi
+
+help(){
+ echo "Usage: $0 [options]" >&2
+ echo
+ echo " -b The name of the S3 bucket with the backup."
+ echo " -e Environment of backup to download."
+ echo " -s Name of the space the backup bucket is in."
+ echo " -d Date to retrieve backup from. Acceptable values
+ are 'latest' or in 'YYYY-MM-DD' format and no
+ more than 15 days ago."
+}
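+
+# Example invocation (illustrative values):
+#   ./download_backup.sh -b project-backup -e prod -s project-prod -d latest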
+
+RED='\033[0;31m'
+NC='\033[0m'
+
+while getopts 'b:e:s:d:' flag; do
+ case ${flag} in
+ b) backup_bucket=${OPTARG} ;;
+ e) env=${OPTARG} ;;
+ s) space=${OPTARG} ;;
+ d) retrieve_date=${OPTARG} ;;
+ *) help && exit 1 ;;
+ esac
+done
+
+[[ -z "${backup_bucket}" ]] && help && echo -e "\n${RED}Error: Missing -b flag.${NC}" && exit 1
+[[ -z "${env}" ]] && help && echo -e "\n${RED}Error: Missing -e flag.${NC}" && exit 1
+[[ -z "${space}" ]] && help && echo -e "\n${RED}Error: Missing -s flag.${NC}" && exit 1
+[[ -z "${retrieve_date}" ]] && help && echo -e "\n${RED}Error: Missing -d flag.${NC}" && exit 1
+
+echo "Getting backup bucket credentials..."
+{
+ cf target -s "${space}"
+
+ export service="${backup_bucket}"
+ export service_key="${service}-key"
+ cf delete-service-key "${service}" "${service_key}" -f
+ cf create-service-key "${service}" "${service_key}"
+ sleep 2
+ export s3_credentials=$(cf service-key "${service}" "${service_key}" | tail -n +2)
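+ ## tail strips the human-readable header that 'cf service-key' prints before the JSON credentials.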
+
+ export AWS_ACCESS_KEY_ID=$(echo "${s3_credentials}" | jq -r '.credentials.access_key_id')
+ export bucket=$(echo "${s3_credentials}" | jq -r '.credentials.bucket')
+ export AWS_DEFAULT_REGION=$(echo "${s3_credentials}" | jq -r '.credentials.region')
+ export AWS_SECRET_ACCESS_KEY=$(echo "${s3_credentials}" | jq -r '.credentials.secret_access_key')
+
+} >/dev/null 2>&1
+
+echo "Downloading backup..."
+{
+
+ aws s3 cp s3://${bucket}/${env}/${retrieve_date}.tar.gz . --no-verify-ssl 2>/dev/null
+ cf delete-service-key "${service}" "${service_key}" -f
+
+} >/dev/null 2>&1
+
+echo "File saved: ${retrieve_date}.tar.gz"
diff --git a/scripts/entrypoint b/scripts/entrypoint
new file mode 100755
index 0000000..64ce35f
--- /dev/null
+++ b/scripts/entrypoint
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+## Simple script to hold the container open.
+while : ; do sleep 60m ; done
\ No newline at end of file
diff --git a/scripts/pipeline/app-backup.sh b/scripts/pipeline/app-backup.sh
new file mode 100755
index 0000000..79ecdb3
--- /dev/null
+++ b/scripts/pipeline/app-backup.sh
@@ -0,0 +1,149 @@
+#!/bin/bash
+
+kill_pids() {
+ app=$1
+ ids=$(ps aux | grep ${app} | grep -v grep | awk '{print $2}')
+ for id in ${ids}; do
+ kill -9 ${id} >/dev/null 2>&1
+ done
+}
+
+## Wait for the tunnel to finish connecting.
+wait_for_tunnel() {
+ while : ; do
+ [ -n "$(grep 'Press Control-C to stop.' backup.txt)" ] && break
+ echo "Waiting for tunnel..."
+ sleep 1
+ done
+}
+
+date
+
+## Create a tunnel through the application to pull the database.
+echo "Creating tunnel to database..."
+if [[ ${BACKUP_ENV} = "prod" ]]; then
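+ ## SSH is disabled in prod; enable it for the tunnel (the change only takes effect after a restart).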
+ cf enable-ssh ${project}-drupal-${BACKUP_ENV}
+ cf restart --strategy rolling ${project}-drupal-${BACKUP_ENV}
+fi
+cf connect-to-service --no-client ${project}-drupal-${BACKUP_ENV} ${project}-mysql-${BACKUP_ENV} > backup.txt &
+
+wait_for_tunnel
+
+date
+
+## Create variables and credential file for MySQL login.
+echo "Backing up '${BACKUP_ENV}' database..."
+{
+ host=$(cat backup.txt | grep -i host | awk '{print $2}')
+ port=$(cat backup.txt | grep -i port | awk '{print $2}')
+ username=$(cat backup.txt | grep -i username | awk '{print $2}')
+ password=$(cat backup.txt | grep -i password | awk '{print $2}')
+ dbname=$(cat backup.txt | grep -i '^name' | awk '{print $2}')
+
+ mkdir ~/.mysql && chmod 0700 ~/.mysql
+
+ echo "[mysqldump]" > ~/.mysql/mysqldump.cnf
+ echo "user=${username}" >> ~/.mysql/mysqldump.cnf
+ echo "password=${password}" >> ~/.mysql/mysqldump.cnf
+ chmod 400 ~/.mysql/mysqldump.cnf
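+ ## Pointing mysqldump at this file via --defaults-extra-file keeps credentials off the command line.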
+
+ ## Exclude tables without data
+ declare -a excluded_tables=(
+ "cache_advagg_minify"
+ "cache_bootstrap"
+ "cache_config"
+ "cache_container"
+ "cache_data"
+ "cache_default"
+ "cache_discovery"
+ "cache_discovery_migration"
+ "cache_dynamic_page_cache"
+ "cache_entity"
+ "cache_menu"
+ "cache_migrate"
+ "cache_page"
+ "cache_render"
+ "cache_rest"
+ "cache_toolbar"
+ "sessions"
+ "watchdog"
+ "webprofiler"
+ )
+
+ ignored_tables_string=''
+ for table in "${excluded_tables[@]}"
+ do
+ ignored_tables_string+=" --ignore-table=${dbname}.${table}"
+ done
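+ ## The first dump below captures schema for every table; the second dumps data, skipping the tables above.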
+
+ ## Dump structure
+ mysqldump \
+ --defaults-extra-file=~/.mysql/mysqldump.cnf \
+ --host=${host} \
+ --port=${port} \
+ --protocol=TCP \
+ --no-data \
+ ${dbname} > backup_${BACKUP_ENV}.sql
+
+ ## Dump content
+ mysqldump \
+ --defaults-extra-file=~/.mysql/mysqldump.cnf \
+ --host=${host} \
+ --port=${port} \
+ --protocol=TCP \
+ --no-create-info \
+ --skip-triggers \
+ ${ignored_tables_string} \
+ ${dbname} >> backup_${BACKUP_ENV}.sql
+
+ ## Patch out any MySQL 'SET' commands that require admin.
+ sed -i 's/^SET /-- &/' backup_${BACKUP_ENV}.sql
+
+} >/dev/null 2>&1
+
+date
+
+## Kill the backgrounded SSH tunnel.
+echo "Cleaning up old connections..."
+{
+ kill_pids "connect-to-service"
+} >/dev/null 2>&1
+
+## Clean up.
+if [ "${BACKUP_ENV}" = "prod" ]; then
+ cf disable-ssh ${project}-drupal-${BACKUP_ENV}
+fi
+rm -rf backup.txt ~/.mysql
+
+date
+
+# Download media files.
+backup_media="cms/public/media"
+
+echo "Downloading media files..."
+{
+ cf target -s "${cf_space}"
+
+ service="${project}-storage-${BACKUP_ENV}"
+ service_key="${service}-key"
+ cf delete-service-key "${service}" "${service_key}" -f
+ cf create-service-key "${service}" "${service_key}"
+ sleep 2
+ s3_credentials=$(cf service-key "${service}" "${service_key}" | tail -n +2)
+
+ export AWS_ACCESS_KEY_ID=$(echo "${s3_credentials}" | jq -r '.credentials.access_key_id')
+ export bucket=$(echo "${s3_credentials}" | jq -r '.credentials.bucket')
+ export AWS_DEFAULT_REGION=$(echo "${s3_credentials}" | jq -r '.credentials.region')
+ export AWS_SECRET_ACCESS_KEY=$(echo "${s3_credentials}" | jq -r '.credentials.secret_access_key')
+
+ rm -rf ${backup_media}
+ mkdir -p ${backup_media}
+ aws s3 sync --no-verify-ssl s3://${bucket}/${backup_media} ${backup_media}/ 2>/dev/null
+
+ cf delete-service-key "${service}" "${service_key}" -f
+} >/dev/null 2>&1
+
+date
diff --git a/scripts/pipeline/build-theme.sh b/scripts/pipeline/build-theme.sh
new file mode 100755
index 0000000..8858a1f
--- /dev/null
+++ b/scripts/pipeline/build-theme.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+npm install -g gulp
+cd web/themes/custom/digitalgov
+npm install
+npm run build
diff --git a/scripts/pipeline/cloud-gov-deploy.sh b/scripts/pipeline/cloud-gov-deploy.sh
new file mode 100755
index 0000000..25e5c23
--- /dev/null
+++ b/scripts/pipeline/cloud-gov-deploy.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
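+## Render manifest.yml: envsubst expands the ${...} placeholders from pipeline environment variables.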
+mv manifest.yml manifest.tmp
+envsubst < manifest.tmp > manifest.yml
+
+space=${BRANCH}
+[ "${BRANCH}" = "test" ] && space="develop"
+
+cf push --strategy rolling
+
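+## Container-to-container networking: allow Drupal to reach the egress proxy, and the WAF to reach Drupal.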
+cf add-network-policy ${project}-drupal-${BRANCH} ${proxy_name} -s ${proxy_space} --protocol tcp --port ${proxy_port}
+cf add-network-policy ${waf_name} ${project}-drupal-${BRANCH} -s "${project}-${space}" --protocol tcp --port ${drupal_port}
diff --git a/scripts/pipeline/cloud-gov-login.sh b/scripts/pipeline/cloud-gov-login.sh
new file mode 100755
index 0000000..f050b79
--- /dev/null
+++ b/scripts/pipeline/cloud-gov-login.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+echo "Logging into Cloud.gov..."
+{
+ cf login \
+ -a https://api.fr.cloud.gov \
+ -u ${cloudgov_username} \
+ -p ${cloudgov_password} \
+ -o ${cf_org} \
+ -s ${cf_space} > login.log || login_error=1
+} >/dev/null 2>&1
+
+[ -n "${login_error}" ] && echo "Error logging into Cloud.gov!" && exit 1
+
+echo "Login successful!"
\ No newline at end of file
diff --git a/scripts/pipeline/cloud-gov-post-deploy-upkeep.sh b/scripts/pipeline/cloud-gov-post-deploy-upkeep.sh
new file mode 100755
index 0000000..35409a3
--- /dev/null
+++ b/scripts/pipeline/cloud-gov-post-deploy-upkeep.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+set -e
+
+# Enable SSH if in prod
+if [[ ${CIRCLE_BRANCH} = "prod" ]]; then
+ cf enable-ssh ${project}-drupal-${CIRCLE_BRANCH}
+ cf restart --strategy rolling ${project}-drupal-${CIRCLE_BRANCH}
+
+ # Wait until drupal app is running
+ until cf app ${project}-drupal-${CIRCLE_BRANCH} | grep running
+ do
+ sleep 1
+ done
+
+fi
+
+echo "Running upkeep..."
+cf ssh ${project}-drupal-${CIRCLE_BRANCH} --command "ENV=${CIRCLE_BRANCH} PATH=/home/vcap/deps/1/bin:/home/vcap/deps/0/bin:/usr/local/bin:/usr/bin:/bin:/home/vcap/app/php/bin:/home/vcap/app/php/sbin:/home/vcap/app/php/bin:/home/vcap/app/vendor/drush/drush app/scripts/upkeep >/dev/null 2>&1 && echo 'Successfully completed upkeep!' || echo 'Failed to complete upkeep!'"
+
+## Clean up.
+if [[ ${CIRCLE_BRANCH} = "prod" ]]; then
+ cf disable-ssh ${project}-drupal-${CIRCLE_BRANCH}
+fi
diff --git a/scripts/pipeline/cloud-gov-post-deploy.sh b/scripts/pipeline/cloud-gov-post-deploy.sh
new file mode 100755
index 0000000..e6360c9
--- /dev/null
+++ b/scripts/pipeline/cloud-gov-post-deploy.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+set -e
+
+# Wait until drupal app is running
+until cf app ${project}-drupal-${CIRCLE_BRANCH} | grep running
+do
+ sleep 1
+done
+
+# Enable SSH if in prod
+if [[ ${CIRCLE_BRANCH} = "prod" ]]; then
+ cf enable-ssh ${project}-drupal-${CIRCLE_BRANCH}
+ cf restart --strategy rolling ${project}-drupal-${CIRCLE_BRANCH}
+
+ # Wait until drupal app is running
+ until cf app ${project}-drupal-${CIRCLE_BRANCH} | grep running
+ do
+ sleep 1
+ done
+
+fi
+
+echo "Running post deploy steps..."
+cf ssh ${project}-drupal-${CIRCLE_BRANCH} --command "PATH=/home/vcap/deps/1/bin:/home/vcap/deps/0/bin:/usr/local/bin:/usr/bin:/bin:/home/vcap/app/php/bin:/home/vcap/app/php/sbin:/home/vcap/app/php/bin:/home/vcap/app/vendor/drush/drush app/scripts/post-deploy >/dev/null 2>&1 && echo 'Successfully completed post deploy!' || echo 'Failed to complete post deploy!'"
+
+## Clean up.
+if [[ ${CIRCLE_BRANCH} = "prod" ]]; then
+ cf disable-ssh ${project}-drupal-${CIRCLE_BRANCH}
+fi
diff --git a/scripts/pipeline/cloud-gov-remote-command.sh b/scripts/pipeline/cloud-gov-remote-command.sh
new file mode 100755
index 0000000..bfd84b6
--- /dev/null
+++ b/scripts/pipeline/cloud-gov-remote-command.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+application=$1
+command=$2
+
+PATH=$PATH:/home/vcap/app/php/bin:/home/vcap/app/vendor/drush/drush
+
+[ -z "${application}" ] || [ -z "${command}" ] && echo "Command error! Valid format: ${0} " && exit 1
+
+echo "Running command: '$(echo ${command} | cut -d' ' -f1,2)'..."
+{
+ cf ssh ${application} -c "PATH=${PATH}; ${command}"
+} >/dev/null 2>&1
diff --git a/scripts/pipeline/composer-install.sh b/scripts/pipeline/composer-install.sh
new file mode 100755
index 0000000..a333b60
--- /dev/null
+++ b/scripts/pipeline/composer-install.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+## To work for rootless and root images.
+echo "Installing composer..."
+{
+ EXPECTED_CHECKSUM="$(php -r 'copy("https://composer.github.io/installer.sig", "php://stdout");')"
+ php -r "copy('https://getcomposer.org/installer', 'composer-setup.php');"
+ ACTUAL_CHECKSUM="$(php -r "echo hash_file('sha384', 'composer-setup.php');")"
+ if [ "$EXPECTED_CHECKSUM" != "$ACTUAL_CHECKSUM" ]; then
+ >&2 echo 'ERROR: Invalid installer checksum'
+ rm composer-setup.php
+ exit 1
+ fi
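+ ## Standard Composer installer verification: abort if the installer's SHA-384 hash differs from the published signature.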
+
+ php composer-setup.php --quiet
+ RESULT=$?
+ rm composer-setup.php
+
+ chmod +x composer.phar
+ if [ "$(whoami)" != "root" ]; then
+ sudo mv composer.phar /usr/local/bin/composer
+ else
+ mv composer.phar /usr/local/bin/composer
+ fi
+ exit $RESULT
+} >/dev/null 2>&1
\ No newline at end of file
diff --git a/scripts/pipeline/deb-awscli.sh b/scripts/pipeline/deb-awscli.sh
new file mode 100755
index 0000000..43a9cf3
--- /dev/null
+++ b/scripts/pipeline/deb-awscli.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+## To work for rootless and root images.
+echo "Installing AWS CLI..."
+{
+ if [ "$(whoami)" != "root" ]; then
+ curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
+ unzip awscliv2.zip
+ sudo ./aws/install --bin-dir /usr/local/bin --install-dir /usr/local/aws-cli --update
+ else
+ curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
+ unzip awscliv2.zip
+ ./aws/install --bin-dir /usr/local/bin --install-dir /usr/local/aws-cli --update
+ fi
+
+} >/dev/null 2>&1
+echo "Done!"
diff --git a/scripts/pipeline/deb-basic-deps.sh b/scripts/pipeline/deb-basic-deps.sh
new file mode 100755
index 0000000..bd01db0
--- /dev/null
+++ b/scripts/pipeline/deb-basic-deps.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+## To work for rootless and root images.
+echo "Installing basic dependencies..."
+{
+ if [ "$(whoami)" != "root" ]; then
+ sudo apt-get update
+ sudo apt-get install -y curl gettext
+ else
+ apt-get update
+ apt-get install -y curl gettext
+ fi
+} >/dev/null 2>&1
diff --git a/scripts/pipeline/deb-cf-install.sh b/scripts/pipeline/deb-cf-install.sh
new file mode 100755
index 0000000..e880251
--- /dev/null
+++ b/scripts/pipeline/deb-cf-install.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+echo "Installing CloudFoundry repository..."
+{
+ curl -L "https://packages.cloudfoundry.org/stable?release=linux64-binary&version=v8&source=github" | tar -zx
+ if [ "$(whoami)" != "root" ]; then
+ sudo mv cf cf8 /usr/local/bin
+ else
+ mv cf cf8 /usr/local/bin
+ fi
+} >/dev/null 2>&1
diff --git a/scripts/pipeline/deb-mysql-client-install.sh b/scripts/pipeline/deb-mysql-client-install.sh
new file mode 100755
index 0000000..57e68ef
--- /dev/null
+++ b/scripts/pipeline/deb-mysql-client-install.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+echo "Installing MySQL client..."
+{
+ ## To work for rootless and root images.
+ if [ "$(whoami)" != "root" ]; then
+ sudo apt-get update
+ sudo apt-get install -y mysql-client-8.0
+ else
+ apt-get update
+ apt-get install -y mysql-client-8.0
+ fi
+} >/dev/null 2>&1
diff --git a/scripts/pipeline/deb-php-install.sh b/scripts/pipeline/deb-php-install.sh
new file mode 100755
index 0000000..a20319a
--- /dev/null
+++ b/scripts/pipeline/deb-php-install.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+## To work for rootless and root images.
+echo "Installing PHP CLI..."
+{
+ if [ "$(whoami)" != "root" ]; then
+ sudo apt-get update
+ sudo apt-get install -y php-cli
+ else
+ apt-get update
+ apt-get install -y php-cli
+ fi
+} >/dev/null 2>&1
\ No newline at end of file
diff --git a/scripts/pipeline/downsync-preprod.sh b/scripts/pipeline/downsync-preprod.sh
new file mode 100755
index 0000000..eb4d645
--- /dev/null
+++ b/scripts/pipeline/downsync-preprod.sh
@@ -0,0 +1,112 @@
+#!/bin/bash
+if [ "${RESTORE_ENV}" = "prod" ]; then
+ echo "Restoring to prod is not allowed."
+ exit 1
+fi
+
+kill_pids() {
+ app=$1
+ ids=$(ps aux | grep ${app} | grep -v grep | awk '{print $2}')
+ for id in ${ids}; do
+ kill -9 ${id} >/dev/null 2>&1
+ done
+}
+
+## Wait for the tunnel to finish connecting.
+wait_for_tunnel() {
+ while : ; do
+ [ -n "$(grep 'Press Control-C to stop.' restore.txt)" ] && break
+ echo "Waiting for tunnel..."
+ sleep 1
+ done
+}
+
+date
+
+## Download latest prod backup.
+echo "Downloading latest prod database backup..."
+{
+ cf target -s "${project}-prod" >/dev/null 2>&1
+
+ export service="${project}-backup"
+ export service_key="${service}-key"
+ cf delete-service-key "${service}" "${service_key}" -f >/dev/null 2>&1
+ cf create-service-key "${service}" "${service_key}" >/dev/null 2>&1
+ sleep 2
+
+ export s3_credentials=$(cf service-key "${service}" "${service_key}" | tail -n +2)
+
+ export AWS_ACCESS_KEY_ID=$(echo "${s3_credentials}" | jq -r '.credentials.access_key_id')
+ export bucket=$(echo "${s3_credentials}" | jq -r '.credentials.bucket')
+ export AWS_DEFAULT_REGION=$(echo "${s3_credentials}" | jq -r '.credentials.region')
+ export AWS_SECRET_ACCESS_KEY=$(echo "${s3_credentials}" | jq -r '.credentials.secret_access_key')
+
+ # copy latest database from top level
+ aws s3 cp s3://${bucket}/prod/latest.sql.gz ./latest.sql.gz --no-verify-ssl >/dev/null 2>&1 && echo "Successfully copied latest.sql.gz from S3!" || echo "Failed to copy latest.sql.gz from S3!"
+ gunzip latest.sql.gz
+
+ cf delete-service-key "${service}" "${service_key}" -f >/dev/null 2>&1
+}
+
+date
+
+## Create a tunnel through the application to restore the database.
+echo "Creating tunnel to database..."
+if [ "${RESTORE_ENV}" = "test" ]; then
+ cf target -s "${project}-dev" >/dev/null 2>&1
+else
+ cf target -s "${project}-${RESTORE_ENV}" >/dev/null 2>&1
+fi
+cf connect-to-service --no-client ${project}-drupal-${RESTORE_ENV} ${project}-mysql-${RESTORE_ENV} > restore.txt &
+
+wait_for_tunnel
+
+date
+
+## Create variables and credential file for MySQL login.
+echo "Restoring 'prod' database to '${RESTORE_ENV}'..."
+{
+ host=$(cat restore.txt | grep -i host | awk '{print $2}')
+ port=$(cat restore.txt | grep -i port | awk '{print $2}')
+ username=$(cat restore.txt | grep -i username | awk '{print $2}')
+ password=$(cat restore.txt | grep -i password | awk '{print $2}')
+ dbname=$(cat restore.txt | grep -i '^name' | awk '{print $2}')
+
+ mkdir ~/.mysql && chmod 0700 ~/.mysql
+
+ echo "[client]" > ~/.mysql/mysql.cnf
+ echo "user=${username}" >> ~/.mysql/mysql.cnf
+ echo "password=${password}" >> ~/.mysql/mysql.cnf
+ chmod 400 ~/.mysql/mysql.cnf
+
+ mysql \
+ --defaults-extra-file=~/.mysql/mysql.cnf \
+ --host=${host} \
+ --port=${port} \
+ --protocol=TCP \
+ --database=${dbname} < latest.sql
+
+} >/dev/null 2>&1
+
+date
+
+## Kill the backgrounded SSH tunnel.
+echo "Cleaning up old connections..."
+{
+ kill_pids "connect-to-service"
+} >/dev/null 2>&1
+
+## Clean up.
+rm -rf restore.txt ~/.mysql latest.sql
+
+date
+
+echo "Running 'drush cr' on '${RESTORE_ENV}' database..."
+source $(pwd)/scripts/pipeline/cloud-gov-remote-command.sh "${project}-drupal-${RESTORE_ENV}" "drush cr"
+
+date
+
+echo "Running 'drush image-flush --all' on '${RESTORE_ENV}'..."
+source $(pwd)/scripts/pipeline/cloud-gov-remote-command.sh "${project}-drupal-${RESTORE_ENV}" "drush image-flush --all"
+
+date
diff --git a/scripts/pipeline/exports.sh b/scripts/pipeline/exports.sh
new file mode 100755
index 0000000..1ea4d1a
--- /dev/null
+++ b/scripts/pipeline/exports.sh
@@ -0,0 +1,41 @@
+#! /bin/bash
+
+cf_env=$1
+
+export composer_no_dev=1
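+## Omit require-dev packages by default; the test and dev environments flip this back to 0 below.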
+if [ "${cf_env}" == "prod" ]; then
+ export cf_space=${prod_cf_space}
+ export cms_uri=${prod_cms_uri}
+ export drupal_instances=${prod_drupal_instances}
+ export drupal_memory=${prod_drupal_memory}
+ export ssg_uri=${prod_ssg_uri}
+ export waf_name=${prod_waf_name}
+ export waf_external_endpoint=${prod_waf_external_endpoint}
+elif [ "${cf_env}" == "stage" ]; then
+ export cf_space=${stage_cf_space}
+ export cms_uri=${stage_cms_uri}
+ export drupal_instances=${stage_drupal_instances}
+ export drupal_memory=${stage_drupal_memory}
+ export ssg_uri=${stage_ssg_uri}
+ export waf_name=${stage_waf_name}
+ export waf_external_endpoint=${stage_waf_external_endpoint}
+elif [ "${cf_env}" == "test" ]; then
+ export cf_space=${test_cf_space}
+ export cms_uri=${test_cms_uri}
+ export drupal_memory=${test_drupal_memory}
+ export drupal_instances=${test_drupal_instances}
+ export ssg_uri=${test_ssg_uri}
+ export waf_name=${test_waf_name}
+ export waf_external_endpoint=${test_waf_external_endpoint}
+ export composer_no_dev=0
+elif [ "${cf_env}" == "dev" ]; then
+ export cf_space=${dev_cf_space}
+ export cms_uri=${dev_cms_uri}
+ export drupal_instances=${dev_drupal_instances}
+ export drupal_memory=${dev_drupal_memory}
+ export ssg_uri=${dev_ssg_uri}
+ export waf_name=${dev_waf_name}
+ export waf_external_endpoint=${dev_waf_external_endpoint}
+ export composer_no_dev=0
+fi
diff --git a/scripts/pipeline/phpcs-install.sh b/scripts/pipeline/phpcs-install.sh
new file mode 100755
index 0000000..5dcfe85
--- /dev/null
+++ b/scripts/pipeline/phpcs-install.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+echo "Installing PHP CodeSniffer..."
+{
+ git clone --branch 8.3.x https://git.drupal.org/project/coder.git
+ cd coder
+ composer install
+ cd ..
+ export PATH="$PATH:$HOME/project/coder/vendor/bin"
+} >/dev/null 2>&1
\ No newline at end of file
diff --git a/scripts/pipeline/prod-db-backup.sh b/scripts/pipeline/prod-db-backup.sh
new file mode 100755
index 0000000..f1e432e
--- /dev/null
+++ b/scripts/pipeline/prod-db-backup.sh
@@ -0,0 +1,144 @@
+#!/bin/bash
+if [ "${CIRCLE_BRANCH}" != "prod" ]; then
+ echo "This script is for backing up the prod database only."
+ exit 1
+fi
+
+kill_pids() {
+ app=$1
+ ids=$(ps aux | grep ${app} | grep -v grep | awk '{print $2}')
+ for id in ${ids}; do
+ kill -9 ${id} >/dev/null 2>&1
+ done
+}
+
+## Wait for the tunnel to finish connecting.
+wait_for_tunnel() {
+ while : ; do
+ [ -n "$(grep 'Press Control-C to stop.' backup.txt)" ] && break
+ echo "Waiting for tunnel..."
+ sleep 1
+ done
+}
+
+date
+
+## Create a tunnel through the application to pull the database.
+echo "Creating tunnel to database..."
+cf enable-ssh ${project}-drupal-prod
+cf restart --strategy rolling ${project}-drupal-prod
+cf connect-to-service --no-client ${project}-drupal-prod ${project}-mysql-prod > backup.txt &
+
+wait_for_tunnel
+
+date
+
+## Create variables and credential file for MySQL login.
+echo "Backing up 'prod' database..."
+{
+ host=$(cat backup.txt | grep -i host | awk '{print $2}')
+ port=$(cat backup.txt | grep -i port | awk '{print $2}')
+ username=$(cat backup.txt | grep -i username | awk '{print $2}')
+ password=$(cat backup.txt | grep -i password | awk '{print $2}')
+ dbname=$(cat backup.txt | grep -i '^name' | awk '{print $2}')
+
+ mkdir ~/.mysql && chmod 0700 ~/.mysql
+
+ echo "[mysqldump]" > ~/.mysql/mysqldump.cnf
+ echo "user=${username}" >> ~/.mysql/mysqldump.cnf
+ echo "password=${password}" >> ~/.mysql/mysqldump.cnf
+ chmod 400 ~/.mysql/mysqldump.cnf
+
+ ## Exclude tables without data
+ declare -a excluded_tables=(
+ "cache_advagg_minify"
+ "cache_bootstrap"
+ "cache_config"
+ "cache_container"
+ "cache_data"
+ "cache_default"
+ "cache_discovery"
+ "cache_discovery_migration"
+ "cache_dynamic_page_cache"
+ "cache_entity"
+ "cache_menu"
+ "cache_migrate"
+ "cache_page"
+ "cache_render"
+ "cache_rest"
+ "cache_toolbar"
+ "sessions"
+ "watchdog"
+ "webprofiler"
+ )
+
+ ignored_tables_string=''
+ for table in "${excluded_tables[@]}"
+ do
+ ignored_tables_string+=" --ignore-table=${dbname}.${table}"
+ done
+
+ ## Dump structure
+ mysqldump \
+ --defaults-extra-file=~/.mysql/mysqldump.cnf \
+ --host=${host} \
+ --port=${port} \
+ --protocol=TCP \
+ --no-data \
+ ${dbname} > backup_prod.sql
+
+ ## Dump content
+ mysqldump \
+ --defaults-extra-file=~/.mysql/mysqldump.cnf \
+ --host=${host} \
+ --port=${port} \
+ --protocol=TCP \
+ --no-create-info \
+ --skip-triggers \
+ ${ignored_tables_string} \
+ ${dbname} >> backup_prod.sql
+
+ ## Patch out any MySQL 'SET' commands that require admin.
+ sed -i 's/^SET /-- &/' backup_prod.sql
+
+} >/dev/null 2>&1
+
+date
+
+## Kill the backgrounded SSH tunnel.
+echo "Cleaning up old connections..."
+{
+ kill_pids "connect-to-service"
+} >/dev/null 2>&1
+
+## Disable ssh.
+echo "Disabling ssh..."
+cf disable-ssh ${project}-drupal-prod
+
+rm -rf backup.txt ~/.mysql
+
+echo "Saving to backup bucket..."
+{
+ cf target -s "${project}-prod" >/dev/null 2>&1
+
+ export service="${project}-backup"
+ export service_key="${service}-key"
+ cf delete-service-key "${service}" "${service_key}" -f >/dev/null 2>&1
+ cf create-service-key "${service}" "${service_key}" >/dev/null 2>&1
+ sleep 2
+
+ export s3_credentials=$(cf service-key "${service}" "${service_key}" | tail -n +2)
+
+ export AWS_ACCESS_KEY_ID=$(echo "${s3_credentials}" | jq -r '.credentials.access_key_id')
+ export bucket=$(echo "${s3_credentials}" | jq -r '.credentials.bucket')
+ export AWS_DEFAULT_REGION=$(echo "${s3_credentials}" | jq -r '.credentials.region')
+ export AWS_SECRET_ACCESS_KEY=$(echo "${s3_credentials}" | jq -r '.credentials.secret_access_key')
+
+ # copy latest database to top level
+ gzip backup_prod.sql
+ aws s3 cp ./backup_prod.sql.gz s3://${bucket}/prod/latest.sql.gz --no-verify-ssl >/dev/null 2>&1 && echo "Successfully copied latest.sql.gz to S3!" || echo "Failed to copy latest.sql.gz to S3!"
+
+ cf delete-service-key "${service}" "${service_key}" -f >/dev/null 2>&1
+}
+
+date
diff --git a/scripts/pipeline/scheduled-backup.sh b/scripts/pipeline/scheduled-backup.sh
new file mode 100755
index 0000000..93ef4ea
--- /dev/null
+++ b/scripts/pipeline/scheduled-backup.sh
@@ -0,0 +1,150 @@
+#!/bin/bash
+
+backup_space=$1
+
+export BACKUP_ENV=${CIRCLE_BRANCH}
+
+export backup_folder=$(date "+%Y/%m/%d")
+export now=$(date +"%H.%M.%S")
+export today=$(date +%F)
+
+backup_media="cms/public/media"
+
+rm -rf scheduled_backup/
+
+mkdir -p scheduled_backup
+cd scheduled_backup
+
+date
+
+echo "Downloading media files..."
+{
+ cf target -s "${cf_space}"
+
+ service="${project}-storage-${BACKUP_ENV}"
+ service_key="${service}-key"
+ cf delete-service-key "${service}" "${service_key}" -f
+ cf create-service-key "${service}" "${service_key}"
+ sleep 2
+ s3_credentials=$(cf service-key "${service}" "${service_key}" | tail -n +2)
+
+ export AWS_ACCESS_KEY_ID=$(echo "${s3_credentials}" | jq -r '.credentials.access_key_id')
+ export bucket=$(echo "${s3_credentials}" | jq -r '.credentials.bucket')
+ export AWS_DEFAULT_REGION=$(echo "${s3_credentials}" | jq -r '.credentials.region')
+ export AWS_SECRET_ACCESS_KEY=$(echo "${s3_credentials}" | jq -r '.credentials.secret_access_key')
+
+ rm -rf ${BACKUP_ENV}
+
+ aws s3 sync --delete --no-verify-ssl s3://${bucket}/${backup_media} ${backup_media}/ 2>/dev/null
+ tar czvf media_${now}.tar.gz ${backup_media}
+
+
+ cf delete-service-key "${service}" "${service_key}" -f
+} >/dev/null 2>&1
+
+date
+
+echo "Downloading static files..."
+{
+ cf target -s "${cf_space}"
+
+ service="${project}-static-${BACKUP_ENV}"
+ service_key="${service}-key"
+ cf delete-service-key "${service}" "${service_key}" -f
+ cf create-service-key "${service}" "${service_key}"
+ sleep 2
+ s3_credentials=$(cf service-key "${service}" "${service_key}" | tail -n +2)
+
+ export AWS_ACCESS_KEY_ID=$(echo "${s3_credentials}" | jq -r '.credentials.access_key_id')
+ export bucket=$(echo "${s3_credentials}" | jq -r '.credentials.bucket')
+ export AWS_DEFAULT_REGION=$(echo "${s3_credentials}" | jq -r '.credentials.region')
+ export AWS_SECRET_ACCESS_KEY=$(echo "${s3_credentials}" | jq -r '.credentials.secret_access_key')
+
+ rm -rf ${BACKUP_ENV}
+
+ aws s3 sync --no-verify-ssl s3://${bucket}/ static_files/ 2>/dev/null
+ tar czvf static_${now}.tar.gz static_files/
+
+
+ cf delete-service-key "${service}" "${service_key}" -f
+} >/dev/null 2>&1
+
+date
+
+echo "Downloading terraform state..."
+{
+ cf target -s "${backup_space}"
+
+ service="${project}-terraform-backend"
+ service_key="${service}-key"
+ cf delete-service-key "${service}" "${service_key}" -f
+ cf create-service-key "${service}" "${service_key}"
+
+ sleep 2
+ s3_credentials=$(cf service-key "${service}" "${service_key}" | tail -n +2)
+
+ export AWS_ACCESS_KEY_ID=$(echo "${s3_credentials}" | jq -r '.credentials.access_key_id')
+ export bucket=$(echo "${s3_credentials}" | jq -r '.credentials.bucket')
+ export AWS_DEFAULT_REGION=$(echo "${s3_credentials}" | jq -r '.credentials.region')
+ export AWS_SECRET_ACCESS_KEY=$(echo "${s3_credentials}" | jq -r '.credentials.secret_access_key')
+
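+ ## Terraform's S3 backend stores workspace state under the "env:" key prefix.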
+ rm -rf "env:"
+ aws s3 cp --recursive --no-verify-ssl s3://${bucket}/ . 2>/dev/null
+
+ tar czf terraform_state_${now}.tar.gz "env:"
+
+ cf delete-service-key "${service}" "${service_key}" -f
+} >/dev/null 2>&1
+
+date
+
+echo "Saving to backup bucket..."
+{
+ cf target -s "${backup_space}" >/dev/null 2>&1
+
+ export service="${project}-backup"
+ export service_key="${service}-key"
+ cf delete-service-key "${service}" "${service_key}" -f >/dev/null 2>&1
+ cf create-service-key "${service}" "${service_key}" >/dev/null 2>&1
+ sleep 2
+
+ export s3_credentials=$(cf service-key "${service}" "${service_key}" | tail -n +2)
+
+ export AWS_ACCESS_KEY_ID=$(echo "${s3_credentials}" | jq -r '.credentials.access_key_id')
+ export bucket=$(echo "${s3_credentials}" | jq -r '.credentials.bucket')
+ export AWS_DEFAULT_REGION=$(echo "${s3_credentials}" | jq -r '.credentials.region')
+ export AWS_SECRET_ACCESS_KEY=$(echo "${s3_credentials}" | jq -r '.credentials.secret_access_key')
+
+ rm -f backup_${now}.sql
+ cp ../backup_${BACKUP_ENV}.sql backup_${now}.sql
+ gzip backup_${now}.sql
+
+ aws s3 cp ./ s3://${bucket}/${BACKUP_ENV}/${backup_folder} --exclude "*" --include "*.sql.gz" --include "*.tar.gz" --recursive --no-verify-ssl >/dev/null 2>&1
+
+ tar czf latest.tar.gz *.gz
+
+ # delete latest and backups older than 15 days in the env's top level directory
+ aws s3 rm s3://${bucket}/${BACKUP_ENV}/latest.tar.gz --no-verify-ssl >/dev/null 2>&1
+ aws s3 ls s3://${bucket}/${BACKUP_ENV}/ | while read -r line; do
+ create_date=$(echo $line | awk '{print $1" "$2}')
+ create_date=$(date --date "$create_date" +%s 2>/dev/null)
+ older_than=$(date --date "15 days ago" +%s)
+ if [[ $create_date -le $older_than ]]; then
+ filename=$(echo $line | awk '{print $4}')
+ if [[ $filename != "" ]]; then
+ aws s3 rm s3://${bucket}/${BACKUP_ENV}/$filename --no-verify-ssl >/dev/null 2>&1 && echo "Successfully deleted $filename from S3!" || echo "Failed to delete $filename from S3!"
+ fi
+ fi
+ done;
+
+ aws s3 cp ./latest.tar.gz s3://${bucket}/${BACKUP_ENV}/ --no-verify-ssl >/dev/null 2>&1 && echo "Successfully copied latest.tar.gz to S3!" || echo "Failed to copy latest.tar.gz to S3!"
+ aws s3 cp ./latest.tar.gz s3://${bucket}/${BACKUP_ENV}/${today}.tar.gz --no-verify-ssl >/dev/null 2>&1 && echo "Successfully copied ${today}.tar.gz to S3!" || echo "Failed to copy ${today}.tar.gz to S3!"
+
+ # copy latest database to top level
+ aws s3 cp ./backup_${now}.sql.gz s3://${bucket}/${BACKUP_ENV}/latest.sql.gz --no-verify-ssl >/dev/null 2>&1 && echo "Successfully copied latest.sql.gz to S3!" || echo "Failed to copy latest.sql.gz to S3!"
+
+ cf delete-service-key "${service}" "${service_key}" -f >/dev/null 2>&1
+}
+
+date
diff --git a/scripts/post-deploy b/scripts/post-deploy
new file mode 100755
index 0000000..411dfb6
--- /dev/null
+++ b/scripts/post-deploy
@@ -0,0 +1,18 @@
+#! /bin/bash
+
+echo "Updating drupal ... "
+drush state:set system.maintenance_mode 1 -y
+drush cr
+drush updatedb --no-cache-clear -y
+drush cim -y
+drush locale-check
+drush locale-update
+
+echo "Uploading public files to S3 ..."
+drush s3fs-rc
+drush s3fs-cl -y --scheme=public --condition=newer
+
+drush cr
+drush state:set system.maintenance_mode 0 -y
+echo "Post deploy finished!"
diff --git a/scripts/start b/scripts/start
new file mode 100755
index 0000000..ec34810
--- /dev/null
+++ b/scripts/start
@@ -0,0 +1,13 @@
+#! /bin/bash
+
+home="/home/vcap"
+app_path="${home}/app"
+
+## Start PHP FPM
+${app_path}/php/sbin/php-fpm -p "${app_path}/php/etc" -y "${app_path}/php/etc/php-fpm.conf" -c "${app_path}/php/etc" &
+
+## Start Apache
+${app_path}/httpd/bin/apachectl -f "${app_path}/httpd/conf/httpd.conf" -k start -DFOREGROUND &
+
+## Run entry point
+${app_path}/scripts/entrypoint &
\ No newline at end of file
diff --git a/scripts/upkeep b/scripts/upkeep
new file mode 100755
index 0000000..bb76f31
--- /dev/null
+++ b/scripts/upkeep
@@ -0,0 +1,132 @@
+#!/bin/bash
+set -e
+
+home="/home/vcap"
+app_path="${home}/app"
+html_path="${app_path}/html"
+
+source ${home}/.bashrc
+
+mkdir -p ${html_path}
+
+export PYTHONWARNINGS="ignore:Unverified HTTPS request"
+
+export application_uri=$(echo $VCAP_APPLICATION | jq -r '.application_uris[]')
+
+export AWS_ACCESS_KEY_ID=$(echo $VCAP_SERVICES | jq -r '.s3[] | select(.name | strings | test("static")).credentials.access_key_id')
+export AWS_SECRET_ACCESS_KEY=$(echo $VCAP_SERVICES | jq -r '.s3[] | select(.name | strings | test("static")).credentials.secret_access_key')
+export AWS_DEFAULT_REGION=$(echo $VCAP_SERVICES | jq -r '.s3[] | select(.name | strings | test("static")).credentials.region')
+
+export bucket_name=$(echo $VCAP_SERVICES | jq -r '.s3[] | select(.name | strings | test("static")).name')
+export bucket=$(echo $VCAP_SERVICES | jq -r '.s3[] | select(.name | strings | test("static")).credentials.bucket')
+export bucket_endpoint=$(echo $VCAP_SERVICES | jq -r '.s3[] | select(.name | strings | test("static")).credentials.endpoint')
+
+export ssg_endpoint="https://ssg-${environment}.vote.gov"
+[ "${environment}" = "stage" ] && export ssg_endpoint="https://staging.vote.gov"
+[ "${environment}" = "prod" ] && export ssg_endpoint="https://vote.gov"
+export ssg_sitemap_endpoint=${ssg_endpoint}
+
+cd ${app_path}
+echo "**************************************************"
+echo "Running 'drush cron' in '${environment}'..."
+echo "**************************************************"
+drush --uri=${ssg_endpoint} cron
+echo "'drush cron' task...completed!"
+echo ""
+
+echo "**************************************************"
+echo "Running 'drush tome:static' in '${environment}'..."
+echo "**************************************************"
+drush tome:static --uri=${ssg_endpoint} --path-count=1 --retry-count=3 -y
+drush tome:static-export-path '/sitemap.xml,/sitemap_generator/default/sitemap.xsl' --uri=${ssg_sitemap_endpoint} --retry-count=3 -y
+drush cr
+echo "'drush tome:static' task...completed!"
+echo ""
+
+echo "**************************************************"
+echo "Performing find and replace tasks..."
+echo "**************************************************"
+echo "-- Replace s3-based file urls with relative urls"
+echo "**************************************************"
+find ${html_path} -type f -exec grep -l 'http[s]*://[^/]\+/s3/files' {} \; -exec sed -i 's#http[s]*://[^/]\+/s3/files#/s3/files#g' {} +
+echo "**************************************************"
+echo "-- Replace absolute urls with relative urls in generated files"
+echo "**************************************************"
+find ${html_path}/sites/default/files -type f -exec grep -l "${ssg_endpoint}/" {} \; -exec sed -i "s#${ssg_endpoint}/#/#g" {} +
+echo "Performing find and replace tasks...completed!"
+echo ""
+
+echo "**************************************************"
+echo "Adding missing Core assets vendor directory..."
+echo "**************************************************"
+mkdir -p ${html_path}/core/assets
+cp -rfp ${app_path}/web/core/assets/vendor ${html_path}/core/assets/
+echo "Missing Core assets files...completed!"
+echo ""
+
+echo "**************************************************"
+echo "Adding missing module files for Sitemap..."
+echo "**************************************************"
+mkdir -p ${html_path}/modules/contrib/simple_sitemap/xsl
+cp -rfp ${app_path}/web/modules/contrib/simple_sitemap/xsl/* ${html_path}/modules/contrib/simple_sitemap/xsl/
+echo "Missing module files for Sitemap...completed!"
+echo ""
+
+echo "**************************************************"
+echo "Adding missing data files for NVRF..."
+echo "**************************************************"
+cp -rfp ${app_path}/web/data ${html_path}/data
+echo "Missing data files for NVRF...completed!"
+echo ""
+
+echo "**************************************************"
+echo "Adding Vote.gov custom theme assets..."
+echo "**************************************************"
+mkdir -p ${html_path}/themes/custom/votegov
+cp -rfp ${app_path}/web/themes/custom/votegov/dist ${html_path}/themes/custom/votegov/
+cp -rfp ${app_path}/web/themes/custom/votegov/fonts ${html_path}/themes/custom/votegov/
+cp -rfp ${app_path}/web/themes/custom/votegov/img ${html_path}/themes/custom/votegov/
+cp -rfp ${app_path}/web/themes/custom/votegov/json ${html_path}/themes/custom/votegov/
+echo "Adding Vote.gov custom theme assets...completed!"
+echo ""
+
+echo "**************************************************"
+echo "Deleting 'node' redirect files..."
+echo "**************************************************"
+find ${html_path} -type d -name "node" -print0 | while IFS= read -r -d '' node_dir; do
+ # Find index.html files within each 'node' directory that are a redirect.
+ find "$node_dir" -type f -path "*/index.html" -exec grep -q "http-equiv=\"refresh\"" {} \; -delete -exec dirname {} \;
+done
+echo "Deleting 'node' redirect files...completed!"
+echo ""
+
+echo "**************************************************"
+echo "Deleting taxonomy directories..."
+echo "**************************************************"
+rm -rf ${html_path}/taxonomy 2>/dev/null
+echo "Deleting taxonomy directories...completed!"
+echo ""
+
+echo "**************************************************"
+echo "Removing miscellaneous files..."
+echo "**************************************************"
+rm -rf ${html_path}/disabled-state-mail-in-forms 2>/dev/null
+echo "Removing miscellaneous files...completed!"
+echo ""
+
+echo "**************************************************"
+echo "Removing empty directories..."
+echo "**************************************************"
+find ${html_path} -type d -empty -delete
+echo "Removing empty directories...completed!"
+echo ""
+
+cd ${html_path}
+echo "**************************************************"
+echo "Copying static files to '${bucket_name}'..."
+echo "**************************************************"
+aws s3 sync . s3://${bucket} --delete --no-verify-ssl 2>/dev/null
+aws s3 website s3://${bucket} --index-document index.html --error-document /404/index.html --no-verify-ssl 2>/dev/null
+echo "Copy to '${bucket_name}'...completed!"
+echo ""