
Commit

Merge pull request #69 from Guslington/feature/sqs
Feature/sqs
Guslington authored Jan 15, 2019
2 parents ce3e224 + 646df68 commit 819a89b
Showing 5 changed files with 20 additions and 20 deletions.
README.md: 3 additions & 3 deletions
@@ -391,10 +391,10 @@ $ shelvery ebs pull_shared_backups
 
 ## Waiting on backups to complete
 
-By default shelvery will wait by sleeping and then querying the aws api for a complete status. 
+By default shelvery will wait by sleeping and then querying the aws api for a complete status.
 If this is not your preferred method you can offload the sleep to SQS to save costs on lambda compute.
 
-You can set the sqs url and the wait period (seconds) before lambda is invoked to check on the status of the backup. 
+You can set the sqs url and the wait period (seconds) before lambda is invoked to check on the status of the backup.
 If the backup is not complete it will be passed back to sqs to wait for the same period.
 
 ```text
@@ -414,4 +414,4 @@ shelvery_sqs_queue_wait_period=300
 2. Run the `deploy-sam-template.sh` script with the options to deploy the template in the target account.
 
 - `-b` [required] source bucket to deploy the sam package to
-- `-v` [optional] shelvery version to deploy, defaults to `0.8.0`
+- `-v` [optional] shelvery version to deploy, defaults to `0.8.1`
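
The README text in the first hunk above describes shelvery's default wait strategy: sleep, then query the AWS API for a complete status, and repeat until the backup finishes. A minimal sketch of that idea for an EBS snapshot, using hypothetical function and parameter names rather than shelvery's internal API:

```python
import time

import boto3


def wait_for_snapshot(snapshot_id: str, region: str, interval: int = 300, max_checks: int = 20) -> bool:
    """Illustrative only: block until an EBS snapshot reports 'completed'."""
    ec2 = boto3.client('ec2', region_name=region)
    for _ in range(max_checks):
        snapshot = ec2.describe_snapshots(SnapshotIds=[snapshot_id])['Snapshots'][0]
        if snapshot['State'] == 'completed':
            return True
        # The sleep happens inside the running process; on Lambda that is billed compute time.
        time.sleep(interval)
    return False
```

On Lambda this style keeps the function running (and billed) for the whole wait, which is the cost the SQS offload option is meant to avoid.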
deploy-sam-template.sh: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 #!/bin/bash
 set -e
 
-SHELVERY_VERSION=0.8.0
+SHELVERY_VERSION=0.8.1
 
 while getopts ":b:v:a:" opt; do
   case $opt in
setup.py: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 from setuptools import setup
 
-setup(name='shelvery', version='0.8.0', author='Base2Services R&D',
+setup(name='shelvery', version='0.8.1', author='Base2Services R&D',
       author_email='itsupport@base2services.com',
       url='http://github.com/base2Services/shelvery-aws-backups',
       classifiers=[
shelvery/__init__.py: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-__version__ = '0.8.0'
+__version__ = '0.8.1'
 LAMBDA_WAIT_ITERATION = 'lambda_wait_iteration'
 S3_DATA_PREFIX = 'backups'
 SHELVERY_DO_BACKUP_TAGS = ['True', 'true', '1', 'TRUE']
shelvery/engine.py: 14 additions & 14 deletions
@@ -452,15 +452,16 @@ def do_copy_backup(self, map_args={}, **kwargs):
         """
 
         kwargs.update(map_args)
-
+        backup_id = kwargs['BackupId']
+        origin_region = kwargs['OriginRegion']
         # if backup is not available, exit and rely on recursive lambda call copy backup
         # in non lambda mode this should never happen
         if RuntimeConfig.is_offload_queueing(self):
-            if not self.is_backup_available(backup_region, backup_id):
-                self.store_backup_data(self.get_backup_resource(backup_region, backup_id))
+            if not self.is_backup_available(origin_region,backup_id):
+                self.copy_backup(self.get_backup_resource(origin_region, backup_id))
         else:
-            if not self.wait_backup_available(backup_region=kwargs['OriginRegion'],
-                                              backup_id=kwargs['BackupId'],
+            if not self.wait_backup_available(backup_region=origin_region,
+                                              backup_id=backup_id,
                                               lambda_method='do_copy_backup',
                                               lambda_args=kwargs):
                 return
@@ -544,21 +545,21 @@ def do_copy_backup(self, map_args={}, **kwargs):
     def do_share_backup(self, map_args={}, **kwargs):
         """Share backup with other AWS account, actual implementation"""
         kwargs.update(map_args)
-
+        backup_id = kwargs['BackupId']
+        backup_region = kwargs['Region']
+        backup_resource = self.get_backup_resource(backup_region, backup_id)
         # if backup is not available, exit and rely on recursive lambda call do share backup
         # in non lambda mode this should never happen
         if RuntimeConfig.is_offload_queueing(self):
             if not self.is_backup_available(backup_region, backup_id):
-                self.store_backup_data(self.get_backup_resource(backup_region, backup_id))
+                self.share_backup(self.get_backup_resource(backup_region, backup_id))
         else:
-            if not self.wait_backup_available(backup_region=kwargs['Region'],
-                                              backup_id=kwargs['BackupId'],
+            if not self.wait_backup_available(backup_region=backup_region,
+                                              backup_id=backup_id,
                                               lambda_method='do_share_backup',
                                               lambda_args=kwargs):
                 return
 
-        backup_region = kwargs['Region']
-        backup_id = kwargs['BackupId']
         destination_account_id = kwargs['AwsAccountId']
         self.logger.info(f"Do share backup {backup_id} ({backup_region}) with {destination_account_id}")
         try:
@@ -615,20 +616,19 @@ def do_store_backup_data(self, map_args={}, **kwargs):
         kwargs.update(map_args)
         backup_id = kwargs['BackupId']
         backup_region = kwargs['BackupRegion']
-
+        backup_resource = self.get_backup_resource(backup_region, backup_id)
         # if backup is not available, exit and rely on recursive lambda call write metadata
         # in non lambda mode this should never happen
         if RuntimeConfig.is_offload_queueing(self):
             if not self.is_backup_available(backup_region, backup_id):
-                self.store_backup_data(self.get_backup_resource(backup_region, backup_id))
+                self.store_backup_data(backup_resource)
         else:
             if not self.wait_backup_available(backup_region=backup_region,
                                               backup_id=backup_id,
                                               lambda_method='do_store_backup_data',
                                               lambda_args=kwargs):
                 return
 
-        backup_resource = self.get_backup_resource(backup_region, backup_id)
         if backup_resource.account_id is None:
             backup_resource.account_id = self.account_id
         bucket = self._get_data_bucket(backup_resource.region)
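
The three hunks above share one pattern: when offload queueing is enabled, the engine checks the backup once and, if it is not yet available, re-dispatches the same operation instead of blocking in wait_backup_available. A rough sketch of how such a hand-off to SQS can work, assuming a hypothetical helper name and payload format rather than shelvery's actual message schema:

```python
import json

import boto3


def requeue_operation(queue_url: str, wait_period: int, method: str, args: dict) -> None:
    """Push a pending operation back onto SQS so a later Lambda invocation retries it."""
    boto3.client('sqs').send_message(
        QueueUrl=queue_url,
        MessageBody=json.dumps({'method': method, 'arguments': args}),
        # SQS holds the message instead of the Lambda sleeping; the maximum delay is 900 seconds.
        DelaySeconds=wait_period,
    )


# Hypothetical usage mirroring do_copy_backup; wait_period would come from the
# shelvery_sqs_queue_wait_period setting shown in the README (e.g. 300).
# requeue_operation(queue_url, 300, 'do_copy_backup',
#                   {'BackupId': 'backup-123', 'OriginRegion': 'us-east-1'})
```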
