Cloud Deployment: Heroku, AWS, and GCP for Python Applications
Deploying Python applications to the cloud requires understanding different platforms, deployment strategies, and scaling considerations. This guide covers practical patterns for deploying to Heroku, AWS, and GCP.
Heroku Deployment
Procfile Configuration
# Procfile
web: gunicorn app:app
worker: celery -A app.celery worker
beat: celery -A app.celery beat
Heroku Deployment Script
import subprocess
import os
class HerokuDeployer:
    """Automate common Heroku deployment tasks via the Heroku CLI."""

    def __init__(self, app_name):
        self.app_name = app_name

    def login(self):
        """Authenticate the local Heroku CLI session (interactive)."""
        subprocess.run(['heroku', 'login'], check=True)

    def create_app(self):
        """Create the Heroku application."""
        subprocess.run(['heroku', 'create', self.app_name], check=True)

    def set_config(self, config_vars):
        """Set config vars (environment variables), one CLI call each."""
        for name, val in config_vars.items():
            cmd = ['heroku', 'config:set', f'{name}={val}', '-a', self.app_name]
            subprocess.run(cmd, check=True)

    def deploy(self):
        """Push the local main branch to the Heroku git remote."""
        subprocess.run(['git', 'push', 'heroku', 'main'], check=True)

    def scale_dynos(self, dyno_type, quantity):
        """Scale the given dyno type to the requested count."""
        cmd = ['heroku', 'ps:scale', f'{dyno_type}={quantity}', '-a', self.app_name]
        subprocess.run(cmd, check=True)

    def view_logs(self):
        """Stream application logs (runs until interrupted, so no check=True)."""
        subprocess.run(['heroku', 'logs', '--tail', '-a', self.app_name])
# Usage
# deployer = HerokuDeployer('my-python-app')
# deployer.login()
# deployer.create_app()
# deployer.set_config({'DATABASE_URL': 'postgresql://...'})
# deployer.deploy()
requirements.txt for Heroku
Flask==2.3.0
gunicorn==20.1.0
python-dotenv==1.0.0
psycopg2-binary==2.9.0
redis==4.5.0
celery==5.2.0
AWS Deployment
Elastic Beanstalk Deployment
import boto3
import json
class EBDeployer:
    """Deploy a Python application to AWS Elastic Beanstalk.

    Wraps the boto3 Elastic Beanstalk and S3 clients to create the
    application/environment and roll out new application versions.
    """

    def __init__(self, app_name, env_name, region='us-east-1'):
        self.app_name = app_name
        self.env_name = env_name
        self.eb_client = boto3.client('elasticbeanstalk', region_name=region)
        self.s3_client = boto3.client('s3', region_name=region)

    def create_application(self):
        """Create the Elastic Beanstalk application, tolerating reruns.

        NOTE(review): TooManyApplicationsException is raised when the
        account application quota is exceeded; a duplicate name is
        reported by the API as an InvalidParameterValue error instead —
        confirm which failure this except clause is meant to absorb.
        """
        try:
            self.eb_client.create_application(
                ApplicationName=self.app_name,
                Description='Python application'
            )
            print(f"Created application: {self.app_name}")
        except self.eb_client.exceptions.TooManyApplicationsException:
            print(f"Application {self.app_name} already exists")

    def create_environment(self, instance_type='t3.micro'):
        """Create the Elastic Beanstalk environment.

        Args:
            instance_type: EC2 instance type for the environment's
                launch configuration (default 't3.micro').
        """
        # Fix: create_environment has no top-level InstanceType parameter
        # (boto3 would raise ParamValidationError). The instance type must
        # be supplied as an option setting in the
        # aws:autoscaling:launchconfiguration namespace.
        self.eb_client.create_environment(
            ApplicationName=self.app_name,
            EnvironmentName=self.env_name,
            SolutionStackName='64bit Amazon Linux 2 v5.5.0 running Python 3.9',
            OptionSettings=[
                {
                    'Namespace': 'aws:autoscaling:launchconfiguration',
                    'OptionName': 'InstanceType',
                    'Value': instance_type
                },
                {
                    'Namespace': 'aws:elasticbeanstalk:application:environment',
                    'OptionName': 'PYTHONPATH',
                    'Value': '/var/app/current'
                }
            ]
        )
        print(f"Created environment: {self.env_name}")

    def upload_version(self, bucket, key, file_path):
        """Upload an application source bundle to S3."""
        self.s3_client.upload_file(file_path, bucket, key)
        print(f"Uploaded {file_path} to s3://{bucket}/{key}")

    def deploy_version(self, bucket, key, version_label):
        """Register the S3 bundle as a new version and deploy it.

        Args:
            bucket: S3 bucket holding the bundle.
            key: S3 object key of the bundle.
            version_label: unique label for this application version.
        """
        self.eb_client.create_application_version(
            ApplicationName=self.app_name,
            VersionLabel=version_label,
            SourceBundle={
                'S3Bucket': bucket,
                'S3Key': key
            }
        )
        self.eb_client.update_environment(
            ApplicationName=self.app_name,
            EnvironmentName=self.env_name,
            VersionLabel=version_label
        )
        print(f"Deployed version: {version_label}")

    def get_environment_status(self):
        """Return {'status', 'health', 'endpoint'} for the environment.

        Returns None when the environment does not exist. 'endpoint' may
        be None while the environment is still launching.
        """
        response = self.eb_client.describe_environments(
            ApplicationName=self.app_name,
            EnvironmentNames=[self.env_name]
        )
        if response['Environments']:
            env = response['Environments'][0]
            return {
                'status': env['Status'],
                'health': env['HealthStatus'],
                'endpoint': env.get('EndpointURL')
            }
        return None
# Usage
# deployer = EBDeployer('my-app', 'production')
# deployer.create_application()
# deployer.create_environment()
.ebextensions Configuration
# .ebextensions/python.config
option_settings:
  aws:elasticbeanstalk:container:python:
    WSGIPath: app:app
  aws:elasticbeanstalk:application:environment:
    PYTHONPATH: /var/app/current
  aws:autoscaling:asg:
    MinSize: 2
    MaxSize: 6
  aws:autoscaling:trigger:
    MeasureName: CPUUtilization
    Statistic: Average
    Unit: Percent
    UpperThreshold: 70
    LowerThreshold: 30
commands:
  01_install_dependencies:
    command: "pip install -r requirements.txt"
Lambda Deployment
import json
import os
import zipfile

import boto3
class LambdaDeployer:
    """Deploy a Python function to AWS Lambda."""

    def __init__(self, function_name, region='us-east-1'):
        self.function_name = function_name
        self.lambda_client = boto3.client('lambda', region_name=region)

    def create_deployment_package(self, source_dir, output_file):
        """Zip the contents of source_dir into output_file.

        Walks directories and files in sorted order so repeated builds
        produce identically-ordered archives, and compresses entries
        (Lambda enforces package size limits).
        """
        with zipfile.ZipFile(output_file, 'w', zipfile.ZIP_DEFLATED) as zf:
            for root, dirs, files in os.walk(source_dir):
                dirs.sort()  # deterministic traversal order
                for file in sorted(files):
                    file_path = os.path.join(root, file)
                    # Store paths relative to source_dir so the archive
                    # unpacks with the layout Lambda expects.
                    arcname = os.path.relpath(file_path, source_dir)
                    zf.write(file_path, arcname)
        print(f"Created deployment package: {output_file}")

    def deploy_function(self, zip_file, role_arn, handler='lambda_function.lambda_handler'):
        """Create the Lambda function, or update its code if it exists.

        Args:
            zip_file: path to the deployment package.
            role_arn: IAM execution role ARN for the function.
            handler: module.function entry point.
        """
        with open(zip_file, 'rb') as f:
            zip_content = f.read()
        try:
            response = self.lambda_client.create_function(
                FunctionName=self.function_name,
                Runtime='python3.9',
                Role=role_arn,
                Handler=handler,
                Code={'ZipFile': zip_content},
                Timeout=60,
                MemorySize=256
            )
            print(f"Created function: {self.function_name}")
            return response
        except self.lambda_client.exceptions.ResourceConflictException:
            # Function already exists: push new code instead.
            response = self.lambda_client.update_function_code(
                FunctionName=self.function_name,
                ZipFile=zip_content
            )
            print(f"Updated function: {self.function_name}")
            return response

    def invoke_function(self, payload):
        """Invoke the function synchronously and return its decoded result.

        Requires `import json` at module level (missing in the original
        snippet's import block).
        """
        response = self.lambda_client.invoke(
            FunctionName=self.function_name,
            InvocationType='RequestResponse',
            Payload=json.dumps(payload)
        )
        return json.loads(response['Payload'].read())
# Usage
# deployer = LambdaDeployer('my-function')
# deployer.create_deployment_package('src', 'lambda.zip')
# deployer.deploy_function('lambda.zip', 'arn:aws:iam::123456789:role/lambda-role')
GCP Deployment
Cloud Run Deployment
import subprocess
import json
class CloudRunDeployer:
    """Build and deploy a containerized Python app on Google Cloud Run."""

    def __init__(self, project_id, service_name, region='us-central1'):
        self.project_id = project_id
        self.service_name = service_name
        self.region = region

    def build_image(self, dockerfile_path, image_name):
        """Build the container image with Cloud Build and push to GCR."""
        tag = f'gcr.io/{self.project_id}/{image_name}'
        subprocess.run(
            ['gcloud', 'builds', 'submit',
             '--tag', tag,
             '--project', self.project_id,
             dockerfile_path],
            check=True,
        )

    def deploy_service(self, image_name, memory='512Mi', cpu='1'):
        """Deploy (or update) the managed Cloud Run service from the image."""
        cmd = [
            'gcloud', 'run', 'deploy', self.service_name,
            '--image', f'gcr.io/{self.project_id}/{image_name}',
            '--platform', 'managed',
            '--region', self.region,
            '--memory', memory,
            '--cpu', cpu,
            '--allow-unauthenticated',
            '--project', self.project_id,
        ]
        subprocess.run(cmd, check=True)

    def set_environment_variables(self, env_vars):
        """Apply environment variables to the running service."""
        pairs = ','.join(f'{key}={value}' for key, value in env_vars.items())
        subprocess.run(
            ['gcloud', 'run', 'services', 'update', self.service_name,
             '--update-env-vars', pairs,
             '--region', self.region,
             '--project', self.project_id],
            check=True,
        )

    def get_service_url(self):
        """Return the public URL of the deployed service."""
        completed = subprocess.run(
            ['gcloud', 'run', 'services', 'describe', self.service_name,
             '--region', self.region,
             '--format', 'value(status.url)',
             '--project', self.project_id],
            capture_output=True, text=True, check=True,
        )
        return completed.stdout.strip()
# Usage
# deployer = CloudRunDeployer('my-project', 'my-service')
# deployer.build_image('.', 'my-app')
# deployer.deploy_service('my-app')
# url = deployer.get_service_url()
App Engine Deployment
import subprocess
class AppEngineDeployer:
    """Deploy a Python app to Google App Engine via the gcloud CLI."""

    def __init__(self, project_id):
        self.project_id = project_id

    def deploy(self, app_yaml_path=None):
        """Deploy to App Engine.

        Args:
            app_yaml_path: optional explicit path to app.yaml; when
                omitted, gcloud uses the app.yaml in the current directory.
        """
        cmd = [
            'gcloud', 'app', 'deploy',
            '--project', self.project_id,
            '--quiet'
        ]
        if app_yaml_path:
            cmd.append(app_yaml_path)
        subprocess.run(cmd, check=True)
        # Fix: was an f-string with no placeholders.
        print("Deployed to App Engine")

    def get_app_url(self):
        """Return the default appspot.com URL for the project."""
        return f"https://{self.project_id}.appspot.com"
# Usage
# deployer = AppEngineDeployer('my-project')
# deployer.deploy()
app.yaml Configuration
# app.yaml
runtime: python39
env: standard
entrypoint: gunicorn -b :$PORT app:app
env_variables:
  LOG_LEVEL: "INFO"
automatic_scaling:
  min_instances: 1
  max_instances: 10
  target_cpu_utilization: 0.65
  target_throughput_utilization: 0.6
Deployment Best Practices
Health Checks
from flask import Flask, jsonify
app = Flask(__name__)
@app.route('/health', methods=['GET'])
def health_check():
    """Liveness probe: report service status and version with HTTP 200."""
    payload = {
        'status': 'healthy',
        'version': '1.0.0'
    }
    return jsonify(payload), 200
@app.route('/ready', methods=['GET'])
def readiness_check():
    """Readiness probe: verify dependencies before accepting traffic."""
    try:
        # Dependency checks go here, e.g.:
        # db.session.execute('SELECT 1')
        body, code = {'ready': True}, 200
    except Exception as exc:
        body, code = {'ready': False, 'error': str(exc)}, 503
    return jsonify(body), code
Graceful Shutdown
import signal
import sys
class GracefulShutdown:
    """Install SIGTERM/SIGINT handlers and run the wrapped application."""

    def __init__(self, app):
        self.app = app
        self.shutdown_requested = False
        # Register the same handler for both termination signals.
        for sig in (signal.SIGTERM, signal.SIGINT):
            signal.signal(sig, self._signal_handler)

    def _signal_handler(self, signum, frame):
        # Record the request; the serving loop is expected to observe it.
        print(f"Received signal {signum}")
        self.shutdown_requested = True

    def run(self):
        """Start the wrapped application's serving loop."""
        self.app.run()
Common Pitfalls and Best Practices
❌ Bad: Hardcoded Configuration
# DON'T: Hardcode configuration
DATABASE_URL = "postgresql://user:pass@localhost/db"
✅ Good: Use Environment Variables
# DO: Use environment variables
DATABASE_URL = os.getenv('DATABASE_URL')
❌ Bad: No Health Checks
# DON'T: Deploy without health checks
✅ Good: Implement Health Checks
# DO: Implement health and readiness checks
@app.route('/health')
def health():
return {'status': 'ok'}
❌ Bad: No Graceful Shutdown
# DON'T: Kill process immediately
✅ Good: Graceful Shutdown
# DO: Handle shutdown signals gracefully
signal.signal(signal.SIGTERM, shutdown_handler)
Summary
Cloud deployment requires:
- Platform selection based on requirements
- Containerization with Docker
- Configuration management with environment variables
- Health checks for reliability
- Graceful shutdown for data integrity
- Auto-scaling for performance
- Monitoring and logging for observability
- CI/CD integration for automation
These patterns ensure reliable, scalable, and maintainable cloud deployments.
Comments