Introduction
Feature flags transform how teams ship software. By decoupling deployment from release, teams can test in production, roll out gradually, and revert instantly. This guide covers feature flag implementation, experimentation, and building a culture of controlled rollouts.
Feature flags (or toggles) are conditional logic controls that enable or disable features without deploying new code.
Types of Feature Flags
Classification
| Type | Purpose | Lifetime | Scope |
|---|---|---|---|
| Release | Safely deploy unreleased features | Short | Developer |
| Experiment | A/B testing | Medium | Product |
| Ops | Operational controls such as kill switches and maintenance modes | Medium | Ops |
| Permission | User-specific features | Long | Customer |
Implementation Patterns
# Simple feature flag
import hashlib


class FeatureFlags:
    """In-memory feature-flag store supporting on/off and percentage rollouts."""

    def __init__(self):
        self.flags = {
            "new_checkout": True,
            "dark_mode": False,
            "ai_recommendations": {
                "enabled": True,
                "percentage": 10  # 10% rollout
            }
        }

    def is_enabled(self, flag_name):
        """Check if feature is enabled globally.

        For dict-configured flags this returns the "enabled" switch.
        (The original returned the raw config dict, which was truthy
        even when the flag was disabled or at 0% rollout.)
        Unknown flags default to off.
        """
        flag = self.flags.get(flag_name, False)
        if isinstance(flag, dict):
            return flag.get("enabled", False)
        return flag

    def is_enabled_for_user(self, flag_name, user_id):
        """Check with percentage rollout for a specific user.

        Uses a stable digest so the same user lands in the same bucket
        across process restarts — builtin hash() is randomized per
        process by PYTHONHASHSEED and is NOT consistent.
        """
        flag = self.flags.get(flag_name, {})
        if isinstance(flag, dict):
            if not flag.get("enabled", False):
                return False
            percentage = flag.get("percentage", 100)
            return self._bucket(flag_name, user_id) < percentage
        return flag

    @staticmethod
    def _bucket(flag_name, user_id):
        """Deterministically map (flag, user) into a bucket in [0, 100)."""
        digest = hashlib.sha256(f"{flag_name}:{user_id}".encode()).hexdigest()
        return int(digest, 16) % 100
# Usage
flags = FeatureFlags()


def render_checkout():
    """Render the checkout page, picking the flag-selected implementation."""
    use_new = flags.is_enabled("new_checkout")
    return render_new_checkout() if use_new else render_old_checkout()
Feature Flag Services
LaunchDarkly
# LaunchDarkly SDK
# NOTE(review): the module and constructor names below are illustrative —
# the real LaunchDarkly Python SDK is `ldclient` (ldclient.set_config /
# ldclient.get()); verify against the current SDK docs before copying.
import launchdarkly_client

# Initialize once per process with the server-side SDK key.
ld_client = launchdarkly_client.InitiateClient("sdk-key")

# Evaluate flag
# The user context drives targeting rules; "key" should be a stable,
# unique identifier per user so bucketing is consistent.
user = {
    "key": "user-123",
    "email": "[email protected]"
}
# Third argument is the fallback value used if the flag is missing
# or the SDK cannot reach the flag service.
show_new_feature = ld_client.bool_variation(
    "new-feature", user, False
)
if show_new_feature:
    show_new_checkout()
else:
    show_old_checkout()
# Track events
# Records a custom conversion event for the same user, used to measure
# experiment outcomes downstream.
ld_client.track("purchase", user, {"value": 99.99})
Unleash
# Unleash client
from unleash_client import UnleashClient

# The client polls the Unleash server in the background for flag state;
# `environment` selects which environment's toggles apply.
client = UnleashClient(
    url="https://unleash.example.com/api",
    app_name="payment-service",
    environment="prod"
)
# start() begins the background refresh loop — call once at app startup.
client.start()
# Check flag
# Evaluated against the locally cached toggle state (no network per call).
if client.is_enabled("new-payment-flow"):
    enable_new_flow()
Postgres-Based Flags
# Self-hosted feature flags
import hashlib


class PostgresFeatureFlags:
    """Feature flags stored in a Postgres table, with percentage rollouts."""

    def __init__(self, db_pool):
        # db_pool: pool exposing execute(query) -> iterable of dict-like
        # rows (assumed from usage in get_flags — confirm driver API).
        self.db = db_pool

    def get_flags(self, user_id):
        """Return {flag_name: bool} for every enabled flag, evaluated for user_id."""
        query = """
            SELECT f.name, f.enabled, f.rollout_percentage
            FROM feature_flags f
            WHERE f.enabled = true
        """
        flags = self.db.execute(query)
        return {
            f["name"]: self._evaluate_flag(f, user_id)
            for f in flags
        }

    def _evaluate_flag(self, flag, user_id):
        """Evaluate one flag row for a user.

        A NULL rollout_percentage means fully on; otherwise the user is
        deterministically bucketed into [0, 100) and the flag is on when
        the bucket falls below the percentage.
        """
        if flag["rollout_percentage"] is None:
            return True
        # Stable digest instead of builtin hash(): hash() is randomized
        # per process, so users would flip buckets on every restart.
        digest = hashlib.sha256(f"{flag['name']}:{user_id}".encode()).hexdigest()
        return int(digest, 16) % 100 < flag["rollout_percentage"]
Experimentation
A/B Testing Setup
# Simple A/B test implementation
import hashlib


class Experiment:
    """Deterministic user-to-variant bucketing for an A/B test."""

    def __init__(self, name, variants):
        """
        name: experiment identifier; mixed into the hash so different
              experiments bucket users independently.
        variants: {"control": 50, "treatment": 50} — percentages should
                  sum to 100; uncovered buckets fall back to "control".
        """
        self.name = name
        self.variants = variants

    def assign_variant(self, user_id):
        """Assign user to a variant, stable across calls and restarts."""
        # Stable digest instead of builtin hash(): hash() is randomized
        # per process (PYTHONHASHSEED), which would silently reshuffle
        # every user's assignment on each restart and corrupt results.
        digest = hashlib.sha256(f"{self.name}:{user_id}".encode()).hexdigest()
        bucket = int(digest, 16) % 100
        cumulative = 0
        for variant, percentage in self.variants.items():
            cumulative += percentage
            if bucket < cumulative:
                return variant
        # Percentages summed to under 100 and the bucket fell past the end.
        return "control"
# Usage
checkout_experiment = Experiment("new_checkout", {"control": 50, "treatment": 50})

variant = checkout_experiment.assign_variant(current_user.id)
in_treatment = variant == "treatment"
if in_treatment:
    show_new_checkout()
else:
    show_old_checkout()
# Record exposure with the variant the user actually saw.
track_event("checkout_viewed", {"variant": "treatment" if in_treatment else "control"})
Statistical Analysis
# Calculate experiment results
import math
from dataclasses import dataclass


@dataclass
class ExperimentResults:
    """Conversion counts and sample sizes for a two-variant experiment."""

    control_conversions: int
    treatment_conversions: int
    control_size: int
    treatment_size: int

    @property
    def control_rate(self):
        """Control conversion rate (raises ZeroDivisionError on an empty group)."""
        return self.control_conversions / self.control_size

    @property
    def treatment_rate(self):
        """Treatment conversion rate (raises ZeroDivisionError on an empty group)."""
        return self.treatment_conversions / self.treatment_size

    def confidence_level(self):
        """Two-proportion z-test for statistical significance.

        Returns a human-readable verdict. Uses |z| so a significant
        *negative* effect is also flagged — the original one-sided
        check reported regressions as "Not significant". Empty groups
        and zero-variance data (all 0% or all 100%) are handled instead
        of raising ZeroDivisionError.
        """
        n1, n2 = self.control_size, self.treatment_size
        if n1 == 0 or n2 == 0:
            # No observations in one group: nothing to test.
            return "Not significant"
        p1, p2 = self.control_rate, self.treatment_rate
        # Pooled proportion under the null hypothesis of no difference
        p = (self.control_conversions + self.treatment_conversions) / (n1 + n2)
        # Standard error of the difference in proportions
        se = math.sqrt(p * (1 - p) * (1 / n1 + 1 / n2))
        if se == 0:
            # Every user converted (or none did) in both groups: no variance.
            return "Not significant"
        # Z-score (magnitude only — direction does not affect significance)
        z = abs((p2 - p1) / se)
        # 1.96 / 1.645 are the two-sided 95% / 90% critical values
        if z > 1.96:
            return "95% confident - significant"
        elif z > 1.645:
            return "90% confident"
        else:
            return "Not significant"
Gradual Rollouts
Canary Deployment
# Kubernetes canary with feature flags
# The Service selects on `app` only, so it load-balances across both the
# stable and canary Deployments; the 9:1 replica split yields ~90/10 traffic.
apiVersion: v1
kind: Service
metadata:
  name: payment-service
spec:
  selector:
    app: payment-service
  ports:
    - port: 80
      targetPort: 8080
---
# Stable deployment (90%)
apiVersion: apps/v1
kind: Deployment
metadata:
  name: payment-service-stable
spec:
  replicas: 9
  selector:
    matchLabels:
      app: payment-service
      version: stable
  template:
    metadata:
      # Pod labels must match spec.selector.matchLabels or the Deployment
      # is rejected by the API server (the original omitted them).
      labels:
        app: payment-service
        version: stable
    spec:
      containers:
        - name: payment
          image: payment-service:v2.0
          env:
            - name: FEATURE_NEW_CHECKOUT
              value: "false"
---
# Canary deployment (10%)
apiVersion: apps/v1
kind: Deployment
metadata:
  name: payment-service-canary
spec:
  replicas: 1
  selector:
    matchLabels:
      app: payment-service
      version: canary
  template:
    metadata:
      labels:
        app: payment-service
        version: canary
    spec:
      containers:
        - name: payment
          image: payment-service:v2.0
          env:
            - name: FEATURE_NEW_CHECKOUT
              value: "true"
Progressive Rollout
# Automated progressive rollout
import asyncio


async def progressive_rollout(flag_name, target_percentage, step=10):
    """Gradually increase a flag's rollout percentage.

    Raises the rollout by `step` percentage points at a time, waiting an
    hour between increments and rolling back to 0% if the error rate
    breaches the 1% threshold.

    flag_name: flag to ramp up.
    target_percentage: final rollout percentage (0-100).
    step: percentage points added per increment.
    """
    current = 0
    while current < target_percentage:
        # Clamp so we never overshoot the target (e.g. target=25, step=10).
        current = min(current + step, target_percentage)
        # Update flag
        await update_flag_percentage(flag_name, current)
        # Wait and monitor — the original `await wait(1 hour)` was
        # pseudo-code and not valid Python.
        await asyncio.sleep(3600)
        # Check metrics
        error_rate = await get_error_rate(flag_name)
        if error_rate > 0.01:  # 1% threshold
            print(f"Error rate {error_rate} exceeded threshold, rolling back")
            await update_flag_percentage(flag_name, 0)
            break
        print(f"Rollout: {current}% - error rate: {error_rate}")
Best Practices
- Keep flags short-lived: Remove after full rollout
- Name clearly: Use descriptive names
- Default to off: Safer for new features
- Monitor metrics: Track both flag and business metrics
- Document features: Know what each flag controls
- Clean up: Remove unused flags regularly
Conclusion
Feature flags enable safe, controlled software delivery. By implementing proper experimentation and monitoring, teams can validate features in production before full rollout.
Comments