Introduction
PostgreSQL powers production systems across virtually every industry. This article explores real-world use cases with practical implementation patterns, covering e-commerce, financial systems, data warehousing, geospatial applications, and more.
E-commerce Platforms
Database Schema Design
-- Users and authentication
-- Core account record; password_hash stores a digest (never plaintext).
CREATE TABLE users (
user_id SERIAL PRIMARY KEY,
email VARCHAR(255) UNIQUE NOT NULL, -- login identifier; uniqueness backed by an implicit index
password_hash VARCHAR(255) NOT NULL,
first_name VARCHAR(100),
last_name VARCHAR(100),
phone VARCHAR(20),
is_active BOOLEAN DEFAULT TRUE, -- soft-disable flag instead of deleting rows
created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP -- NOTE(review): not auto-maintained; needs a trigger or app-side update
);
-- Addresses
-- Saved addresses; a user may have several of each type.
CREATE TABLE addresses (
address_id SERIAL PRIMARY KEY,
user_id INTEGER REFERENCES users(user_id), -- NOTE(review): nullable; consider NOT NULL and an explicit ON DELETE action
address_type VARCHAR(20) CHECK (address_type IN ('billing', 'shipping')),
street VARCHAR(255),
city VARCHAR(100),
state VARCHAR(100),
postal_code VARCHAR(20),
country CHAR(2) DEFAULT 'US', -- ISO 3166-1 alpha-2 code
is_default BOOLEAN DEFAULT FALSE -- NOTE(review): nothing prevents several defaults per user/type; a partial unique index would
);
-- Products
-- NOTE(review): this references categories, which is created further down —
-- create categories first (or add this FK later with ALTER TABLE), otherwise
-- this statement fails.
CREATE TABLE products (
product_id SERIAL PRIMARY KEY,
sku VARCHAR(50) UNIQUE NOT NULL, -- stock-keeping unit; external product identifier
name VARCHAR(255) NOT NULL,
description TEXT,
category_id INTEGER REFERENCES categories(category_id),
price DECIMAL(10, 2) NOT NULL, -- selling price; DECIMAL avoids float rounding on money
cost DECIMAL(10, 2),
stock_quantity INTEGER DEFAULT 0,
is_active BOOLEAN DEFAULT TRUE,
attributes JSONB DEFAULT '{}', -- flexible per-product attributes (color, size, ...)
created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP
);
-- Categories (self-referencing for hierarchy)
-- parent_id is NULL for top-level categories.
CREATE TABLE categories (
category_id SERIAL PRIMARY KEY,
parent_id INTEGER REFERENCES categories(category_id), -- self-FK forms the tree
name VARCHAR(100) NOT NULL,
slug VARCHAR(100) UNIQUE NOT NULL -- URL-friendly identifier
);
-- Orders
-- Order header. Monetary amounts are denormalized at purchase time so
-- later tax/shipping rule changes do not rewrite history.
CREATE TABLE orders (
order_id SERIAL PRIMARY KEY,
user_id INTEGER REFERENCES users(user_id),
shipping_address_id INTEGER REFERENCES addresses(address_id),
billing_address_id INTEGER REFERENCES addresses(address_id),
status VARCHAR(20) DEFAULT 'pending'
CHECK (status IN ('pending', 'processing', 'shipped', 'delivered', 'cancelled')),
subtotal DECIMAL(10, 2),
tax_amount DECIMAL(10, 2),
shipping_amount DECIMAL(10, 2),
total_amount DECIMAL(10, 2),
currency CHAR(3) DEFAULT 'USD', -- ISO 4217 code
created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP,
shipped_at TIMESTAMPTZ, -- NULL until the order ships
delivered_at TIMESTAMPTZ -- NULL until delivery is confirmed
);
-- Order items
-- One row per product line on an order. unit_price is captured at purchase
-- time so later catalog price changes do not alter past orders.
CREATE TABLE order_items (
order_item_id SERIAL PRIMARY KEY,
order_id INTEGER NOT NULL REFERENCES orders(order_id), -- a line without an order is meaningless
product_id INTEGER REFERENCES products(product_id),
quantity INTEGER NOT NULL CHECK (quantity > 0), -- reject zero/negative quantities
unit_price DECIMAL(10, 2) NOT NULL,
total_price DECIMAL(10, 2) NOT NULL
);
-- Indexes for common queries
CREATE INDEX idx_orders_user_id ON orders(user_id);
-- Supports dashboards filtering by status, newest first
CREATE INDEX idx_orders_status_created ON orders(status, created_at DESC);
CREATE INDEX idx_order_items_order_id ON order_items(order_id);
-- No separate SKU index is needed: the UNIQUE constraint on products.sku
-- already created one, and a duplicate index only slows writes.
CREATE INDEX idx_products_category ON products(category_id);
Common E-commerce Queries
-- Get user's order history: one row per order, with its line items
-- aggregated into a JSON array.
SELECT
    o.order_id,
    o.total_amount,
    o.status,
    o.created_at,
    json_agg(
        json_build_object(
            'product_name', p.name,
            'quantity', oi.quantity,
            'price', oi.unit_price
        )
    ) AS items
FROM orders o
JOIN order_items oi ON oi.order_id = o.order_id
JOIN products p ON p.product_id = oi.product_id
WHERE o.user_id = $1
GROUP BY o.order_id -- order_id is the PK, so the other o.* columns are functionally dependent
ORDER BY o.created_at DESC;
-- Active products within a price range, cheapest first, with category name.
SELECT
    p.*,
    c.name AS category_name
FROM products AS p
INNER JOIN categories AS c
    ON c.category_id = p.category_id
WHERE p.is_active = TRUE
  AND p.price BETWEEN $min_price AND $max_price
ORDER BY p.price;
-- Running totals by day
-- Daily order counts and revenue over the last 30 days, plus a cumulative
-- revenue column (the original computed only per-day totals despite the
-- "running totals" title).
SELECT DATE(created_at) AS day,
COUNT(*) AS order_count,
SUM(total_amount) AS revenue,
SUM(SUM(total_amount)) OVER (ORDER BY DATE(created_at)) AS running_revenue
FROM orders
WHERE created_at >= CURRENT_DATE - INTERVAL '30 days'
GROUP BY DATE(created_at)
ORDER BY day;
Shopping Cart Implementation
-- Cart table
-- A cart belongs to an anonymous session (session_id) and/or a logged-in
-- user (user_id), so carts can survive login.
CREATE TABLE carts (
cart_id SERIAL PRIMARY KEY,
session_id VARCHAR(100) UNIQUE,
user_id INTEGER REFERENCES users(user_id),
created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP
);
CREATE TABLE cart_items (
cart_item_id SERIAL PRIMARY KEY,
cart_id INTEGER NOT NULL REFERENCES carts(cart_id), -- an item must belong to a cart
product_id INTEGER REFERENCES products(product_id),
quantity INTEGER DEFAULT 1 CHECK (quantity > 0), -- reject zero/negative quantities
added_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP,
UNIQUE(cart_id, product_id) -- one row per product per cart; target of the upsert
);
-- Update cart item or insert
-- Upsert: insert the product, or add to the existing quantity when the
-- (cart_id, product_id) row already exists.
INSERT INTO cart_items (cart_id, product_id, quantity)
VALUES ($cart_id, $product_id, $quantity)
ON CONFLICT (cart_id, product_id) DO UPDATE
SET quantity = cart_items.quantity + EXCLUDED.quantity;
-- Calculate cart total
-- Total cart value at current catalog prices (not prices at add-time).
SELECT SUM(p.price * ci.quantity) AS total
FROM cart_items AS ci
INNER JOIN products AS p
    ON p.product_id = ci.product_id
WHERE ci.cart_id = $cart_id;
Financial Systems
Double-Entry Accounting
-- Chart of accounts
-- Hierarchical chart of accounts; account_type drives the debit/credit
-- sign convention used when computing balances.
CREATE TABLE accounts (
account_id SERIAL PRIMARY KEY,
account_number VARCHAR(20) UNIQUE NOT NULL, -- human-facing account code
name VARCHAR(255) NOT NULL,
account_type VARCHAR(20) NOT NULL
CHECK (account_type IN ('asset', 'liability', 'equity', 'revenue', 'expense')),
parent_id INTEGER REFERENCES accounts(account_id), -- self-FK for sub-accounts
is_active BOOLEAN DEFAULT TRUE
);
-- Transactions
-- Journal entry header; monetary detail lives in transaction_lines.
CREATE TABLE transactions (
transaction_id SERIAL PRIMARY KEY,
description TEXT,
reference_number VARCHAR(100), -- external document number (invoice, check, ...)
transaction_date DATE NOT NULL, -- accounting date, deliberately distinct from created_at
created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP,
created_by INTEGER REFERENCES users(user_id)
);
-- Transaction lines (debits must equal credits)
-- Each line posts to exactly one side: non-negative, never both, and
-- never neither.
CREATE TABLE transaction_lines (
line_id SERIAL PRIMARY KEY,
transaction_id INTEGER NOT NULL REFERENCES transactions(transaction_id), -- orphan lines are meaningless
account_id INTEGER NOT NULL REFERENCES accounts(account_id),
debit DECIMAL(15, 2) DEFAULT 0,
credit DECIMAL(15, 2) DEFAULT 0,
CHECK (debit >= 0 AND credit >= 0),
CHECK (debit = 0 OR credit = 0),
CHECK (debit <> 0 OR credit <> 0) -- forbid lines that post nothing at all
);
-- Ensure debits = credits (trigger)
-- Raises if the lines of the affected transaction do not balance.
-- NOTE(review): when fired as a plain row-level AFTER trigger, this runs
-- at the end of each STATEMENT — so inserting the debit and credit lines
-- in two separate statements fails after the first one. Attach it as a
-- DEFERRABLE INITIALLY DEFERRED constraint trigger so the check runs at
-- COMMIT instead.
CREATE OR REPLACE FUNCTION validate_transaction_balance()
RETURNS TRIGGER AS $$
BEGIN
IF NEW.transaction_id IS NOT NULL THEN
IF EXISTS (
SELECT 1 FROM transaction_lines
WHERE transaction_id = NEW.transaction_id
GROUP BY transaction_id
HAVING SUM(debit) != SUM(credit)
) THEN
RAISE EXCEPTION 'Debits must equal credits';
END IF;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Use a deferrable constraint trigger so the balance check runs at COMMIT;
-- a plain AFTER ROW trigger rejects balanced entries whose debit and
-- credit lines are inserted in separate statements.
CREATE CONSTRAINT TRIGGER transaction_balance
AFTER INSERT OR UPDATE ON transaction_lines
DEFERRABLE INITIALLY DEFERRED
FOR EACH ROW EXECUTE FUNCTION validate_transaction_balance();
Ledger with Balance Tracking
-- Account balances (materialized)
-- Running balance snapshots, one row per (account, date); maintained by
-- update_account_balance rather than computed on the fly.
CREATE TABLE account_balances (
account_id INTEGER REFERENCES accounts(account_id),
as_of_date DATE,
balance DECIMAL(15, 2) DEFAULT 0,
PRIMARY KEY (account_id, as_of_date)
);
-- Update balances function
-- Adds p_amount to the balance snapshot for (p_account_id, p_date),
-- creating the row on first use.
CREATE OR REPLACE FUNCTION update_account_balance(
    p_account_id INTEGER,
    p_date DATE,
    p_amount DECIMAL(15, 2)
) RETURNS VOID AS $$
BEGIN
    INSERT INTO account_balances (account_id, as_of_date, balance)
    VALUES (p_account_id, p_date, p_amount)
    ON CONFLICT (account_id, as_of_date) DO UPDATE
    SET balance = account_balances.balance + EXCLUDED.balance;
END;
$$ LANGUAGE plpgsql;
-- Get account balance at date
-- Sign convention: asset/expense accounts are debit-normal; the rest
-- (liability/equity/revenue) are credit-normal. COALESCE turns the
-- no-rows case into a zero balance.
SELECT COALESCE(
    SUM(
        CASE
            WHEN a.account_type IN ('asset', 'expense') THEN tl.debit - tl.credit
            ELSE tl.credit - tl.debit
        END
    ),
    0
) AS balance
FROM transaction_lines AS tl
INNER JOIN accounts AS a ON a.account_id = tl.account_id
INNER JOIN transactions AS t ON t.transaction_id = tl.transaction_id
WHERE tl.account_id = $account_id
  AND t.transaction_date <= $as_of_date;
Transaction Isolation for Financial Data
-- Serializable isolation for critical transactions.
-- SET TRANSACTION outside a transaction block is a no-op (PostgreSQL only
-- warns), so declare the isolation level on BEGIN itself.
BEGIN ISOLATION LEVEL SERIALIZABLE;
-- Check balance before deduction
-- NOTE(review): accounts as defined above has no balance column; derive
-- the balance from transaction_lines or account_balances instead.
SELECT balance FROM accounts WHERE account_id = $from_account;
-- Perform transfer
INSERT INTO transactions (description, transaction_date)
VALUES ('Transfer', CURRENT_DATE);
-- Name the columns explicitly: transaction_lines starts with a SERIAL
-- line_id, so positional VALUES would bind the transaction id to line_id.
INSERT INTO transaction_lines (transaction_id, account_id, debit, credit) VALUES
(currval('transactions_transaction_id_seq'), $from_account, 100, 0),
(currval('transactions_transaction_id_seq'), $to_account, 0, 100);
COMMIT;
-- Handle serialization failure
-- NOTE(review): this does not work as written — PL/pgSQL forbids COMMIT
-- inside a block that has an EXCEPTION clause (the subtransaction is
-- still active), and a DO block cannot restart its outer transaction.
-- Retry-on-40001 loops belong in the application/driver layer; treat
-- this as illustrative pseudo-code only.
DO $$
BEGIN
WHILE TRUE LOOP
BEGIN
-- Your transaction here
COMMIT;
EXIT;
EXCEPTION WHEN serialization_failure THEN
-- Retry
CONTINUE;
END;
END LOOP;
END $$;
Data Warehousing
Schema Design
-- Star schema: Fact sales
-- NOTE(review): the dim_* tables referenced here are created further down;
-- create the dimensions first so these foreign keys resolve.
CREATE TABLE fact_sales (
sale_id BIGSERIAL PRIMARY KEY, -- was a bare BIGSERIAL; fact rows still need a key
date_key INTEGER REFERENCES dim_date(date_key),
product_key INTEGER REFERENCES dim_product(product_key),
customer_key INTEGER REFERENCES dim_customer(customer_key),
store_key INTEGER REFERENCES dim_store(store_key),
quantity INTEGER NOT NULL,
unit_price DECIMAL(10, 2) NOT NULL,
discount DECIMAL(10, 2) DEFAULT 0,
total_amount DECIMAL(12, 2) NOT NULL,
created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP
);
-- Dimension: Date
-- One row per calendar day with precomputed attributes for grouping.
-- NOTE(review): the date_key encoding (e.g. YYYYMMDD) is not shown here —
-- confirm against the ETL that populates it.
CREATE TABLE dim_date (
date_key INTEGER PRIMARY KEY,
date DATE NOT NULL,
day_of_week SMALLINT,
day_name VARCHAR(10),
month SMALLINT,
month_name VARCHAR(10),
quarter SMALLINT,
year INTEGER,
is_weekend BOOLEAN,
is_holiday BOOLEAN
);
-- Dimension: Product
-- product_id carries the source-system key; product_key is the warehouse
-- surrogate key.
CREATE TABLE dim_product (
product_key SERIAL PRIMARY KEY,
product_id INTEGER,
product_name VARCHAR(255),
category VARCHAR(100),
subcategory VARCHAR(100),
brand VARCHAR(100),
supplier_key INTEGER -- NOTE(review): no FK declared; presumably points at a dim_supplier not shown here
);
-- Dimension: Customer
-- customer_id carries the source-system key; customer_key is the
-- warehouse surrogate key.
CREATE TABLE dim_customer (
customer_key SERIAL PRIMARY KEY,
customer_id INTEGER,
customer_name VARCHAR(255),
email VARCHAR(255),
segment VARCHAR(50),
city VARCHAR(100),
state VARCHAR(100),
country VARCHAR(100),
first_purchase_date DATE, -- anchors the cohort analysis below
lifetime_value DECIMAL(12, 2)
);
-- Aggregate tables
-- Pre-rolled monthly summary to avoid scanning fact_sales for dashboards.
CREATE TABLE agg_sales_by_month (
year_month INTEGER, -- NOTE(review): encoding (e.g. YYYYMM) not shown — confirm with the load job
category VARCHAR(100),
total_revenue DECIMAL(15, 2),
total_quantity INTEGER,
order_count INTEGER,
unique_customers INTEGER,
PRIMARY KEY (year_month, category)
);
-- Materialized view for reporting
-- Revenue/quantity/customer counts by year, quarter, month, and category.
CREATE MATERIALIZED VIEW mv_sales_summary AS
SELECT
d.year,
d.quarter,
d.month,
p.category,
SUM(s.total_amount) AS revenue,
SUM(s.quantity) AS quantity,
COUNT(DISTINCT s.customer_key) AS customers,
COUNT(*) AS orders
FROM fact_sales s
JOIN dim_date d ON s.date_key = d.date_key
JOIN dim_product p ON s.product_key = p.product_key
GROUP BY d.year, d.quarter, d.month, p.category
WITH DATA;
-- REFRESH ... CONCURRENTLY (used by the cron job below) requires a
-- unique index on the view, so index the full grouping key.
CREATE UNIQUE INDEX mv_sales_summary_key_idx
ON mv_sales_summary (year, quarter, month, category);
-- Refresh on schedule
-- 0 2 * * * psql -c "REFRESH MATERIALIZED VIEW CONCURRENTLY mv_sales_summary" mydb
Analytics Queries
-- Year-over-year comparison
-- Monthly revenue against the same month one year earlier, with growth %.
WITH monthly AS (
    -- Revenue per (year, month) for the last three calendar years
    SELECT
        d.year,
        d.month,
        SUM(s.total_amount) AS revenue
    FROM fact_sales s
    JOIN dim_date d ON s.date_key = d.date_key
    WHERE d.year >= EXTRACT(YEAR FROM CURRENT_DATE) - 2
    GROUP BY d.year, d.month
)
SELECT
    year,
    month,
    revenue AS current_revenue,
    LAG(revenue) OVER (PARTITION BY month ORDER BY year) AS previous_year_revenue,
    ROUND(
        100 * (revenue - LAG(revenue) OVER (PARTITION BY month ORDER BY year))
            / NULLIF(LAG(revenue) OVER (PARTITION BY month ORDER BY year), 0),
        2
    ) AS yoy_growth_pct
FROM monthly
ORDER BY year, month;
-- Customer cohorts
-- Group the inner query by the truncated months: the original grouped by
-- raw s.created_at, producing one row per purchase timestamp and
-- inflating total_orders in the outer aggregate.
WITH cohort AS (
SELECT
c.customer_key,
DATE_TRUNC('month', c.first_purchase_date) AS cohort_month,
DATE_TRUNC('month', s.created_at) AS purchase_month,
SUM(s.total_amount) AS amount
FROM fact_sales s
JOIN dim_customer c ON s.customer_key = c.customer_key
GROUP BY c.customer_key,
         DATE_TRUNC('month', c.first_purchase_date),
         DATE_TRUNC('month', s.created_at)
)
SELECT
cohort_month,
COUNT(DISTINCT customer_key) AS cohort_size,
COUNT(purchase_month) AS total_orders, -- one per (customer, active month)
SUM(amount) AS total_revenue
FROM cohort
GROUP BY cohort_month
ORDER BY cohort_month;
Geospatial Applications (PostGIS)
Setting Up PostGIS
-- Enable PostGIS (IF NOT EXISTS makes the script re-runnable)
CREATE EXTENSION IF NOT EXISTS postgis;
-- Create spatial table
CREATE TABLE locations (
location_id SERIAL PRIMARY KEY,
name VARCHAR(255),
geom GEOMETRY(Point, 4326), -- WGS84 lon/lat
address TEXT,
category VARCHAR(50)
);
-- Create spatial index; GIST is the standard index type for geometry
CREATE INDEX idx_locations_geom ON locations USING GIST (geom);
Geospatial Queries
-- Find nearby locations within 5 km, nearest first.
-- Cast to geography: ST_Distance/ST_DWithin on GEOMETRY in SRID 4326
-- operate in DEGREES, so the original radius of 5000 meant 5000 degrees
-- and "distance_meters" was mislabeled. geography works in meters.
SELECT name,
ST_Distance(
geom::geography,
ST_SetSRID(ST_MakePoint(-122.4194, 37.7749), 4326)::geography
) AS distance_meters
FROM locations
WHERE ST_DWithin(
geom::geography,
ST_SetSRID(ST_MakePoint(-122.4194, 37.7749), 4326)::geography,
5000 -- 5km radius, in meters thanks to the geography cast
)
ORDER BY distance_meters
LIMIT 10;
-- Find locations within polygon (e.g., city boundary)
-- Returns every location that falls inside the named city's boundary.
SELECT l.name
FROM locations AS l
INNER JOIN city_boundaries AS b
    ON ST_Contains(b.geom, l.geom)
WHERE b.city_name = 'San Francisco';
-- Calculate straight-line distance between two points, in meters.
-- ST_Length on a SRID-4326 GEOMETRY returns degrees; casting the line to
-- geography yields meters, which is what the original comment promised.
SELECT ST_Length(
ST_MakeLine(
ST_SetSRID(ST_MakePoint(-122.4194, 37.7749), 4326),
ST_SetSRID(ST_MakePoint(-122.2694, 37.8049), 4326)
)::geography
); -- meters
Delivery Zone Analysis
-- Create delivery zones
-- Zone boundaries stored as WGS84 polygons.
-- NOTE(review): consider a GIST index on polygon to speed up the
-- ST_Contains lookups below.
CREATE TABLE delivery_zones (
zone_id SERIAL PRIMARY KEY,
zone_name VARCHAR(100),
polygon GEOMETRY(Polygon, 4326)
);
-- Check if location is in delivery zone
-- For each location inside a zone, report the zone and the distance from
-- the zone's centroid (geography cast => meters).
SELECT
    l.name,
    dz.zone_name,
    ST_Distance(
        l.geom::geography,
        ST_Centroid(dz.polygon)::geography
    ) AS distance_to_center
FROM locations AS l
INNER JOIN delivery_zones AS dz
    ON ST_Contains(dz.polygon, l.geom);
-- Optimize delivery routes (Traveling Salesman Problem)
-- Nearest-neighbor heuristic sketch.
-- NOTE(review): not valid as written — in a recursive CTE a trailing
-- ORDER BY/LIMIT attaches to the whole UNION, not just the recursive
-- term, so PostgreSQL rejects this; without the LIMIT the recursive arm
-- would also branch to every unvisited location. A working version needs
-- a LATERAL nearest-neighbor subquery inside the recursive term. Kept as
-- illustrative pseudo-SQL.
WITH RECURSIVE route AS (
-- Start from depot
SELECT
location_id,
geom,
0 AS step,
ARRAY[location_id] AS path
FROM locations
WHERE location_id = 1
UNION ALL
-- Find nearest unvisited location
SELECT
l.location_id,
l.geom,
r.step + 1,
r.path || l.location_id
FROM route r
JOIN locations l ON l.location_id != ALL(r.path)
WHERE r.step < 10 -- Number of stops
ORDER BY ST_Distance(r.geom, l.geom)
LIMIT 1
)
SELECT * FROM route;
Multi-Tenant Applications
Row-Level Security
-- Create tenant registry first, then enable RLS — the original ran
-- ALTER TABLE before the table existed, which fails.
CREATE TABLE tenants (
tenant_id SERIAL PRIMARY KEY,
name VARCHAR(255) NOT NULL,
domain VARCHAR(255),
is_active BOOLEAN DEFAULT TRUE
);
-- Enable RLS
ALTER TABLE tenants ENABLE ROW LEVEL SECURITY;
-- Set current tenant function
-- Reads the per-session app.tenant_id setting; the second argument to
-- current_setting suppresses the error (and NULLIF maps '' to NULL)
-- when the setting is absent.
CREATE OR REPLACE FUNCTION current_tenant_id()
RETURNS INTEGER
LANGUAGE SQL STABLE
AS $$
    SELECT CAST(NULLIF(current_setting('app.tenant_id', true), '') AS INTEGER);
$$;
-- RLS policy
-- Policies have no effect until RLS is enabled on each table.
ALTER TABLE users ENABLE ROW LEVEL SECURITY;
ALTER TABLE orders ENABLE ROW LEVEL SECURITY;
-- NOTE(review): users/orders as defined in the e-commerce section have no
-- tenant_id column; these policies assume a tenant-scoped schema variant.
CREATE POLICY tenant_isolation_policy ON users
FOR ALL
USING (tenant_id = current_tenant_id());
CREATE POLICY tenant_isolation_policy ON orders
FOR ALL
USING (tenant_id = current_tenant_id());
-- All tables in tenant schema use RLS
-- Set tenant for session
SET app.tenant_id = 123;
-- All queries automatically filtered
SELECT * FROM users; -- Only returns users for tenant 123
Tenant Isolation with Schema
-- Alternative: Separate schemas per tenant
-- Create schema for new tenant
CREATE SCHEMA tenant_123;
-- Create tables in tenant schema; LIKE ... INCLUDING ALL copies columns,
-- defaults, constraints, and indexes from the template table.
CREATE TABLE tenant_123.users (LIKE users INCLUDING ALL);
-- Grant access
GRANT USAGE ON SCHEMA tenant_123 TO tenant_123_app;
-- Queries use schema prefix (or set search_path per connection)
SELECT * FROM tenant_123.users;
Time-Series Data
TimescaleDB Hypertable
-- Convert to hypertable
-- NOTE(review): 'measurements' is not defined anywhere in this article;
-- this first call is illustrative only. The metrics example below is
-- complete.
SELECT create_hypertable('measurements', 'time');
-- Regular table -> hypertable
CREATE TABLE metrics (
time TIMESTAMPTZ NOT NULL, -- partitioning column for the hypertable
device_id INTEGER,
temperature DECIMAL(5, 2),
humidity DECIMAL(5, 2),
pressure DECIMAL(6, 2)
);
-- Partition into one chunk per day; if_not_exists makes this re-runnable
SELECT create_hypertable('metrics', 'time',
chunk_time_interval => INTERVAL '1 day',
if_not_exists => TRUE
);
-- Enable compression
-- segmentby keeps each device's data together so per-device queries stay
-- fast on compressed chunks.
ALTER TABLE metrics SET (
timescaledb.compress,
timescaledb.compress_segmentby = 'device_id'
);
-- Add compression policy: compress chunks older than 7 days
SELECT add_compression_policy('metrics', INTERVAL '7 days');
-- Query time buckets
-- 15-minute temperature summary per device over the last 24 hours.
SELECT
    time_bucket('15 minutes', time) AS bucket,
    device_id,
    AVG(temperature) AS avg_temp,
    MAX(temperature) AS max_temp,
    MIN(temperature) AS min_temp
FROM metrics
WHERE time >= NOW() - INTERVAL '24 hours'
GROUP BY bucket, device_id
ORDER BY bucket;
-- Continuous aggregates
-- WITH NO DATA lets creation return immediately; the refresh policy
-- below backfills and keeps the aggregate up to date.
CREATE MATERIALIZED VIEW hourly_metrics
WITH (timescaledb.continuous) AS
SELECT
time_bucket('1 hour', time) AS hour,
device_id,
AVG(temperature) AS avg_temp,
COUNT(*) AS readings
FROM metrics
GROUP BY hour, device_id
WITH NO DATA;
-- Refresh policy: refresh the window between 3h and 1h ago, every hour
SELECT add_continuous_aggregate_policy('hourly_metrics',
start_offset => INTERVAL '3 hours',
end_offset => INTERVAL '1 hour',
schedule_interval => INTERVAL '1 hour');
JSON Document Storage
Flexible Schema Design
-- Product catalog with flexible attributes
-- NOTE(review): a table named products was already created in the
-- e-commerce section; this is a standalone variant for the JSONB example,
-- not an addition to that schema.
CREATE TABLE products (
product_id SERIAL PRIMARY KEY,
name VARCHAR(255) NOT NULL,
category VARCHAR(100),
base_price DECIMAL(10, 2),
attributes JSONB DEFAULT '{}', -- free-form attributes (color, size, ...)
created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP
);
-- Index JSON fields
CREATE INDEX idx_products_attributes
ON products USING GIN (attributes);
-- Query JSON with the containment operator @>, which the default GIN
-- jsonb_ops opclass accelerates; plain ->> equality comparisons cannot
-- use this index.
SELECT * FROM products
WHERE attributes @> '{"color": "red", "size": "large"}';
-- Aggregate JSON
-- Per-color product counts and average price, restricted to rows that
-- actually carry a color key (the ? operator tests key existence).
SELECT
    attributes->>'color' AS color,
    COUNT(*) AS count,
    AVG(base_price) AS avg_price
FROM products
WHERE attributes ? 'color'
GROUP BY attributes->>'color';
-- Partial index for specific attribute
-- Expression index on the extracted color text; the WHERE clause keeps
-- the index to rows that actually have a color key, so it stays small.
CREATE INDEX idx_products_color
ON products ((attributes->>'color'))
WHERE attributes ? 'color';
Event Sourcing
-- Event store
-- Append-only log of domain events; state is rebuilt by replaying an
-- aggregate's events in version order.
CREATE TABLE events (
event_id BIGSERIAL PRIMARY KEY,
aggregate_id UUID NOT NULL,
aggregate_type VARCHAR(100) NOT NULL,
event_type VARCHAR(100) NOT NULL,
event_data JSONB NOT NULL,
metadata JSONB DEFAULT '{}',
created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP,
version INTEGER NOT NULL,
UNIQUE (aggregate_id, version) -- optimistic concurrency: at most one event per aggregate version
);
-- Index for event sourcing queries
-- Supports replaying one aggregate's events in version order.
CREATE INDEX idx_events_aggregate
ON events (aggregate_type, aggregate_id, version);
-- Append event
-- Record a new event for an aggregate; version establishes replay order.
INSERT INTO events (aggregate_id, aggregate_type, event_type, event_data, version)
VALUES (
    '550e8400-e29b-41d4-a716-446655440000',
    'Order',
    'OrderCreated',
    '{"customer_id": 123, "total": 100.00}'::jsonb,
    1
);
-- Reconstruct aggregate
-- Replay this aggregate's events oldest-first to rebuild current state.
SELECT event_data
FROM events
WHERE aggregate_type = 'Order'
  AND aggregate_id = '550e8400-e29b-41d4-a716-446655440000'
ORDER BY version;
Best Practices Summary
| Use Case | Key Features | PostgreSQL Solution |
|---|---|---|
| E-commerce | Inventory, orders, payments | ACID, JSONB, constraints |
| Financial | Double-entry, audit | Serializable isolation, triggers |
| Analytics | Aggregations, reporting | Materialized views, TimescaleDB |
| Geospatial | Maps, routing | PostGIS, GIST indexes |
| Multi-tenant | Isolation, RLS | Row-level security |
| Time-series | Metrics, IoT | TimescaleDB, hypertables |
| Document | Flexible schema | JSONB, GIN indexes |
Conclusion
PostgreSQL’s versatility makes it suitable for virtually any production use case. From e-commerce platforms handling millions of transactions to financial systems requiring strict ACID compliance, from geospatial applications to time-series data analysis, PostgreSQL delivers reliability, performance, and flexibility.
Combined with its extension ecosystem (PostGIS, TimescaleDB, pgvector), PostgreSQL can handle requirements that would previously have required multiple specialized databases.
Resources
- PostgreSQL Documentation
- PostGIS Documentation
- TimescaleDB Documentation
- EnterpriseDB Best Practices
Comments