
MariaDB for AI: Vector Search, RAG Pipelines, and AI Agent Integration

Introduction

The integration of AI capabilities into databases has become a major trend in modern data management. MariaDB joins this evolution with native vector search support (MariaDB Vector, introduced in 11.7), letting developers build AI-powered applications without running a separate, specialized vector database.

This comprehensive guide explores how to leverage MariaDB for AI applications, from embedding storage to RAG pipelines and AI agent integration.


What are Embeddings?

Embeddings are numerical vector representations that capture the semantic meaning of text, images, or other data; items with similar meaning map to nearby vectors.

import numpy as np

# Simple embedding example
# In production, use models like:
# - sentence-transformers
# - OpenAI ada-002
# - HuggingFace models

texts = ["cat", "dog", "computer", "keyboard"]

# Conceptually similar items cluster together
# cat ---- dog (similar)
# computer ---- keyboard (similar)

def cosine_similarity(a, b):
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

# Example: vectors would be generated by ML models
# cat: [0.1, 0.2, 0.9]
# dog: [0.1, 0.2, 0.85]
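
Running the helper on the sketch vectors above makes the idea concrete: near-identical directions score close to 1.0. (These vectors are illustrative, not real model output.)

cat = np.array([0.1, 0.2, 0.9])
dog = np.array([0.1, 0.2, 0.85])
computer = np.array([0.9, 0.1, 0.1])

print(cosine_similarity(cat, dog))       # ~1.00 (similar)
print(cosine_similarity(cat, computer))  # ~0.24 (dissimilar)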

MariaDB Vector Types

-- MariaDB 11.7+ supports a native VECTOR type
-- (declare NOT NULL if you plan to add a vector index)
CREATE TABLE embeddings (
    id INT PRIMARY KEY AUTO_INCREMENT,
    document_id INT,
    text_content TEXT,
    embedding VECTOR(384) NOT NULL  -- 384 dimensions (MiniLM)
);

-- Or 1536 dimensions (OpenAI ada-002)
CREATE TABLE embeddings_ada (
    id INT PRIMARY KEY AUTO_INCREMENT,
    text_content TEXT,
    embedding VECTOR(1536) NOT NULL
);
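
As a quick sanity check, you can round-trip a vector literal through the VEC_FromText() and VEC_ToText() conversion functions. A minimal sketch using MariaDB Connector/Python; the connection credentials are placeholders:

import mariadb

conn = mariadb.connect(host="localhost", user="root",
                       password="secret", database="vectors")
cur = conn.cursor()

# VEC_FromText parses a JSON-style array into the binary vector format
cur.execute(
    "INSERT INTO embeddings (document_id, text_content, embedding) "
    "VALUES (?, ?, VEC_FromText(?))",
    (1, "hello", "[" + ",".join(["0.1"] * 384) + "]")
)
conn.commit()

# VEC_ToText converts the binary vector back to readable text
cur.execute("SELECT id, VEC_ToText(embedding) FROM embeddings LIMIT 1")
print(cur.fetchone())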

MariaDB Vector Setup

Installation

# MariaDB Vector ships in MariaDB 11.7+; 11.8 is the LTS series
# Install MariaDB 11.8
curl -LsS https://downloads.mariadb.com/MariaDB/mariadb_repo_setup | bash -s -- --mariadb-server-version="mariadb-11.8"
apt install -y mariadb-server mariadb-client

# Start and enable
systemctl start mariadb
systemctl enable mariadb

Configuration

# my.cnf for vector workloads
[mysqld]
# Memory cache for MHNSW vector indexes
mhnsw-max-cache-size=1G

# InnoDB settings
innodb-buffer-pool-size=4G

# Allow larger packets for long vector literals
max-allowed-packet=256M
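
To confirm the server is vector-ready, check the version and the MHNSW variables from a client session. A sketch with placeholder credentials:

import mariadb

conn = mariadb.connect(host="localhost", user="root", password="secret")
cur = conn.cursor()

cur.execute("SELECT VERSION()")
print("Server:", cur.fetchone()[0])  # expect 11.7 or later

# MHNSW settings govern vector index behaviour
cur.execute("SHOW VARIABLES LIKE 'mhnsw%'")
for name, value in cur:
    print(name, "=", value)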

Creating Vector Tables

Basic Vector Table

-- Create a table with an indexed vector column
-- (indexed vector columns must be declared NOT NULL)
CREATE TABLE documents (
    id INT PRIMARY KEY AUTO_INCREMENT,
    title VARCHAR(255),
    content TEXT,
    embedding VECTOR(384) NOT NULL,
    -- Inline MHNSW vector index
    VECTOR INDEX idx_embedding (embedding) M=6 DISTANCE=cosine
);

Vector Index Options

-- MariaDB vector indexes use MHNSW, a modified HNSW
-- (Hierarchical Navigable Small World) graph
-- Options:
--   M: graph connectivity; higher = better recall, larger index, slower build
--   DISTANCE: euclidean (default) or cosine

-- Add an MHNSW index to an existing (not yet indexed) table
ALTER TABLE documents
    ADD VECTOR INDEX idx_hnsw (embedding) M=8 DISTANCE=cosine;

-- Exact (brute force) search needs no index at all: an
-- ORDER BY VEC_DISTANCE_COSINE(...) LIMIT n query over an
-- unindexed column scans every row and returns exact results

Inserting Vectors

Python Insert Example

import mariadb
import numpy as np
from sentence_transformers import SentenceTransformer

class MariaDBVectorStore:
    def __init__(self, config):
        self.conn = mariadb.connect(
            host=config['host'],
            user=config['user'],
            password=config['password'],
            database=config['database']
        )
        self.model = SentenceTransformer('all-MiniLM-L6-v2')
    
    def create_tables(self):
        cursor = self.conn.cursor()

        # Vector columns must be NOT NULL to be indexed;
        # the inline VECTOR INDEX builds an MHNSW graph index
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS documents (
                id INT PRIMARY KEY AUTO_INCREMENT,
                title VARCHAR(255),
                content TEXT,
                embedding VECTOR(384) NOT NULL,
                VECTOR INDEX idx_embedding (embedding) M=6 DISTANCE=cosine
            )
        """)

        self.conn.commit()

    def add_document(self, title, content):
        # Generate embedding
        embedding = self.model.encode(content)

        # Serialize to the '[x1,x2,...]' text form that VEC_FromText expects
        vector_str = '[' + ','.join(map(str, embedding.tolist())) + ']'

        cursor = self.conn.cursor()
        cursor.execute(
            "INSERT INTO documents (title, content, embedding) "
            "VALUES (?, ?, VEC_FromText(?))",
            (title, content, vector_str)
        )
        self.conn.commit()

        return cursor.lastrowid

    def add_documents_batch(self, documents):
        """Batch insert documents: one encode call, one executemany."""
        titles = [title for title, _ in documents]
        contents = [content for _, content in documents]

        # Encoding the whole batch at once is much faster than per-row calls
        embeddings = self.model.encode(contents)

        rows = [
            (title, content, '[' + ','.join(map(str, emb.tolist())) + ']')
            for title, content, emb in zip(titles, contents, embeddings)
        ]

        cursor = self.conn.cursor()
        cursor.executemany(
            "INSERT INTO documents (title, content, embedding) "
            "VALUES (?, ?, VEC_FromText(?))",
            rows
        )
        self.conn.commit()

# Usage
store = MariaDBVectorStore({
    'host': 'localhost',
    'user': 'root',
    'password': 'secret',
    'database': 'vectors'
})

store.create_tables()
store.add_document(
    "Python Tutorial", 
    "Python is a high-level programming language..."
)

Searching Vectors

SQL Search Queries

-- Cosine distance search; the ORDER BY ... LIMIT pattern lets the
-- optimizer use an MHNSW index built with DISTANCE=cosine
SELECT
    id,
    title,
    VEC_DISTANCE_COSINE(embedding, VEC_FromText('[0.1,0.2,...]')) AS distance
FROM documents
ORDER BY distance
LIMIT 5;

-- Euclidean distance
SELECT id, title,
    VEC_DISTANCE_EUCLIDEAN(embedding, VEC_FromText('[0.1,0.2,...]')) AS dist
FROM documents
ORDER BY dist
LIMIT 5;

Python Search Implementation

class VectorSearch:
    def __init__(self, db_config):
        self.conn = mariadb.connect(**db_config)
        # Load the embedding model once, not on every query
        self.model = SentenceTransformer('all-MiniLM-L6-v2')

    def search(self, query, top_k=5):
        # Generate query embedding
        query_embedding = self.model.encode(query)
        vector_str = '[' + ','.join(map(str, query_embedding.tolist())) + ']'

        cursor = self.conn.cursor(dictionary=True)

        # ORDER BY distance + LIMIT lets the optimizer use the vector index
        cursor.execute("""
            SELECT
                id,
                title,
                content,
                VEC_DISTANCE_COSINE(embedding, VEC_FromText(?)) AS distance
            FROM documents
            ORDER BY distance
            LIMIT ?
        """, (vector_str, top_k))

        results = cursor.fetchall()

        for result in results:
            result['distance'] = float(result['distance'])

        return results

    def search_filtered(self, query, category, top_k=5):
        """Search with a metadata filter.

        Assumes a `category` column has been added to documents.
        """
        query_embedding = self.model.encode(query)
        vector_str = '[' + ','.join(map(str, query_embedding.tolist())) + ']'

        cursor = self.conn.cursor(dictionary=True)

        cursor.execute("""
            SELECT
                id,
                title,
                VEC_DISTANCE_COSINE(embedding, VEC_FromText(?)) AS distance
            FROM documents
            WHERE category = ?
            ORDER BY distance
            LIMIT ?
        """, (vector_str, category, top_k))

        return cursor.fetchall()

# Usage
search = VectorSearch({'host': 'localhost', 'user': 'root', 
                       'password': 'secret', 'database': 'vectors'})

results = search.search("programming language tutorial")
for r in results:
    print(f"Title: {r['title']}, Distance: {r['distance']:.4f}")

Building RAG Pipelines

RAG Architecture

import mariadb
from openai import OpenAI
from sentence_transformers import SentenceTransformer

class MariaDBRAG:
    """RAG pipeline using MariaDB for vector storage."""
    
    def __init__(self, db_config):
        self.conn = mariadb.connect(**db_config)
        self.embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
        self.llm = OpenAI()  # reads OPENAI_API_KEY from the environment
        self._init_schema()
    
    def _init_schema(self):
        """Initialize database schema."""
        cursor = self.conn.cursor()
        
        # Chunks with embeddings (NOT NULL column + inline MHNSW index)
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS chunks (
                id INT PRIMARY KEY AUTO_INCREMENT,
                document_id INT,
                chunk_text TEXT,
                chunk_index INT,
                embedding VECTOR(384) NOT NULL,
                VECTOR INDEX idx_chunks_embedding (embedding) M=6 DISTANCE=cosine
            )
        """)

        # Metadata table
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS documents (
                id INT PRIMARY KEY AUTO_INCREMENT,
                title VARCHAR(255),
                source TEXT,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        """)
        
        self.conn.commit()
    
    def ingest_document(self, title, source, text, chunk_size=500):
        """Split document into chunks and store."""
        # Split into chunks
        chunks = [text[i:i+chunk_size] 
                  for i in range(0, len(text), chunk_size)]
        
        cursor = self.conn.cursor()
        
        # Insert document metadata
        cursor.execute(
            "INSERT INTO documents (title, source) VALUES (?, ?)",
            (title, source)
        )
        document_id = cursor.lastrowid
        
        # Insert chunks with embeddings
        for i, chunk in enumerate(chunks):
            embedding = self.embedding_model.encode(chunk)
            vector_str = '[' + ','.join(map(str, embedding.tolist())) + ']'

            cursor.execute("""
                INSERT INTO chunks (document_id, chunk_text, chunk_index, embedding)
                VALUES (?, ?, ?, VEC_FromText(?))
            """, (document_id, chunk, i, vector_str))
        
        self.conn.commit()
        return document_id
    
    def retrieve(self, query, top_k=5):
        """Retrieve relevant context."""
        query_embedding = self.embedding_model.encode(query)
        vector_str = '[' + ','.join(map(str, query_embedding.tolist())) + ']'
        
        cursor = self.conn.cursor(dictionary=True)
        
        cursor.execute("""
            SELECT
                chunk_text,
                document_id,
                VEC_DISTANCE_COSINE(embedding, VEC_FromText(?)) AS distance
            FROM chunks
            ORDER BY distance
            LIMIT ?
        """, (vector_str, top_k))
        
        return cursor.fetchall()
    
    def answer(self, question, max_context=2000):
        """Answer question using RAG."""
        # Retrieve relevant chunks
        chunks = self.retrieve(question, top_k=10)
        
        # Build context
        context = ""
        for chunk in chunks:
            if len(context) + len(chunk['chunk_text']) > max_context:
                break
            context += chunk['chunk_text'] + "\n\n"
        
        # Generate answer
        prompt = f"""Based on the following context, answer the question.

Context:
{context}

Question: {question}

Answer:"""
        
        response = self.llm.chat.completions.create(
            model="gpt-4",
            messages=[{"role": "user", "content": prompt}],
            temperature=0
        )
        
        return response.choices[0].message.content

# Usage
rag = MariaDBRAG({'host': 'localhost', 'user': 'root', 
                  'password': 'secret', 'database': 'rag'})

# Ingest document
with open('article.txt') as f:
    rag.ingest_document('Article Title', 'article.txt', f.read())

# Ask question
answer = rag.answer("What is the main topic of the document?")
print(answer)
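
The fixed-size slicing in ingest_document can cut sentences in half at chunk boundaries. A common refinement is overlapping chunks, so context survives the cut; a minimal sketch (the 50-character overlap is an arbitrary choice):

def chunk_with_overlap(text, chunk_size=500, overlap=50):
    """Split text into fixed-size chunks that overlap by `overlap` chars."""
    step = chunk_size - overlap
    return [text[i:i + chunk_size] for i in range(0, len(text), step)]

# Drop-in replacement for the slicing inside ingest_document:
# chunks = chunk_with_overlap(text, chunk_size=500, overlap=50)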

AI Agent Integration

MariaDB with AI Agents

class MariaDBAgent:
    """AI Agent with MariaDB as knowledge base."""
    
    def __init__(self, db_config, llm_provider='openai'):
        self.rag = MariaDBRAG(db_config)
        self.llm_provider = llm_provider
        
        # Tools available to agent
        self.tools = {
            'search_knowledge': self.search_knowledge,
            'add_knowledge': self.add_knowledge,
            'list_documents': self.list_documents
        }
    
    def search_knowledge(self, query):
        """Tool: Search knowledge base."""
        results = self.rag.retrieve(query)
        return '\n\n'.join([r['chunk_text'][:500] for r in results])
    
    def add_knowledge(self, title, content):
        """Tool: Add to knowledge base."""
        return self.rag.ingest_document(title, 'user_input', content)
    
    def list_documents(self):
        """Tool: List stored documents."""
        cursor = self.rag.conn.cursor(dictionary=True)
        cursor.execute("SELECT id, title, source, created_at FROM documents")
        return cursor.fetchall()
    
    def chat(self, user_message):
        """Process user message with agent logic."""
        
        # Simple agent: determine action from message
        if 'add' in user_message.lower() or 'remember' in user_message.lower():
            # Extract content to remember
            return "What would you like me to remember?"
        
        elif 'search' in user_message.lower() or 'find' in user_message.lower():
            # Search knowledge base
            results = self.search_knowledge(user_message)
            return f"Found relevant information:\n\n{results[:1000]}"
        
        else:
            # Use RAG to answer
            try:
                answer = self.rag.answer(user_message)
                return answer
            except Exception as e:
                return f"I couldn't find an answer. Error: {str(e)}"

# Usage
agent = MariaDBAgent({'host': 'localhost', 'user': 'root', 
                      'password': 'secret', 'database': 'agent'})

# Add knowledge
agent.add_knowledge(
    "Meeting Notes", 
    "Team meeting: discuss Q1 roadmap, assign tasks..."
)

# Chat
response = agent.chat("What was discussed in the team meeting?")
print(response)
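
The keyword routing in chat() is deliberately naive. In practice you would let the LLM choose the tool via function calling; a sketch against the OpenAI tools API, with only the search tool wired up (schema abbreviated, error handling omitted):

import json
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

TOOLS = [{
    "type": "function",
    "function": {
        "name": "search_knowledge",
        "description": "Search the MariaDB knowledge base",
        "parameters": {
            "type": "object",
            "properties": {"query": {"type": "string"}},
            "required": ["query"],
        },
    },
}]

def chat_with_tools(agent, user_message):
    """Let the model decide whether to call search_knowledge."""
    response = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": user_message}],
        tools=TOOLS,
    )
    message = response.choices[0].message
    if message.tool_calls:
        args = json.loads(message.tool_calls[0].function.arguments)
        return agent.search_knowledge(args["query"])
    return message.content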

Hybrid Search

Combining vector similarity with keyword relevance often beats either alone.

-- Create full-text index for keyword search
ALTER TABLE documents
ADD FULLTEXT INDEX ft_content (title, content);

-- Hybrid search: lower distance is better, higher text score is better,
-- so subtract the weighted text score from the weighted distance
SELECT
    id,
    title,
    content,
    VEC_DISTANCE_COSINE(embedding, VEC_FromText('[0.1,...]')) AS vector_dist,
    MATCH(title, content) AGAINST('python tutorial' IN BOOLEAN MODE) AS text_score
FROM documents
WHERE MATCH(title, content) AGAINST('python tutorial' IN BOOLEAN MODE)
ORDER BY (vector_dist * 0.7) - (text_score * 0.3)
LIMIT 10;
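
Raw distances and MATCH scores live on different scales, so re-ranking in Python after normalizing both is often more robust than mixing them in SQL; a sketch (the min-max scaling and 0.7/0.3 weights are illustrative):

def hybrid_rank(rows, w_vec=0.7, w_text=0.3):
    """Re-rank rows using min-max-normalized scores.

    Each row is a dict with 'vector_dist' (lower is better)
    and 'text_score' (higher is better).
    """
    d_lo = min(r['vector_dist'] for r in rows)
    d_hi = max(r['vector_dist'] for r in rows)
    s_lo = min(r['text_score'] for r in rows)
    s_hi = max(r['text_score'] for r in rows)

    def norm(x, lo, hi):
        return 0.0 if hi == lo else (x - lo) / (hi - lo)

    # Convert distance to similarity so both terms reward higher values
    return sorted(
        rows,
        key=lambda r: w_vec * (1 - norm(r['vector_dist'], d_lo, d_hi))
                      + w_text * norm(r['text_score'], s_lo, s_hi),
        reverse=True,
    )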

Performance Optimization

Vector Index Tuning

-- MHNSW tuning knobs:
--   M (index option): graph connectivity; higher = better recall,
--     larger index, slower build
--   mhnsw_ef_search (system variable): candidates examined per query;
--     higher = better recall, slower search
--   mhnsw_max_cache_size (system variable): memory for the index cache

-- Rebuild the index with a higher M for better recall
ALTER TABLE documents DROP INDEX idx_embedding;
ALTER TABLE documents
    ADD VECTOR INDEX idx_embedding (embedding) M=16 DISTANCE=cosine;

-- Raise search-time effort for the current session
SET SESSION mhnsw_ef_search = 100;
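
In application code the same knob is set per connection, so recall can be traded for latency without touching the index. A sketch with placeholder credentials and query vector:

import mariadb

conn = mariadb.connect(host="localhost", user="root",
                       password="secret", database="vectors")
cur = conn.cursor()

# Higher ef_search = better recall, slower queries, for this session only
cur.execute("SET SESSION mhnsw_ef_search = 100")

cur.execute("""
    SELECT id, VEC_DISTANCE_COSINE(embedding, VEC_FromText(?)) AS distance
    FROM documents
    ORDER BY distance
    LIMIT 5
""", ("[" + ",".join(["0.1"] * 384) + "]",))
print(cur.fetchall())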

Batch Processing

def efficient_batch_search(queries, model, conn, top_k=5):
    """Search multiple queries, encoding them in one batch."""
    # One encode call for the whole batch is far cheaper than per-query calls
    embeddings = model.encode(queries)

    results = []
    cursor = conn.cursor(dictionary=True)

    for embedding in embeddings:
        vector_str = '[' + ','.join(map(str, embedding.tolist())) + ']'
        cursor.execute("""
            SELECT id, VEC_DISTANCE_COSINE(embedding, VEC_FromText(?)) AS distance
            FROM documents
            ORDER BY distance
            LIMIT ?
        """, (vector_str, top_k))
        results.append(cursor.fetchall())

    return results

Deployment Considerations

Cloud Deployment

# Kubernetes deployment for MariaDB with vector workloads
apiVersion: v1
kind: ConfigMap
metadata:
  name: mariadb-config
data:
  my.cnf: |
    [mysqld]
    mhnsw-max-cache-size=2G
    innodb-buffer-pool-size=8G
    max-allowed-packet=256M
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mariadb
spec:
  serviceName: mariadb
  replicas: 1
  selector:
    matchLabels:
      app: mariadb
  template:
    metadata:
      labels:
        app: mariadb
    spec:
      containers:
      - name: mariadb
        image: mariadb:11.8
        env:
        # assumes a Secret named mariadb-secret with key root-password
        - name: MARIADB_ROOT_PASSWORD
          valueFrom:
            secretKeyRef:
              name: mariadb-secret
              key: root-password
        volumeMounts:
        - name: config
          mountPath: /etc/mysql/conf.d
        - name: data
          mountPath: /var/lib/mysql
        resources:
          requests:
            memory: "8Gi"
          limits:
            memory: "16Gi"
      volumes:
      - name: config
        configMap:
          name: mariadb-config
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 50Gi

Monitoring Vector Operations

-- Check vector index usage
SHOW INDEX FROM documents;

-- Monitor query performance
EXPLAIN FORMAT=JSON
SELECT id, VEC_DISTANCE_COSINE(embedding, VEC_FromText('[0.1,...]')) AS distance
FROM documents
ORDER BY distance
LIMIT 5;

-- Check for slow vector queries (requires log_output=TABLE)
SELECT * FROM mysql.slow_log
WHERE query_time > '00:00:01'
ORDER BY start_time DESC;
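
A quick latency check from Python also helps confirm the index is being used; an unindexed scan over a large table is dramatically slower. A sketch with placeholder credentials and query vector:

import time
import mariadb

conn = mariadb.connect(host="localhost", user="root",
                       password="secret", database="vectors")
cur = conn.cursor()

query_vec = "[" + ",".join(["0.1"] * 384) + "]"

start = time.perf_counter()
cur.execute("""
    SELECT id, VEC_DISTANCE_COSINE(embedding, VEC_FromText(?)) AS distance
    FROM documents
    ORDER BY distance
    LIMIT 5
""", (query_vec,))
cur.fetchall()
print(f"top-5 search took {time.perf_counter() - start:.4f}s")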

Best Practices

Embedding Model Selection

# Recommended models for different use cases

# Fast & small (384 dimensions)
model = SentenceTransformer('all-MiniLM-L6-v2')

# Better quality (768 dimensions)
model = SentenceTransformer('all-mpnet-base-v2')

# For production:
# - bge-small-en-v1.5
# - bge-base-en-v1.5
# - e5-small-v2
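
Whichever model you pick, its output dimension must match the VECTOR(n) declared on the table; a quick guard:

from sentence_transformers import SentenceTransformer

model = SentenceTransformer('all-MiniLM-L6-v2')

# Must equal the n in VECTOR(n) -- here, 384
dim = model.get_sentence_embedding_dimension()
assert dim == 384, f"table expects 384 dims, model produces {dim}"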

Data Management

-- Periodically defragment vector tables after heavy deletes
OPTIMIZE TABLE documents;

-- Monitor table size
SHOW TABLE STATUS LIKE 'documents';

-- Delete old embeddings (assumes a created_at column)
DELETE FROM documents
WHERE created_at < DATE_SUB(NOW(), INTERVAL 90 DAY);
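
In the RAG schema earlier, deleting a row from documents leaves its chunks (and their embeddings) behind. A small helper that removes both together (assumes the chunks/documents tables from the RAG section):

def delete_document(conn, document_id):
    """Remove a document and its chunks together."""
    cursor = conn.cursor()
    # Delete chunks first so no orphaned embeddings remain
    cursor.execute("DELETE FROM chunks WHERE document_id = ?", (document_id,))
    cursor.execute("DELETE FROM documents WHERE id = ?", (document_id,))
    conn.commit()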

Alternatives and Complementary Tools

# When to use MariaDB Vector:
# ✓ Existing MariaDB infrastructure
# ✓ Need hybrid (relational + vector)
# ✓ Self-hosted requirement
# ✓ Cost-sensitive projects

# Alternatives:
# - Pinecone: cloud vector DB
# - Weaviate: open source
# - Milvus: open source
# - ChromaDB: developer-friendly

# Complementary:
# - Redis: caching layer
# - Elasticsearch: search
# - S3: document storage

Conclusion

MariaDB Vector enables powerful AI applications directly within your existing MariaDB infrastructure. From semantic search to RAG pipelines, MariaDB provides a unified platform for both traditional and AI-driven workloads.

In the next article, we’ll explore real-world MariaDB use cases, including production patterns and implementation strategies.
