Feature/sqlite duckdb vector support #555

Open · wants to merge 6 commits into main
8 changes: 4 additions & 4 deletions .pre-commit-config.yaml
@@ -12,8 +12,8 @@ repos:
       - id: debug-statements
       - id: mixed-line-ending
 
-  - repo: https://github.com/pycqa/isort
-    rev: 5.12.0
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.3.3
     hooks:
-      - id: isort
-        args: [ "--profile", "black", "--filter-files" ]
+      #- id: ruff
+      - id: ruff-format
3 changes: 2 additions & 1 deletion pyproject.toml
@@ -31,7 +31,8 @@ mysql = ["PyMySQL"]
 clickhouse = ["clickhouse_connect"]
 bigquery = ["google-cloud-bigquery"]
 snowflake = ["snowflake-connector-python"]
-duckdb = ["duckdb"]
+duckdb = ["duckdb", "fastembed"]
+sqlite = ["fastembed"]
 google = ["google-generativeai", "google-cloud-aiplatform"]
 all = ["psycopg2-binary", "db-dtypes", "PyMySQL", "google-cloud-bigquery", "snowflake-connector-python", "duckdb", "openai", "mistralai", "chromadb", "anthropic", "zhipuai", "marqo", "google-generativeai", "google-cloud-aiplatform", "qdrant-client", "fastembed", "ollama", "httpx", "opensearch-py", "opensearch-dsl", "transformers", "pinecone-client", "pymilvus[model]","weaviate-client"]
 test = ["tox"]
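
For context, a minimal sketch of the fastembed API that the new extras pull in; the model name matches the default used in duckdb_vector.py below, and the snippet is illustrative rather than part of this diff:

from fastembed import TextEmbedding

# embed() yields one numpy vector per input document
model = TextEmbedding(model_name="BAAI/bge-small-en-v1.5")
vectors = list(model.embed(["What were total sales last month?"]))
print(len(vectors[0]))  # 384 dimensions for bge-small-en-v1.5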
446 changes: 224 additions & 222 deletions src/vanna/base/base.py

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions src/vanna/duckdb/__init__.py
@@ -0,0 +1 @@
from .duckdb_vector import DuckDB_VectorStore
196 changes: 196 additions & 0 deletions src/vanna/duckdb/duckdb_vector.py
@@ -0,0 +1,196 @@
import json
import os
from typing import List

import duckdb
import numpy as np
import pandas as pd
from fastembed import TextEmbedding

from vanna.base import VannaBase
from vanna.utils import deterministic_uuid


class DuckDB_VectorStore(VannaBase):
    def __init__(self, config=None):
        # Normalize config before the super() call; otherwise a None config
        # would make the attribute lookups below fail.
        if config is None:
            config = {}
        super().__init__(config=config)

        self.database = config.get("database", ".")
        self.n_results_sql = config.get("n_results_sql", config.get("n_results", 10))
        self.n_results_documentation = config.get(
            "n_results_documentation", config.get("n_results", 10)
        )
        self.n_results_ddl = config.get("n_results_ddl", config.get("n_results", 10))

        self.model_name = config.get("model_name", "BAAI/bge-small-en-v1.5")
        self.embedding_model = TextEmbedding(model_name=self.model_name)
        self.embedding_size = config.get(
            "embedding_size", 384
        )  # default is the output size of BAAI/bge-small-en-v1.5

        # array_cosine_similarity operates on fixed-size ARRAY values, so the
        # vec column is declared with an explicit dimensionality.
        conn = duckdb.connect(database=self.database)
        conn.execute(
            f"""
            CREATE TABLE IF NOT EXISTS embeddings (
                id VARCHAR,
                text VARCHAR,
                model VARCHAR,
                vec FLOAT[{self.embedding_size}]
            );
            """
        )
        conn.close()

    def generate_embedding(self, data: str) -> List[float]:
        # fastembed yields numpy arrays; convert so the declared return type holds
        embeddings = list(self.embedding_model.embed([data]))
        return embeddings[0].tolist()

def write_embedding_to_table(self, text, id, embedding):
con = duckdb.connect(database=self.database)
embedding_array = np.array(embedding, dtype=np.float32).tolist()
con.execute(
"INSERT INTO embeddings (id, text, model, vec) VALUES (?, ?, ?, ?)",
[id, text, self.model_name, embedding_array],
)
con.close()

def add_question_sql(self, question: str, sql: str) -> str:
question_sql_json = json.dumps(
{
"question": question,
"sql": sql,
},
ensure_ascii=False,
)
id = deterministic_uuid(question_sql_json) + "-sql"
self.write_embedding_to_table(
question_sql_json, id, self.generate_embedding(question_sql_json)
)
return id

def add_ddl(self, ddl: str) -> str:
id = deterministic_uuid(ddl) + "-ddl"
self.write_embedding_to_table(ddl, id, self.generate_embedding(ddl))
return id

def add_documentation(self, documentation: str) -> str:
id = deterministic_uuid(documentation) + "-doc"
self.write_embedding_to_table(
documentation, id, self.generate_embedding(documentation)
)
return id

def get_training_data(self) -> pd.DataFrame:
con = duckdb.connect(database=self.database)
sql_data = con.execute("SELECT * FROM embeddings").fetchdf()
con.close()

df = pd.DataFrame()

if not sql_data.empty:
df_sql = sql_data[sql_data["id"].str.endswith("-sql")]
df_sql = pd.DataFrame(
{
"id": df_sql["id"],
"question": [json.loads(doc)["question"] for doc in df_sql["text"]],
"content": [json.loads(doc)["sql"] for doc in df_sql["text"]],
"training_data_type": "sql",
}
)
df = pd.concat([df, df_sql])

df_ddl = sql_data[sql_data["id"].str.endswith("-ddl")]
df_ddl = pd.DataFrame(
{
"id": df_ddl["id"],
"question": None,
"content": df_ddl["text"],
"training_data_type": "ddl",
}
)
df = pd.concat([df, df_ddl])

df_doc = sql_data[sql_data["id"].str.endswith("-doc")]
df_doc = pd.DataFrame(
{
"id": df_doc["id"],
"question": None,
"content": df_doc["text"],
"training_data_type": "documentation",
}
)
df = pd.concat([df, df_doc])

return df

def remove_training_data(self, id: str) -> bool:
con = duckdb.connect(database=self.database)
con.execute("DELETE FROM embeddings WHERE id = ?", [id])
con.close()
return True

def remove_collection(self, collection_name: str) -> bool:
suffix = {"sql": "-sql", "ddl": "-ddl", "documentation": "-doc"}.get(
collection_name, None
)
if suffix:
con = duckdb.connect(database=self.database)
con.execute("DELETE FROM embeddings WHERE id LIKE ?", ["%" + suffix])
con.close()
return True
return False

    def query_similar_embeddings(self, query_text: str, top_n: int) -> pd.DataFrame:
        query_embedding = self.generate_embedding(query_text)
        query_embedding_array = np.array(query_embedding, dtype=np.float32).tolist()

        con = duckdb.connect(database=self.database)
        # The cast must match the vec column's dimensionality, so interpolate
        # self.embedding_size rather than hardcoding 384.
        results = con.execute(
            f"""
            SELECT text, array_cosine_similarity(vec, ?::FLOAT[{self.embedding_size}]) AS similarity_score
            FROM embeddings
            ORDER BY similarity_score DESC
            LIMIT ?;
            """,
            [query_embedding_array, top_n],
        ).fetchdf()
        con.close()
        return results

    def get_similar_question_sql(self, question: str) -> list:
        results = self.query_similar_embeddings(question, self.n_results_sql)
        similar_questions = []
        for doc in results["text"]:
            try:
                parsed_doc = json.loads(doc)
                similar_questions.append(
                    {"question": parsed_doc["question"], "sql": parsed_doc["sql"]}
                )
            except json.JSONDecodeError:
                # Fall back to the raw text if it is not valid JSON
                similar_questions.append(doc)
        return similar_questions

    def get_related_ddl(self, question: str) -> list:
        results = self.query_similar_embeddings(question, self.n_results_ddl)
        related_ddls = []
        for doc in results["text"]:
            try:
                related_ddls.append(json.loads(doc))
            except json.JSONDecodeError:
                related_ddls.append(doc)
        return related_ddls

    def get_related_documentation(self, question: str) -> list:
        results = self.query_similar_embeddings(question, self.n_results_documentation)
        related_docs = []
        for doc in results["text"]:
            try:
                related_docs.append(json.loads(doc))
            except json.JSONDecodeError:
                related_docs.append(doc)
        return related_docs
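
For reviewers, a minimal usage sketch assuming vanna's usual mixin pattern; OpenAI_Chat (from the existing vanna.openai module) and the config values are assumptions for illustration, not part of this diff:

from vanna.duckdb import DuckDB_VectorStore
from vanna.openai import OpenAI_Chat  # assumed LLM mixin, unchanged by this PR

class MyVanna(DuckDB_VectorStore, OpenAI_Chat):
    def __init__(self, config=None):
        DuckDB_VectorStore.__init__(self, config=config)
        OpenAI_Chat.__init__(self, config=config)

vn = MyVanna(config={"database": "vanna.duckdb", "api_key": "...", "model": "gpt-4"})
vn.add_ddl("CREATE TABLE sales (id INTEGER, amount DOUBLE, sold_at DATE)")
print(vn.get_related_ddl("total sales by month"))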
1 change: 1 addition & 0 deletions src/vanna/sqlite/__init__.py
@@ -0,0 +1 @@
from .sqlite_vector import SQLite_VectorStore
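
The body of sqlite_vector.py is not rendered in this view; assuming the class mirrors DuckDB_VectorStore's constructor keys, wiring it up would follow the same pattern (a sketch, not confirmed by the diff shown here):

from vanna.openai import OpenAI_Chat  # assumed LLM mixin
from vanna.sqlite import SQLite_VectorStore

class MySQLiteVanna(SQLite_VectorStore, OpenAI_Chat):
    def __init__(self, config=None):
        SQLite_VectorStore.__init__(self, config=config)
        OpenAI_Chat.__init__(self, config=config)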