Skip to content

freeze all requirements and document the project requirements #1236

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Dec 12, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 0 additions & 2 deletions packages/postgresml-python/build.sh
Original file line number Diff line number Diff line change
Expand Up @@ -29,8 +29,6 @@ rm "$deb_dir/release.sh"
(cat ${SCRIPT_DIR}/DEBIAN/postrm | envsubst '${PGVERSION}') > "$deb_dir/DEBIAN/postrm"

cp ${SCRIPT_DIR}/../../pgml-extension/requirements.txt "$deb_dir/etc/postgresml-python/requirements.txt"
cp ${SCRIPT_DIR}/../../pgml-extension/requirements-autogptq.txt "$deb_dir/etc/postgresml-python/requirements-autogptq.txt"
cp ${SCRIPT_DIR}/../../pgml-extension/requirements-xformers.txt "$deb_dir/etc/postgresml-python/requirements-xformers.txt"

virtualenv --python="python$PYTHON_VERSION" "$deb_dir/var/lib/postgresml-python/pgml-venv"
source "$deb_dir/var/lib/postgresml-python/pgml-venv/bin/activate"
Expand Down
3 changes: 1 addition & 2 deletions pgml-cms/docs/resources/developer-docs/installation.md
Original file line number Diff line number Diff line change
Expand Up @@ -63,8 +63,7 @@ To install the necessary Python packages into a virtual environment, use the `virtualenv` tool:
```bash
virtualenv pgml-venv && \
source pgml-venv/bin/activate && \
pip install -r requirements.txt && \
pip install -r requirements-xformers.txt --no-dependencies
pip install -r requirements.txt
```
{% endtab %}

Expand Down
6 changes: 3 additions & 3 deletions pgml-extension/examples/multi_classification.sql
Original file line number Diff line number Diff line change
Expand Up @@ -31,9 +31,9 @@ LIMIT 10;

-- linear models
SELECT * FROM pgml.train('Iris Flower Types', algorithm => 'ridge');
SELECT * FROM pgml.train('Iris Flower Types', algorithm => 'stochastic_gradient_descent');
SELECT * FROM pgml.train('Iris Flower Types', algorithm => 'perceptron');
SELECT * FROM pgml.train('Iris Flower Types', algorithm => 'passive_aggressive');
--SELECT * FROM pgml.train('Iris Flower Types', algorithm => 'stochastic_gradient_descent');
--SELECT * FROM pgml.train('Iris Flower Types', algorithm => 'perceptron');
--SELECT * FROM pgml.train('Iris Flower Types', algorithm => 'passive_aggressive');

-- support vector machines
SELECT * FROM pgml.train('Iris Flower Types', algorithm => 'svm');
Expand Down
13 changes: 13 additions & 0 deletions pgml-extension/examples/transformers.sql
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,19 @@ SELECT pgml.embed('intfloat/e5-small', 'hi mom', '{"device": "cpu"}');

SELECT pgml.embed('hkunlp/instructor-xl', 'hi mom', '{"instruction": "Encode it with love"}');

SELECT pgml.transform_stream(
task => '{
"task": "text-generation",
"model": "TheBloke/zephyr-7B-beta-GPTQ",
"model_type": "mistral",
"revision": "main",
"device_map": "auto"
}'::JSONB,
input => 'AI is going to',
args => '{
"max_new_tokens": 100
}'::JSONB
);
-- BitsAndBytes support
SELECT pgml.transform(
task => '{
Expand Down
1 change: 0 additions & 1 deletion pgml-extension/requirements-autogptq.txt

This file was deleted.

1 change: 0 additions & 1 deletion pgml-extension/requirements-xformers.txt

This file was deleted.

41 changes: 41 additions & 0 deletions pgml-extension/requirements.base.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
# The immediate dependencies of PostgresML are maintained here.

# Locked because newer versions have bugs
transformers-stream-generator==0.0.4
optimum==1.13.2
peft==0.6.2
pyarrow==11.0.0
torch==2.0.1 # 2.1.1 breaks sentence-transformers==2.2.2

# ML
catboost
lightgbm
torchaudio
torchvision
xgboost

# Transformers
accelerate
auto-gptq; sys_platform == 'linux'
bitsandbytes
ctransformers
huggingface-hub
deepspeed
einops
tokenizers
transformers
xformers; sys_platform == 'linux'

# Embeddings
InstructorEmbedding
sentence-transformers

# Ratings
rouge
sacrebleu
sacremoses

# Utils
datasets
orjson
langchain
113 changes: 91 additions & 22 deletions pgml-extension/requirements.txt
Original file line number Diff line number Diff line change
@@ -1,32 +1,101 @@
accelerate==0.22.0
bitsandbytes==0.41.1
catboost==1.2
accelerate==0.25.0
aiohttp==3.9.1
aiosignal==1.3.1
annotated-types==0.6.0
anyio==4.1.0
attrs==23.1.0
bitsandbytes==0.41.3.post2
catboost==1.2.2
certifi==2023.11.17
charset-normalizer==3.3.2
click==8.1.7
colorama==0.4.6
contourpy==1.2.0
ctransformers==0.2.27
datasets==2.14.5
deepspeed==0.10.3
huggingface-hub==0.17.1
cycler==0.12.1
dataclasses-json==0.6.3
datasets==2.15.0
deepspeed==0.12.4
dill==0.3.7
einops==0.7.0
filelock==3.13.1
fonttools==4.46.0
frozenlist==1.4.0
fsspec==2023.10.0
graphviz==0.20.1
hjson==3.1.0
huggingface-hub==0.19.4
idna==3.6
InstructorEmbedding==1.0.1
Jinja2==3.1.2
joblib==1.3.2
jsonpatch==1.33
jsonpointer==2.4
kiwisolver==1.4.5
langchain==0.0.349
langchain-community==0.0.1
langchain-core==0.0.13
langsmith==0.0.69
lightgbm==4.1.0
orjson==3.9.7
pandas==2.1.0
rich==13.5.2
lxml==4.9.3
MarkupSafe==2.1.3
marshmallow==3.20.1
matplotlib==3.8.2
mpmath==1.3.0
multidict==6.0.4
multiprocess==0.70.15
mypy-extensions==1.0.0
networkx==3.2.1
ninja==1.11.1.1
nltk==3.8.1
numpy==1.26.2
optimum==1.13.2
orjson==3.9.10
packaging==23.2
pandas==2.1.4
peft==0.6.2
Pillow==10.1.0
plotly==5.18.0
portalocker==2.8.2
psutil==5.9.6
py-cpuinfo==9.0.0
pyarrow==14.0.1
pyarrow-hotfix==0.6
pydantic==2.5.2
pydantic_core==2.14.5
pynvml==11.5.0
pyparsing==3.1.1
python-dateutil==2.8.2
pytz==2023.3.post1
PyYAML==6.0.1
regex==2023.10.3
requests==2.31.0
rouge==1.0.1
sacrebleu==2.3.1
sacremoses==0.0.53
scikit-learn==1.3.0
sentencepiece==0.1.99
sacrebleu==2.3.3
sacremoses==0.1.1
safetensors==0.4.1
scikit-learn==1.3.2
scipy==1.11.4
sentence-transformers==2.2.2
tokenizers==0.14.1
sentencepiece==0.1.99
six==1.16.0
sniffio==1.3.0
SQLAlchemy==2.0.23
sympy==1.12
tabulate==0.9.0
tenacity==8.2.3
threadpoolctl==3.2.0
tokenizers==0.15.0
torch==2.0.1
torchaudio==2.0.2
torchvision==0.15.2
tqdm==4.66.1
transformers==4.34.1
xgboost==2.0.0
langchain==0.0.287
einops==0.6.1
pynvml==11.5.0
transformers==4.36.0
transformers-stream-generator==0.0.4
optimum==1.13.2
peft==0.6.2
pyarrow==11.0.0
typing-inspect==0.9.0
typing_extensions==4.9.0
tzdata==2023.3
urllib3==2.1.0
xgboost==2.0.2
xxhash==3.4.1
yarl==1.9.4
34 changes: 9 additions & 25 deletions pgml-extension/src/bindings/transformers/transformers.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@
import shutil
import time
import queue
import sys

import datasets
from InstructorEmbedding import INSTRUCTOR
Expand Down Expand Up @@ -42,7 +41,6 @@
Trainer,
)
from threading import Thread
from typing import Optional

__cache_transformer_by_model_id = {}
__cache_sentence_transformer_by_name = {}
Expand Down Expand Up @@ -393,42 +391,28 @@ def transform(task, args, inputs, stream=False):
return orjson.dumps(pipe(inputs, **args), default=orjson_default).decode()


def create_embedding(transformer):
def embed(transformer, inputs, kwargs):
kwargs = orjson.loads(kwargs)
ensure_device(kwargs)
instructor = transformer.startswith("hkunlp/instructor")
klass = INSTRUCTOR if instructor else SentenceTransformer
return klass(transformer)

# Cache the model
if transformer not in __cache_sentence_transformer_by_name:
klass = INSTRUCTOR if instructor else SentenceTransformer
__cache_sentence_transformer_by_name[transformer] = klass(transformer)
model = __cache_sentence_transformer_by_name[transformer]

def embed_using(model, transformer, inputs, kwargs):
if isinstance(kwargs, str):
kwargs = orjson.loads(kwargs)

instructor = transformer.startswith("hkunlp/instructor")
# Handle instruction encoding
if instructor:
texts_with_instructions = []
instruction = kwargs.pop("instruction")
for text in inputs:
texts_with_instructions.append([instruction, text])

inputs = texts_with_instructions

return model.encode(inputs, **kwargs)


def embed(transformer, inputs, kwargs):
kwargs = orjson.loads(kwargs)

ensure_device(kwargs)

if transformer not in __cache_sentence_transformer_by_name:
__cache_sentence_transformer_by_name[transformer] = create_embedding(
transformer
)
model = __cache_sentence_transformer_by_name[transformer]

return embed_using(model, transformer, inputs, kwargs)


def clear_gpu_cache(memory_usage: None):
if not torch.cuda.is_available():
raise PgMLException(f"No GPU available")
Expand Down