From f54d5cf72bd84ceaf6466d2fae3c69678afa1bcb Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Tue, 10 Mar 2026 15:46:43 -0500 Subject: [PATCH 01/11] chore: Migrate from Poetry to uv workspace --- .gitignore | 5 +- CONTRIBUTING.md | 32 +++++++- Makefile | 6 +- .../ai-providers/server-ai-langchain/Makefile | 12 +-- .../server-ai-langchain/pyproject.toml | 52 ++++++------ .../ai-providers/server-ai-openai/Makefile | 13 ++- .../server-ai-openai/pyproject.toml | 51 ++++++------ packages/sdk/server-ai/Makefile | 16 ++-- packages/sdk/server-ai/pyproject.toml | 80 ++++++++++--------- pyproject.toml | 35 ++++---- 10 files changed, 166 insertions(+), 136 deletions(-) diff --git a/.gitignore b/.gitignore index 7b091b3..867bb36 100644 --- a/.gitignore +++ b/.gitignore @@ -72,5 +72,6 @@ test-packaging-venv .vscode/ .python-version -# Poetry -poetry.lock +# uv — lock file is not committed for libraries (only for applications) +uv.lock +.venv/ diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3d6d69e..a0ae783 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -14,13 +14,29 @@ We encourage pull requests and other contributions from the community. Before su ### Setup -This project is built using [poetry](https://python-poetry.org/). To learn more about the basics of working with this tool, read [Poetry's basic usage guide](https://python-poetry.org/docs/basic-usage/). +This project is built using [uv](https://docs.astral.sh/uv/). The repo is structured as a uv workspace — a single shared virtual environment at the repo root contains all packages and their dependencies, and cross-package dependencies (e.g. the provider packages depending on `launchdarkly-server-sdk-ai`) are automatically resolved from the local workspace members. -To begin development, ensure your dependencies are installed and (optionally) activate the virtualenv. +To install uv, see the [uv installation guide](https://docs.astral.sh/uv/getting-started/installation/). +To install all packages and dev dependencies into the shared workspace environment: + +```shell +make install +# or directly: +uv sync --all-groups ``` -poetry install -eval $(poetry env activate) + +To activate the shared virtual environment: + +```shell +source .venv/bin/activate +``` + +Alternatively, prefix any command with `uv run` to use the workspace environment without activating it: + +```shell +uv run pytest +uv run mypy src/ldai ``` ### Testing @@ -31,6 +47,14 @@ To run all unit tests: make test ``` +To run tests for a specific package: + +```shell +make test-server-ai +make test-openai +make test-langchain +``` + It is preferable to run tests against all supported minor versions of Python (as described in `README.md` under Requirements), or at least the lowest and highest versions, prior to submitting a pull request. However, LaunchDarkly's CI tests will run automatically against all supported versions. ### Building documentation diff --git a/Makefile b/Makefile index 34ddfeb..0d3e79f 100644 --- a/Makefile +++ b/Makefile @@ -23,10 +23,8 @@ help: #! Show this help message # .PHONY: install -install: #! Install all packages - $(MAKE) install-server-ai - $(MAKE) install-langchain - $(MAKE) install-openai +install: #! Install all packages and dev dependencies into the shared workspace environment + uv sync --all-groups .PHONY: install-server-ai install-server-ai: #! 
Install server-ai package diff --git a/packages/ai-providers/server-ai-langchain/Makefile b/packages/ai-providers/server-ai-langchain/Makefile index ca02807..efc820f 100644 --- a/packages/ai-providers/server-ai-langchain/Makefile +++ b/packages/ai-providers/server-ai-langchain/Makefile @@ -9,21 +9,21 @@ help: #! Show this help message .PHONY: install install: #! Install package dependencies - poetry install + uv sync --group dev .PHONY: test test: #! Run unit tests test: install - poetry run pytest $(PYTEST_FLAGS) + uv run pytest $(PYTEST_FLAGS) .PHONY: lint lint: #! Run type analysis and linting checks lint: install - poetry run mypy src/ldai_langchain - poetry run isort --check --atomic src/ldai_langchain - poetry run pycodestyle src/ldai_langchain + uv run mypy src/ldai_langchain + uv run isort --check --atomic src/ldai_langchain + uv run pycodestyle src/ldai_langchain .PHONY: build build: #! Build distribution files build: install - poetry build + uv build diff --git a/packages/ai-providers/server-ai-langchain/pyproject.toml b/packages/ai-providers/server-ai-langchain/pyproject.toml index 8c6bc0e..d07b53b 100644 --- a/packages/ai-providers/server-ai-langchain/pyproject.toml +++ b/packages/ai-providers/server-ai-langchain/pyproject.toml @@ -1,12 +1,11 @@ -[tool.poetry] +[project] name = "launchdarkly-server-sdk-ai-langchain" version = "0.3.1" description = "LaunchDarkly AI SDK LangChain Provider" -authors = ["LaunchDarkly "] -license = "Apache-2.0" +authors = [{name = "LaunchDarkly", email = "dev@launchdarkly.com"}] +license = {text = "Apache-2.0"} readme = "README.md" -homepage = "https://docs.launchdarkly.com/sdk/ai/python" -repository = "https://github.com/launchdarkly/python-server-sdk-ai" +requires-python = ">=3.9,<4" classifiers = [ "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", @@ -20,21 +19,32 @@ classifiers = [ "Topic :: Software Development", "Topic :: Software Development :: Libraries", ] -packages = [{ include = "ldai_langchain", from = "src" }] +dependencies = [ + "launchdarkly-server-sdk-ai>=0.16.0", + "langchain-core>=0.2.0", + "langchain>=0.2.0", +] + +[project.urls] +Homepage = "https://docs.launchdarkly.com/sdk/ai/python" +Repository = "https://github.com/launchdarkly/python-server-sdk-ai" + +[dependency-groups] +dev = [ + "pytest>=2.8", + "pytest-cov>=2.4.0", + "pytest-asyncio>=0.21.0", + "mypy==1.18.2", + "pycodestyle>=2.11.0", + "isort>=5.12.0", +] -[tool.poetry.dependencies] -python = ">=3.9,<4" -launchdarkly-server-sdk-ai = ">=0.16.0" -langchain-core = ">=0.2.0" -langchain = ">=0.2.0" +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry.group.dev.dependencies] -pytest = ">=2.8" -pytest-cov = ">=2.4.0" -pytest-asyncio = ">=0.21.0" -mypy = "==1.18.2" -pycodestyle = ">=2.11.0" -isort = ">=5.12.0" +[tool.hatch.build.targets.wheel] +packages = ["src/ldai_langchain"] [tool.mypy] python_version = "3.9" @@ -47,13 +57,7 @@ profile = "black" known_third_party = ["langchain", "langchain_core", "ldai"] sections = ["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "LOCALFOLDER"] - [tool.pytest.ini_options] addopts = ["-ra"] testpaths = ["tests"] asyncio_mode = "auto" - - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/packages/ai-providers/server-ai-openai/Makefile b/packages/ai-providers/server-ai-openai/Makefile index b14dfd9..5dbc500 100644 --- a/packages/ai-providers/server-ai-openai/Makefile +++ b/packages/ai-providers/server-ai-openai/Makefile 
@@ -9,22 +9,21 @@ help: #! Show this help message .PHONY: install install: #! Install package dependencies - poetry install + uv sync --group dev .PHONY: test test: #! Run unit tests test: install - poetry run pytest $(PYTEST_FLAGS) + uv run pytest $(PYTEST_FLAGS) .PHONY: lint lint: #! Run type analysis and linting checks lint: install - poetry run mypy src/ldai_openai - poetry run isort --check --atomic src/ldai_openai - poetry run pycodestyle src/ldai_openai + uv run mypy src/ldai_openai + uv run isort --check --atomic src/ldai_openai + uv run pycodestyle src/ldai_openai .PHONY: build build: #! Build distribution files build: install - poetry build - + uv build diff --git a/packages/ai-providers/server-ai-openai/pyproject.toml b/packages/ai-providers/server-ai-openai/pyproject.toml index 6307fa5..c32d5f3 100644 --- a/packages/ai-providers/server-ai-openai/pyproject.toml +++ b/packages/ai-providers/server-ai-openai/pyproject.toml @@ -1,12 +1,11 @@ -[tool.poetry] +[project] name = "launchdarkly-server-sdk-ai-openai" version = "0.2.0" description = "LaunchDarkly AI SDK OpenAI Provider" -authors = ["LaunchDarkly "] -license = "Apache-2.0" +authors = [{name = "LaunchDarkly", email = "dev@launchdarkly.com"}] +license = {text = "Apache-2.0"} readme = "README.md" -homepage = "https://docs.launchdarkly.com/sdk/ai/python" -repository = "https://github.com/launchdarkly/python-server-sdk-ai" +requires-python = ">=3.9,<4" classifiers = [ "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", @@ -20,20 +19,31 @@ classifiers = [ "Topic :: Software Development", "Topic :: Software Development :: Libraries", ] -packages = [{ include = "ldai_openai", from = "src" }] +dependencies = [ + "launchdarkly-server-sdk-ai>=0.16.0", + "openai>=1.0.0", +] -[tool.poetry.dependencies] -python = ">=3.9,<4" -launchdarkly-server-sdk-ai = ">=0.16.0" -openai = ">=1.0.0" +[project.urls] +Homepage = "https://docs.launchdarkly.com/sdk/ai/python" +Repository = "https://github.com/launchdarkly/python-server-sdk-ai" + +[dependency-groups] +dev = [ + "pytest>=2.8", + "pytest-cov>=2.4.0", + "pytest-asyncio>=0.21.0,<1.0.0", + "mypy==1.18.2", + "pycodestyle>=2.11.0", + "isort>=5.12.0", +] -[tool.poetry.group.dev.dependencies] -pytest = ">=2.8" -pytest-cov = ">=2.4.0" -pytest-asyncio = ">=0.21.0,<1.0.0" -mypy = "==1.18.2" -pycodestyle = ">=2.11.0" -isort = ">=5.12.0" +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.hatch.build.targets.wheel] +packages = ["src/ldai_openai"] [tool.mypy] python_version = "3.9" @@ -46,14 +56,7 @@ profile = "black" known_third_party = ["openai", "ldai"] sections = ["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "LOCALFOLDER"] - [tool.pytest.ini_options] addopts = ["-ra"] testpaths = ["tests"] asyncio_mode = "auto" - - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" - diff --git a/packages/sdk/server-ai/Makefile b/packages/sdk/server-ai/Makefile index 95f037c..281210e 100644 --- a/packages/sdk/server-ai/Makefile +++ b/packages/sdk/server-ai/Makefile @@ -15,26 +15,26 @@ help: #! Show this help message .PHONY: install install: #! Install package dependencies - poetry install + uv sync --group dev .PHONY: test test: #! Run unit tests test: install - poetry run pytest $(PYTEST_FLAGS) + uv run pytest $(PYTEST_FLAGS) .PHONY: lint lint: #! 
Run type analysis and linting checks lint: install - poetry run mypy src/ldai - poetry run isort --check --atomic src/ldai - poetry run pycodestyle src/ldai + uv run mypy src/ldai + uv run isort --check --atomic src/ldai + uv run pycodestyle src/ldai .PHONY: build build: #! Build distribution files build: install - poetry build + uv build .PHONY: docs docs: #! Generate sphinx-based documentation - poetry install --with docs - poetry run $(SPHINXBUILD) -M html "$(DOCS_DIR)" "$(DOCS_BUILD_DIR)" $(SPHINXOPTS) + uv sync --group docs + uv run $(SPHINXBUILD) -M html "$(DOCS_DIR)" "$(DOCS_BUILD_DIR)" $(SPHINXOPTS) diff --git a/packages/sdk/server-ai/pyproject.toml b/packages/sdk/server-ai/pyproject.toml index 18f3f7f..106532e 100644 --- a/packages/sdk/server-ai/pyproject.toml +++ b/packages/sdk/server-ai/pyproject.toml @@ -1,13 +1,11 @@ -[tool.poetry] +[project] name = "launchdarkly-server-sdk-ai" -version = "0.16.0" +version = "0.16.0" # x-release-please-version description = "LaunchDarkly SDK for AI" -authors = ["LaunchDarkly "] -license = "Apache-2.0" +authors = [{name = "LaunchDarkly", email = "dev@launchdarkly.com"}] +license = {text = "Apache-2.0"} readme = "README.md" -homepage = "https://docs.launchdarkly.com/sdk/ai/python" -repository = "https://github.com/launchdarkly/python-server-sdk-ai" -documentation = "https://launchdarkly-python-sdk-ai.readthedocs.io/en/latest/" +requires-python = ">=3.9,<4" classifiers = [ "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", @@ -21,37 +19,44 @@ classifiers = [ "Topic :: Software Development", "Topic :: Software Development :: Libraries", ] -packages = [{ include = "ldai", from = "src" }] - -[tool.poetry.dependencies] -python = ">=3.9,<4" -launchdarkly-server-sdk = ">=9.4.0" -chevron = "=0.14.0" - +dependencies = [ + "launchdarkly-server-sdk>=9.4.0", + "chevron==0.14.0", +] -[tool.poetry.group.dev.dependencies] -pytest = ">=2.8" -pytest-cov = ">=2.4.0" -pytest-mypy = "==1.0.1" -pytest-asyncio = ">=0.21.0" -mypy = "==1.18.2" -pycodestyle = "^2.12.1" -isort = ">=5.13.2,<7.0.0" +[project.urls] +Homepage = "https://docs.launchdarkly.com/sdk/ai/python" +Repository = "https://github.com/launchdarkly/python-server-sdk-ai" +Documentation = "https://launchdarkly-python-sdk-ai.readthedocs.io/en/latest/" +[dependency-groups] +dev = [ + "pytest>=2.8", + "pytest-cov>=2.4.0", + "pytest-mypy==1.0.1", + "pytest-asyncio>=0.21.0", + "mypy==1.18.2", + "pycodestyle>=2.12.1,<3.0.0", + "isort>=5.13.2,<7.0.0", +] +docs = [ + "sphinx>=6,<8", + "sphinx-rtd-theme>=1.3,<4.0", + "certifi>=2018.4.16", + "expiringdict>=1.1.4", + "pyrfc3339>=1.0", + "jsonpickle>1.4.1", + "semver>=2.7.9", + "urllib3>=1.26.0", + "jinja2==3.1.6", +] -[tool.poetry.group.docs] -optional = true +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry.group.docs.dependencies] -sphinx = ">=6,<8" -sphinx-rtd-theme = ">=1.3,<4.0" -certifi = ">=2018.4.16" -expiringdict = ">=1.1.4" -pyrfc3339 = ">=1.0" -jsonpickle = ">1.4.1" -semver = ">=2.7.9" -urllib3 = ">=1.26.0" -jinja2 = "3.1.6" +[tool.hatch.build.targets.wheel] +packages = ["src/ldai"] [tool.mypy] python_version = "3.9" @@ -59,12 +64,9 @@ ignore_missing_imports = true install_types = true non_interactive = true - [tool.pytest.ini_options] addopts = ["-ra"] testpaths = ["tests"] - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" +[tool.isort] +profile = "black" diff --git a/pyproject.toml b/pyproject.toml index a38cffc..2f7ace5 100644 --- 
a/pyproject.toml +++ b/pyproject.toml @@ -1,23 +1,22 @@ # Root pyproject.toml for the LaunchDarkly Python AI SDK monorepo # -# This monorepo contains: -# - packages/sdk/server-ai: The main LaunchDarkly Server-Side AI SDK -# - packages/ai-providers/server-ai-langchain: LangChain integration (placeholder) +# This is a uv workspace root. Running `uv sync` here installs all packages +# and their dependencies into a single shared virtual environment at the +# repo root. # -# For development, use the package-specific pyproject.toml files in: -# - packages/sdk/server-ai/pyproject.toml -# - packages/ai-providers/server-ai-langchain/pyproject.toml +# Workspace members: +# - packages/sdk/server-ai (launchdarkly-server-sdk-ai) +# - packages/ai-providers/server-ai-openai (launchdarkly-server-sdk-ai-openai) +# - packages/ai-providers/server-ai-langchain (launchdarkly-server-sdk-ai-langchain) -[tool.poetry] -name = "launchdarkly-python-sdk-ai-monorepo" -version = "0.0.0" -description = "LaunchDarkly Python AI SDK Monorepo" -authors = ["LaunchDarkly "] -license = "Apache-2.0" +[tool.uv.workspace] +members = [ + "packages/sdk/server-ai", + "packages/ai-providers/server-ai-openai", + "packages/ai-providers/server-ai-langchain", +] -[tool.poetry.dependencies] -python = ">=3.9,<4" - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" +# Resolve launchdarkly-server-sdk-ai from the local workspace member rather +# than PyPI. This applies to all workspace members automatically. +[tool.uv.sources] +launchdarkly-server-sdk-ai = {workspace = true} From 3ab2baf2c9120acc4ed023f1b8eba477c2060e89 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Tue, 10 Mar 2026 15:57:05 -0500 Subject: [PATCH 02/11] chore: Update GitHub Actions workflows to use uv --- .github/actions/ci/action.yml | 10 +++----- .github/workflows/ci.yml | 47 +++++------------------------------ 2 files changed, 9 insertions(+), 48 deletions(-) diff --git a/.github/actions/ci/action.yml b/.github/actions/ci/action.yml index e744ef6..fd0f0bd 100644 --- a/.github/actions/ci/action.yml +++ b/.github/actions/ci/action.yml @@ -12,18 +12,14 @@ inputs: runs: using: composite steps: - - name: Set up Python ${{ inputs.python_version }} - uses: actions/setup-python@v5 + - name: Set up uv + uses: astral-sh/setup-uv@v5 with: python-version: ${{ inputs.python_version }} - - name: Install poetry - uses: abatilo/actions-poetry@7b6d33e44b4f08d7021a1dee3c044e9c253d6439 - - name: Install Dependencies shell: bash - working-directory: ${{ inputs.workspace_path }} - run: poetry install + run: uv sync --all-groups - name: Lint shell: bash diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9ee818d..fdaefaf 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -44,18 +44,11 @@ jobs: steps: - uses: actions/checkout@v4 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 + - name: Set up uv + uses: astral-sh/setup-uv@v5 with: python-version: ${{ matrix.python-version }} - - name: Install poetry - uses: abatilo/actions-poetry@7b6d33e44b4f08d7021a1dee3c044e9c253d6439 - - - name: Install requirements - working-directory: packages/sdk/server-ai - run: poetry install - - name: Run tests run: make -C packages/sdk/server-ai test @@ -90,25 +83,11 @@ jobs: steps: - uses: actions/checkout@v4 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 + - name: Set up uv + uses: astral-sh/setup-uv@v5 with: python-version: ${{ matrix.python-version }} - - name: 
Install poetry - uses: abatilo/actions-poetry@7b6d33e44b4f08d7021a1dee3c044e9c253d6439 - - - name: Configure poetry for local virtualenvs - run: poetry config virtualenvs.in-project true - - - name: Install server-ai dependency first - working-directory: packages/sdk/server-ai - run: poetry install - - - name: Install requirements - working-directory: packages/ai-providers/server-ai-langchain - run: poetry install - - name: Run tests run: make -C packages/ai-providers/server-ai-langchain test @@ -143,24 +122,10 @@ jobs: steps: - uses: actions/checkout@v4 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v5 + - name: Set up uv + uses: astral-sh/setup-uv@v5 with: python-version: ${{ matrix.python-version }} - - name: Install poetry - uses: abatilo/actions-poetry@7b6d33e44b4f08d7021a1dee3c044e9c253d6439 - - - name: Configure poetry for local virtualenvs - run: poetry config virtualenvs.in-project true - - - name: Install server-ai dependency first - working-directory: packages/sdk/server-ai - run: poetry install - - - name: Install requirements - working-directory: packages/ai-providers/server-ai-openai - run: poetry install - - name: Run tests run: make -C packages/ai-providers/server-ai-openai test From 1ea080ae4403dc6a01eb9c14ffea6320708535b8 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Tue, 10 Mar 2026 16:08:24 -0500 Subject: [PATCH 03/11] fix: Specify out-dir in mono repo --- packages/ai-providers/server-ai-langchain/Makefile | 2 +- packages/ai-providers/server-ai-openai/Makefile | 2 +- packages/sdk/server-ai/Makefile | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/ai-providers/server-ai-langchain/Makefile b/packages/ai-providers/server-ai-langchain/Makefile index efc820f..3bb77b1 100644 --- a/packages/ai-providers/server-ai-langchain/Makefile +++ b/packages/ai-providers/server-ai-langchain/Makefile @@ -26,4 +26,4 @@ lint: install .PHONY: build build: #! Build distribution files build: install - uv build + uv build --out-dir dist diff --git a/packages/ai-providers/server-ai-openai/Makefile b/packages/ai-providers/server-ai-openai/Makefile index 5dbc500..7f1c57d 100644 --- a/packages/ai-providers/server-ai-openai/Makefile +++ b/packages/ai-providers/server-ai-openai/Makefile @@ -26,4 +26,4 @@ lint: install .PHONY: build build: #! Build distribution files build: install - uv build + uv build --out-dir dist diff --git a/packages/sdk/server-ai/Makefile b/packages/sdk/server-ai/Makefile index 281210e..492b42b 100644 --- a/packages/sdk/server-ai/Makefile +++ b/packages/sdk/server-ai/Makefile @@ -32,7 +32,7 @@ lint: install .PHONY: build build: #! Build distribution files build: install - uv build + uv build --out-dir dist .PHONY: docs docs: #! 
Generate sphinx-based documentation From 397d2d4eba0f02e90be94da3566dd8678b943b2b Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Tue, 10 Mar 2026 16:56:07 -0500 Subject: [PATCH 04/11] address code review feedback --- .github/actions/ci/action.yml | 2 +- .github/workflows/ci.yml | 6 ++--- .github/workflows/release-please.yml | 28 --------------------- packages/sdk/server-ai/src/ldai/__init__.py | 24 ++++++++++++++---- packages/sdk/server-ai/src/ldai/client.py | 22 +++++++++++----- 5 files changed, 39 insertions(+), 43 deletions(-) diff --git a/.github/actions/ci/action.yml b/.github/actions/ci/action.yml index fd0f0bd..10f9392 100644 --- a/.github/actions/ci/action.yml +++ b/.github/actions/ci/action.yml @@ -13,7 +13,7 @@ runs: using: composite steps: - name: Set up uv - uses: astral-sh/setup-uv@v5 + uses: astral-sh/setup-uv@6ee6290f1cbc4156c0bdd66691b2c144ef8df19a # v7.4 with: python-version: ${{ inputs.python_version }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index fdaefaf..232dda5 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -45,7 +45,7 @@ jobs: - uses: actions/checkout@v4 - name: Set up uv - uses: astral-sh/setup-uv@v5 + uses: astral-sh/setup-uv@6ee6290f1cbc4156c0bdd66691b2c144ef8df19a # v7.4 with: python-version: ${{ matrix.python-version }} @@ -84,7 +84,7 @@ jobs: - uses: actions/checkout@v4 - name: Set up uv - uses: astral-sh/setup-uv@v5 + uses: astral-sh/setup-uv@6ee6290f1cbc4156c0bdd66691b2c144ef8df19a # v7.4 with: python-version: ${{ matrix.python-version }} @@ -123,7 +123,7 @@ jobs: - uses: actions/checkout@v4 - name: Set up uv - uses: astral-sh/setup-uv@v5 + uses: astral-sh/setup-uv@6ee6290f1cbc4156c0bdd66691b2c144ef8df19a # v7.4 with: python-version: ${{ matrix.python-version }} diff --git a/.github/workflows/release-please.yml b/.github/workflows/release-please.yml index eb6d555..87f56eb 100644 --- a/.github/workflows/release-please.yml +++ b/.github/workflows/release-please.yml @@ -63,13 +63,6 @@ jobs: with: fetch-depth: 0 - - uses: actions/setup-python@v5 - with: - python-version: '3.11' - - - name: Install poetry - uses: abatilo/actions-poetry@7b6d33e44b4f08d7021a1dee3c044e9c253d6439 - - uses: ./.github/actions/ci with: workspace_path: packages/sdk/server-ai @@ -104,13 +97,6 @@ jobs: with: fetch-depth: 0 - - uses: actions/setup-python@v5 - with: - python-version: '3.11' - - - name: Install poetry - uses: abatilo/actions-poetry@7b6d33e44b4f08d7021a1dee3c044e9c253d6439 - - uses: ./.github/actions/ci with: workspace_path: packages/ai-providers/server-ai-langchain @@ -141,13 +127,6 @@ jobs: steps: - uses: actions/checkout@v4 - - uses: actions/setup-python@v5 - with: - python-version: '3.11' - - - name: Install poetry - uses: abatilo/actions-poetry@7b6d33e44b4f08d7021a1dee3c044e9c253d6439 - - uses: ./.github/actions/ci with: workspace_path: ${{ inputs.workspace_path }} @@ -210,13 +189,6 @@ jobs: with: fetch-depth: 0 - - uses: actions/setup-python@v5 - with: - python-version: '3.11' - - - name: Install poetry - uses: abatilo/actions-poetry@7b6d33e44b4f08d7021a1dee3c044e9c253d6439 - - uses: ./.github/actions/ci with: workspace_path: packages/ai-providers/server-ai-openai diff --git a/packages/sdk/server-ai/src/ldai/__init__.py b/packages/sdk/server-ai/src/ldai/__init__.py index 2868b57..da2340c 100644 --- a/packages/sdk/server-ai/src/ldai/__init__.py +++ b/packages/sdk/server-ai/src/ldai/__init__.py @@ -7,11 +7,25 @@ from ldai.client import LDAIClient from ldai.judge import Judge from ldai.models import ( # Deprecated aliases 
for backward compatibility - AIAgentConfig, AIAgentConfigDefault, AIAgentConfigRequest, - AIAgentGraphConfig, AIAgents, AICompletionConfig, - AICompletionConfigDefault, AIConfig, AIJudgeConfig, AIJudgeConfigDefault, - Edge, JudgeConfiguration, LDAIAgent, LDAIAgentConfig, LDAIAgentDefaults, - LDMessage, ModelConfig, ProviderConfig) + AIAgentConfig, + AIAgentConfigDefault, + AIAgentConfigRequest, + AIAgentGraphConfig, + AIAgents, + AICompletionConfig, + AICompletionConfigDefault, + AIConfig, + AIJudgeConfig, + AIJudgeConfigDefault, + Edge, + JudgeConfiguration, + LDAIAgent, + LDAIAgentConfig, + LDAIAgentDefaults, + LDMessage, + ModelConfig, + ProviderConfig, +) from ldai.providers.types import EvalScore, JudgeResponse from ldai.tracker import AIGraphTracker diff --git a/packages/sdk/server-ai/src/ldai/client.py b/packages/sdk/server-ai/src/ldai/client.py index 8289d06..ae79bd9 100644 --- a/packages/sdk/server-ai/src/ldai/client.py +++ b/packages/sdk/server-ai/src/ldai/client.py @@ -8,12 +8,22 @@ from ldai.agent_graph import AgentGraphDefinition from ldai.chat import Chat from ldai.judge import Judge -from ldai.models import (AIAgentConfig, AIAgentConfigDefault, - AIAgentConfigRequest, AIAgentGraphConfig, AIAgents, - AICompletionConfig, AICompletionConfigDefault, - AIJudgeConfig, AIJudgeConfigDefault, Edge, - JudgeConfiguration, LDMessage, ModelConfig, - ProviderConfig) +from ldai.models import ( + AIAgentConfig, + AIAgentConfigDefault, + AIAgentConfigRequest, + AIAgentGraphConfig, + AIAgents, + AICompletionConfig, + AICompletionConfigDefault, + AIJudgeConfig, + AIJudgeConfigDefault, + Edge, + JudgeConfiguration, + LDMessage, + ModelConfig, + ProviderConfig, +) from ldai.providers.ai_provider_factory import AIProviderFactory from ldai.sdk_info import AI_SDK_LANGUAGE, AI_SDK_NAME, AI_SDK_VERSION from ldai.tracker import AIGraphTracker, LDAIConfigTracker From dee9255ee6816b9e7895788cd52c34cd1edece20 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Wed, 25 Mar 2026 09:16:50 -0500 Subject: [PATCH 05/11] always install --- .github/actions/ci/action.yml | 2 +- .github/workflows/ci.yml | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/.github/actions/ci/action.yml b/.github/actions/ci/action.yml index 10f9392..0bc9753 100644 --- a/.github/actions/ci/action.yml +++ b/.github/actions/ci/action.yml @@ -19,7 +19,7 @@ runs: - name: Install Dependencies shell: bash - run: uv sync --all-groups + run: make -C ${{ inputs.workspace_path }} install - name: Lint shell: bash diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 232dda5..ac56226 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -49,6 +49,9 @@ jobs: with: python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: make -C packages/sdk/server-ai install + - name: Run tests run: make -C packages/sdk/server-ai test @@ -88,6 +91,9 @@ jobs: with: python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: make -C packages/ai-providers/server-ai-langchain install + - name: Run tests run: make -C packages/ai-providers/server-ai-langchain test @@ -127,5 +133,8 @@ jobs: with: python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: make -C packages/ai-providers/server-ai-openai install + - name: Run tests run: make -C packages/ai-providers/server-ai-openai test From e10be52a55fe507b7135550959dcf284bec5ae94 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Wed, 11 Mar 2026 12:06:39 -0500 Subject: [PATCH 06/11] feat: Support 
additional create methods for agent and agent_graph MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit feat!: Rename AIProviderFactory → RunnerFactory feat!: Rename OpenAIProvider → OpenAIRunnerFactory import from ldai_openai.openai_runner_factory feat!: Rename LangChainProvider to LangChainRunnerFactory import from ldai_langchain.langchain_runner_factory feat: Add create_model(), create_agent(), create_agent_graph() to AIProvider ABC (non-abstract, default warns) --- .../src/ldai_langchain/__init__.py | 9 +- ...rovider.py => langchain_runner_factory.py} | 84 ++++---- .../tests/test_langchain_provider.py | 54 +++--- .../src/ldai_openai/__init__.py | 8 +- ...i_provider.py => openai_runner_factory.py} | 75 ++++---- .../tests/test_openai_provider.py | 18 +- packages/sdk/server-ai/src/ldai/client.py | 6 +- .../server-ai/src/ldai/providers/__init__.py | 6 +- .../src/ldai/providers/ai_provider.py | 77 +++++--- .../src/ldai/providers/ai_provider_factory.py | 125 ------------ .../src/ldai/providers/runner_factory.py | 181 ++++++++++++++++++ 11 files changed, 366 insertions(+), 277 deletions(-) rename packages/ai-providers/server-ai-langchain/src/ldai_langchain/{langchain_provider.py => langchain_runner_factory.py} (77%) rename packages/ai-providers/server-ai-openai/src/ldai_openai/{openai_provider.py => openai_runner_factory.py} (78%) delete mode 100644 packages/sdk/server-ai/src/ldai/providers/ai_provider_factory.py create mode 100644 packages/sdk/server-ai/src/ldai/providers/runner_factory.py diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/__init__.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/__init__.py index 1282648..ee58f4e 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/__init__.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/__init__.py @@ -1,13 +1,10 @@ -"""LaunchDarkly AI SDK - LangChain Provider. +"""LaunchDarkly AI SDK - LangChain Connector.""" -This package provides LangChain integration for the LaunchDarkly Server-Side AI SDK, -""" - -from ldai_langchain.langchain_provider import LangChainProvider +from ldai_langchain.langchain_runner_factory import LangChainRunnerFactory __version__ = "0.1.0" __all__ = [ '__version__', - 'LangChainProvider', + 'LangChainRunnerFactory', ] diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_runner_factory.py similarity index 77% rename from packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py rename to packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_runner_factory.py index 702a6f0..78803a0 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_provider.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_runner_factory.py @@ -1,4 +1,4 @@ -"""LangChain implementation of AIProvider for LaunchDarkly AI SDK.""" +"""LangChain connector for LaunchDarkly AI SDK.""" from typing import Any, Dict, List, Optional, Union @@ -11,31 +11,46 @@ from ldai.tracker import TokenUsage -class LangChainProvider(AIProvider): +class LangChainRunnerFactory(AIProvider): """ - LangChain implementation of AIProvider. - - This provider integrates LangChain models with LaunchDarkly's tracking capabilities. + LangChain connector for the LaunchDarkly AI SDK. 
+ + Can be used in two ways: + - Transparently via ExecutorFactory (pass ``default_ai_provider='langchain'`` to + ``create_model()`` / ``create_chat()``). + - Directly for full control: instantiate with a ``BaseChatModel``, then call + ``invoke_model()`` yourself and use the static convenience methods + (``get_ai_metrics_from_response``, ``convert_messages_to_langchain``, + ``map_provider``, ``create_langchain_model``). """ - def __init__(self, llm: BaseChatModel): + def __init__(self, llm: Optional[BaseChatModel] = None): """ - Initialize the LangChain provider. + Initialize the LangChain connector. + + When called with no arguments the connector acts as a per-provider factory + — call ``create_model(config)`` to obtain a configured instance. - :param llm: A LangChain BaseChatModel instance + When called with an explicit ``llm`` the connector is ready to invoke + the model immediately. + + :param llm: A LangChain BaseChatModel instance (optional) """ self._llm = llm - @staticmethod - async def create(ai_config: AIConfigKind) -> 'LangChainProvider': + # --- AIProvider factory methods --- + + def create_model(self, config: AIConfigKind) -> 'LangChainRunnerFactory': """ - Static factory method to create a LangChain AIProvider from an AI configuration. + Create a configured LangChain model connector for the given AI config. - :param ai_config: The LaunchDarkly AI configuration - :return: Configured LangChainProvider instance + :param config: The LaunchDarkly AI configuration + :return: Configured LangChainRunnerFactory ready to invoke the model """ - llm = LangChainProvider.create_langchain_model(ai_config) - return LangChainProvider(llm) + llm = LangChainRunnerFactory.create_langchain_model(config) + return LangChainRunnerFactory(llm) + + # --- Model invocation --- async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse: """ @@ -45,9 +60,9 @@ async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse: :return: ChatResponse containing the model's response and metrics """ try: - langchain_messages = LangChainProvider.convert_messages_to_langchain(messages) + langchain_messages = LangChainRunnerFactory.convert_messages_to_langchain(messages) response: BaseMessage = await self._llm.ainvoke(langchain_messages) - metrics = LangChainProvider.get_ai_metrics_from_response(response) + metrics = LangChainRunnerFactory.get_ai_metrics_from_response(response) content: str = '' if isinstance(response.content, str): @@ -89,7 +104,7 @@ async def invoke_structured_model( metrics=LDAIMetrics(success=False, usage=None), ) try: - langchain_messages = LangChainProvider.convert_messages_to_langchain(messages) + langchain_messages = LangChainRunnerFactory.convert_messages_to_langchain(messages) structured_llm = self._llm.with_structured_output(response_structure, include_raw=True) response = await structured_llm.ainvoke(langchain_messages) @@ -104,7 +119,7 @@ async def invoke_structured_model( if raw_response is not None: if hasattr(raw_response, 'content'): structured_response.raw_response = raw_response.content - structured_response.metrics.usage = LangChainProvider.get_ai_usage_from_response(raw_response) + structured_response.metrics.usage = LangChainRunnerFactory.get_ai_usage_from_response(raw_response) if response.get('parsing_error'): log.warning(f'LangChain structured model invocation had a parsing error') @@ -117,11 +132,13 @@ async def invoke_structured_model( log.warning(f'LangChain structured model invocation failed: {error}') return structured_response - def 
get_chat_model(self) -> BaseChatModel: + # --- Convenience accessors --- + + def get_chat_model(self) -> Optional[BaseChatModel]: """ Get the underlying LangChain model instance. - :return: The underlying BaseChatModel + :return: The underlying BaseChatModel, or None if not yet configured """ return self._llm @@ -174,23 +191,19 @@ def get_ai_usage_from_response(response: BaseMessage) -> TokenUsage: @staticmethod def get_ai_metrics_from_response(response: BaseMessage) -> LDAIMetrics: """ - Get AI metrics from a LangChain provider response. - - This method extracts token usage information and success status from LangChain responses - and returns a LaunchDarkly AIMetrics object. + Extract LaunchDarkly AI metrics from a LangChain response. :param response: The response from the LangChain model :return: LDAIMetrics with success status and token usage - Example: - # Use with tracker.track_metrics_of for automatic tracking + Example:: + response = await tracker.track_metrics_of( lambda: llm.ainvoke(messages), - LangChainProvider.get_ai_metrics_from_response + LangChainRunnerFactory.get_ai_metrics_from_response ) """ - # Extract token usage if available - usage = LangChainProvider.get_ai_usage_from_response(response) + usage = LangChainRunnerFactory.get_ai_usage_from_response(response) return LDAIMetrics(success=True, usage=usage) @@ -201,9 +214,6 @@ def convert_messages_to_langchain( """ Convert LaunchDarkly messages to LangChain messages. - This helper method enables developers to work directly with LangChain message types - while maintaining compatibility with LaunchDarkly's standardized message format. - :param messages: List of LDMessage objects :return: List of LangChain message objects :raises ValueError: If an unsupported message role is encountered @@ -225,10 +235,7 @@ def convert_messages_to_langchain( @staticmethod def create_langchain_model(ai_config: AIConfigKind) -> BaseChatModel: """ - Create a LangChain model from an AI configuration. - - This public helper method enables developers to initialize their own LangChain models - using LaunchDarkly AI configurations. + Create a LangChain model from a LaunchDarkly AI configuration. :param ai_config: The LaunchDarkly AI configuration :return: A configured LangChain BaseChatModel @@ -242,7 +249,7 @@ def create_langchain_model(ai_config: AIConfigKind) -> BaseChatModel: model_name = model_dict.get('name', '') provider = provider_dict.get('name', '') parameters = dict(model_dict.get('parameters') or {}) - mapped_provider = LangChainProvider.map_provider(provider) + mapped_provider = LangChainRunnerFactory.map_provider(provider) # Bedrock requires the foundation provider (e.g. Bedrock:Anthropic) passed in # parameters separately from model_provider, which is used for LangChain routing. 
@@ -253,3 +260,4 @@ def create_langchain_model(ai_config: AIConfigKind) -> BaseChatModel: model_provider=mapped_provider, **parameters, ) + diff --git a/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py b/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py index 402faa6..9abc047 100644 --- a/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py +++ b/packages/ai-providers/server-ai-langchain/tests/test_langchain_provider.py @@ -7,7 +7,7 @@ from ldai import LDMessage -from ldai_langchain import LangChainProvider +from ldai_langchain import LangChainRunnerFactory class TestConvertMessagesToLangchain: @@ -16,7 +16,7 @@ class TestConvertMessagesToLangchain: def test_converts_system_messages_to_system_message(self): """Should convert system messages to SystemMessage.""" messages = [LDMessage(role='system', content='You are a helpful assistant.')] - result = LangChainProvider.convert_messages_to_langchain(messages) + result = LangChainRunnerFactory.convert_messages_to_langchain(messages) assert len(result) == 1 assert isinstance(result[0], SystemMessage) @@ -25,7 +25,7 @@ def test_converts_system_messages_to_system_message(self): def test_converts_user_messages_to_human_message(self): """Should convert user messages to HumanMessage.""" messages = [LDMessage(role='user', content='Hello, how are you?')] - result = LangChainProvider.convert_messages_to_langchain(messages) + result = LangChainRunnerFactory.convert_messages_to_langchain(messages) assert len(result) == 1 assert isinstance(result[0], HumanMessage) @@ -34,7 +34,7 @@ def test_converts_user_messages_to_human_message(self): def test_converts_assistant_messages_to_ai_message(self): """Should convert assistant messages to AIMessage.""" messages = [LDMessage(role='assistant', content='I am doing well, thank you!')] - result = LangChainProvider.convert_messages_to_langchain(messages) + result = LangChainRunnerFactory.convert_messages_to_langchain(messages) assert len(result) == 1 assert isinstance(result[0], AIMessage) @@ -47,7 +47,7 @@ def test_converts_multiple_messages_in_order(self): LDMessage(role='user', content='What is the weather like?'), LDMessage(role='assistant', content='I cannot check the weather.'), ] - result = LangChainProvider.convert_messages_to_langchain(messages) + result = LangChainRunnerFactory.convert_messages_to_langchain(messages) assert len(result) == 3 assert isinstance(result[0], SystemMessage) @@ -62,11 +62,11 @@ class MockMessage: content = 'Test message' with pytest.raises(ValueError, match='Unsupported message role: unknown'): - LangChainProvider.convert_messages_to_langchain([MockMessage()]) # type: ignore + LangChainRunnerFactory.convert_messages_to_langchain([MockMessage()]) # type: ignore def test_handles_empty_message_array(self): """Should handle empty message array.""" - result = LangChainProvider.convert_messages_to_langchain([]) + result = LangChainRunnerFactory.convert_messages_to_langchain([]) assert len(result) == 0 @@ -84,7 +84,7 @@ def test_creates_metrics_with_success_true_and_token_usage(self): }, } - result = LangChainProvider.get_ai_metrics_from_response(mock_response) + result = LangChainRunnerFactory.get_ai_metrics_from_response(mock_response) assert result.success is True assert result.usage is not None @@ -103,7 +103,7 @@ def test_creates_metrics_with_snake_case_token_usage(self): }, } - result = LangChainProvider.get_ai_metrics_from_response(mock_response) + result = 
LangChainRunnerFactory.get_ai_metrics_from_response(mock_response) assert result.success is True assert result.usage is not None @@ -115,7 +115,7 @@ def test_creates_metrics_with_success_true_and_no_usage_when_metadata_missing(se """Should create metrics with success=True and no usage when metadata is missing.""" mock_response = AIMessage(content='Test response') - result = LangChainProvider.get_ai_metrics_from_response(mock_response) + result = LangChainRunnerFactory.get_ai_metrics_from_response(mock_response) assert result.success is True assert result.usage is None @@ -126,23 +126,23 @@ class TestMapProvider: def test_maps_gemini_to_google_genai(self): """Should map gemini to google-genai.""" - assert LangChainProvider.map_provider('gemini') == 'google-genai' - assert LangChainProvider.map_provider('Gemini') == 'google-genai' - assert LangChainProvider.map_provider('GEMINI') == 'google-genai' + assert LangChainRunnerFactory.map_provider('gemini') == 'google-genai' + assert LangChainRunnerFactory.map_provider('Gemini') == 'google-genai' + assert LangChainRunnerFactory.map_provider('GEMINI') == 'google-genai' def test_maps_bedrock_and_model_families_to_bedrock_converse(self): """Should map bedrock and bedrock:model_family to bedrock_converse.""" - assert LangChainProvider.map_provider('bedrock') == 'bedrock_converse' - assert LangChainProvider.map_provider('Bedrock:Anthropic') == 'bedrock_converse' - assert LangChainProvider.map_provider('bedrock:anthropic') == 'bedrock_converse' - assert LangChainProvider.map_provider('bedrock:amazon') == 'bedrock_converse' - assert LangChainProvider.map_provider('bedrock:cohere') == 'bedrock_converse' + assert LangChainRunnerFactory.map_provider('bedrock') == 'bedrock_converse' + assert LangChainRunnerFactory.map_provider('Bedrock:Anthropic') == 'bedrock_converse' + assert LangChainRunnerFactory.map_provider('bedrock:anthropic') == 'bedrock_converse' + assert LangChainRunnerFactory.map_provider('bedrock:amazon') == 'bedrock_converse' + assert LangChainRunnerFactory.map_provider('bedrock:cohere') == 'bedrock_converse' def test_returns_provider_name_unchanged_for_unmapped_providers(self): """Should return provider name unchanged for unmapped providers.""" - assert LangChainProvider.map_provider('openai') == 'openai' - assert LangChainProvider.map_provider('anthropic') == 'anthropic' - assert LangChainProvider.map_provider('unknown') == 'unknown' + assert LangChainRunnerFactory.map_provider('openai') == 'openai' + assert LangChainRunnerFactory.map_provider('anthropic') == 'anthropic' + assert LangChainRunnerFactory.map_provider('unknown') == 'unknown' class TestInvokeModel: @@ -158,7 +158,7 @@ async def test_returns_success_true_for_string_content(self, mock_llm): """Should return success=True for string content.""" mock_response = AIMessage(content='Test response') mock_llm.ainvoke = AsyncMock(return_value=mock_response) - provider = LangChainProvider(mock_llm) + provider = LangChainRunnerFactory(mock_llm) messages = [LDMessage(role='user', content='Hello')] result = await provider.invoke_model(messages) @@ -171,7 +171,7 @@ async def test_returns_success_false_for_non_string_content_and_logs_warning(sel """Should return success=False for non-string content and log warning.""" mock_response = AIMessage(content=[{'type': 'image', 'data': 'base64data'}]) mock_llm.ainvoke = AsyncMock(return_value=mock_response) - provider = LangChainProvider(mock_llm) + provider = LangChainRunnerFactory(mock_llm) messages = [LDMessage(role='user', content='Hello')] result = 
await provider.invoke_model(messages) @@ -184,7 +184,7 @@ async def test_returns_success_false_when_model_invocation_throws_error(self, mo """Should return success=False when model invocation throws an error.""" error = Exception('Model invocation failed') mock_llm.ainvoke = AsyncMock(side_effect=error) - provider = LangChainProvider(mock_llm) + provider = LangChainRunnerFactory(mock_llm) messages = [LDMessage(role='user', content='Hello')] result = await provider.invoke_model(messages) @@ -210,7 +210,7 @@ async def test_returns_success_true_for_successful_invocation(self, mock_llm): mock_structured_llm = MagicMock() mock_structured_llm.ainvoke = AsyncMock(return_value=mock_response) mock_llm.with_structured_output = MagicMock(return_value=mock_structured_llm) - provider = LangChainProvider(mock_llm) + provider = LangChainRunnerFactory(mock_llm) messages = [LDMessage(role='user', content='Hello')] response_structure = {'type': 'object', 'properties': {}} @@ -226,7 +226,7 @@ async def test_returns_success_false_when_structured_model_invocation_throws_err mock_structured_llm = MagicMock() mock_structured_llm.ainvoke = AsyncMock(side_effect=error) mock_llm.with_structured_output = MagicMock(return_value=mock_structured_llm) - provider = LangChainProvider(mock_llm) + provider = LangChainRunnerFactory(mock_llm) messages = [LDMessage(role='user', content='Hello')] response_structure = {'type': 'object', 'properties': {}} @@ -244,7 +244,7 @@ class TestGetChatModel: def test_returns_underlying_llm(self): """Should return the underlying LLM.""" mock_llm = MagicMock() - provider = LangChainProvider(mock_llm) + provider = LangChainRunnerFactory(mock_llm) assert provider.get_chat_model() is mock_llm diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/__init__.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/__init__.py index 5d5120f..1284f48 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/__init__.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/__init__.py @@ -1,5 +1,7 @@ -"""LaunchDarkly AI SDK OpenAI Provider.""" +"""LaunchDarkly AI SDK OpenAI Connector.""" -from ldai_openai.openai_provider import OpenAIProvider +from ldai_openai.openai_runner_factory import OpenAIProvider -__all__ = ['OpenAIProvider'] +__all__ = [ + 'OpenAIProvider', +] diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py similarity index 78% rename from packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py rename to packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py index c62cc80..1313404 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_provider.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py @@ -1,4 +1,4 @@ -"""OpenAI implementation of AIProvider for LaunchDarkly AI SDK.""" +"""OpenAI connector for LaunchDarkly AI SDK.""" import json import os @@ -15,46 +15,60 @@ class OpenAIProvider(AIProvider): """ - OpenAI implementation of AIProvider. + OpenAI connector for the LaunchDarkly AI SDK. - This provider integrates OpenAI's chat completions API with LaunchDarkly's tracking capabilities. + Can be used in two ways: + - Transparently via ExecutorFactory (pass ``default_ai_provider='openai'`` to + ``create_model()`` / ``create_chat()``). 
+ - Directly for full control: instantiate with an ``AsyncOpenAI`` client, + model name, and parameters, then call ``invoke_model()`` yourself. """ def __init__( self, - client: AsyncOpenAI, - model_name: str, - parameters: Dict[str, Any], + client: Optional[AsyncOpenAI] = None, + model_name: str = '', + parameters: Optional[Dict[str, Any]] = None, ): """ - Initialize the OpenAI provider. + Initialize the OpenAI connector. - :param client: An AsyncOpenAI client instance + When called with no arguments the connector reads credentials from the + environment (``OPENAI_API_KEY``) and acts as a per-provider factory — + call ``create_model(config)`` to obtain a configured instance. + + When called with explicit arguments the connector is ready to invoke + the model immediately. + + :param client: An AsyncOpenAI client instance (created from env if omitted) :param model_name: The name of the model to use :param parameters: Additional model parameters """ - self._client = client + self._client = client if client is not None else AsyncOpenAI( + api_key=os.environ.get('OPENAI_API_KEY'), + ) self._model_name = model_name - self._parameters = parameters + self._parameters = parameters or {} - @staticmethod - async def create(ai_config: AIConfigKind) -> 'OpenAIProvider': - """ - Static factory method to create an OpenAI AIProvider from an AI configuration. + # --- AIProvider factory methods --- - :param ai_config: The LaunchDarkly AI configuration - :return: Configured OpenAIProvider instance + def create_model(self, config: AIConfigKind) -> 'OpenAIProvider': """ - client = AsyncOpenAI( - api_key=os.environ.get('OPENAI_API_KEY'), - ) + Create a configured OpenAI model connector for the given AI config. + + Reuses the underlying AsyncOpenAI client so that connection pooling is + preserved across calls. 
- config_dict = ai_config.to_dict() + :param config: The LaunchDarkly AI configuration + :return: Configured OpenAIProvider ready to invoke the model + """ + config_dict = config.to_dict() model_dict = config_dict.get('model') or {} model_name = model_dict.get('name', '') parameters = model_dict.get('parameters') or {} + return OpenAIProvider(self._client, model_name, parameters) - return OpenAIProvider(client, model_name, parameters) + # --- Model invocation --- async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse: """ @@ -64,7 +78,6 @@ async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse: :return: ChatResponse containing the model's response and metrics """ try: - # Convert LDMessage to OpenAI message format openai_messages: Iterable[ChatCompletionMessageParam] = cast( Iterable[ChatCompletionMessageParam], [{'role': msg.role, 'content': msg.content} for msg in messages] @@ -76,10 +89,8 @@ async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse: **self._parameters, ) - # Generate metrics early (assumes success by default) metrics = OpenAIProvider.get_ai_metrics_from_response(response) - # Safely extract the first choice content content = '' if response.choices and len(response.choices) > 0: message = response.choices[0].message @@ -115,7 +126,6 @@ async def invoke_structured_model( :return: StructuredResponse containing the structured data """ try: - # Convert LDMessage to OpenAI message format openai_messages: Iterable[ChatCompletionMessageParam] = cast( Iterable[ChatCompletionMessageParam], [{'role': msg.role, 'content': msg.content} for msg in messages] @@ -135,10 +145,8 @@ async def invoke_structured_model( **self._parameters, ) - # Generate metrics early (assumes success by default) metrics = OpenAIProvider.get_ai_metrics_from_response(response) - # Safely extract the first choice content content = '' if response.choices and len(response.choices) > 0: message = response.choices[0].message @@ -178,6 +186,8 @@ async def invoke_structured_model( metrics=LDAIMetrics(success=False, usage=None), ) + # --- Convenience accessors --- + def get_client(self) -> AsyncOpenAI: """ Get the underlying OpenAI client instance. @@ -189,21 +199,18 @@ def get_client(self) -> AsyncOpenAI: @staticmethod def get_ai_metrics_from_response(response: Any) -> LDAIMetrics: """ - Get AI metrics from an OpenAI response. - - This method extracts token usage information and success status from OpenAI responses - and returns a LaunchDarkly AIMetrics object. + Extract LaunchDarkly AI metrics from an OpenAI response. 
:param response: The response from OpenAI chat completions API :return: LDAIMetrics with success status and token usage - Example: + Example:: + response = await tracker.track_metrics_of( lambda: client.chat.completions.create(config), OpenAIProvider.get_ai_metrics_from_response ) """ - # Extract token usage if available usage: Optional[TokenUsage] = None if hasattr(response, 'usage') and response.usage: usage = TokenUsage( @@ -212,5 +219,5 @@ def get_ai_metrics_from_response(response: Any) -> LDAIMetrics: output=response.usage.completion_tokens or 0, ) - # OpenAI responses that complete successfully are considered successful by default return LDAIMetrics(success=True, usage=usage) + diff --git a/packages/ai-providers/server-ai-openai/tests/test_openai_provider.py b/packages/ai-providers/server-ai-openai/tests/test_openai_provider.py index ff9066b..d684df0 100644 --- a/packages/ai-providers/server-ai-openai/tests/test_openai_provider.py +++ b/packages/ai-providers/server-ai-openai/tests/test_openai_provider.py @@ -277,11 +277,10 @@ def test_returns_underlying_client(self): assert provider.get_client() is mock_client -class TestCreate: - """Tests for create static factory method.""" +class TestCreateModel: + """Tests for create_model instance method.""" - @pytest.mark.asyncio - async def test_creates_provider_with_correct_model_and_parameters(self): + def test_creates_connector_with_correct_model_and_parameters(self): """Should create OpenAIProvider with correct model and parameters.""" mock_ai_config = MagicMock() mock_ai_config.to_dict.return_value = { @@ -295,27 +294,26 @@ async def test_creates_provider_with_correct_model_and_parameters(self): 'provider': {'name': 'openai'}, } - with patch('ldai_openai.openai_provider.AsyncOpenAI') as mock_openai_class: + with patch('ldai_openai.openai_runner_factory.AsyncOpenAI') as mock_openai_class: mock_client = MagicMock() mock_openai_class.return_value = mock_client - result = await OpenAIProvider.create(mock_ai_config) + result = OpenAIProvider().create_model(mock_ai_config) assert isinstance(result, OpenAIProvider) assert result._model_name == 'gpt-4' assert result._parameters == {'temperature': 0.7, 'max_tokens': 1000} - @pytest.mark.asyncio - async def test_handles_missing_model_config(self): + def test_handles_missing_model_config(self): """Should handle missing model configuration.""" mock_ai_config = MagicMock() mock_ai_config.to_dict.return_value = {} - with patch('ldai_openai.openai_provider.AsyncOpenAI') as mock_openai_class: + with patch('ldai_openai.openai_runner_factory.AsyncOpenAI') as mock_openai_class: mock_client = MagicMock() mock_openai_class.return_value = mock_client - result = await OpenAIProvider.create(mock_ai_config) + result = OpenAIProvider().create_model(mock_ai_config) assert isinstance(result, OpenAIProvider) assert result._model_name == '' diff --git a/packages/sdk/server-ai/src/ldai/client.py b/packages/sdk/server-ai/src/ldai/client.py index ae79bd9..a87bef0 100644 --- a/packages/sdk/server-ai/src/ldai/client.py +++ b/packages/sdk/server-ai/src/ldai/client.py @@ -24,7 +24,7 @@ ModelConfig, ProviderConfig, ) -from ldai.providers.ai_provider_factory import AIProviderFactory +from ldai.providers.runner_factory import RunnerFactory from ldai.sdk_info import AI_SDK_LANGUAGE, AI_SDK_NAME, AI_SDK_VERSION from ldai.tracker import AIGraphTracker, LDAIConfigTracker @@ -245,7 +245,7 @@ async def create_judge( if not judge_config.enabled or not judge_config.tracker: return None - provider = await 
AIProviderFactory.create(judge_config, default_ai_provider) + provider = await RunnerFactory.create_model(judge_config, default_ai_provider) if not provider: return None @@ -346,7 +346,7 @@ async def create_chat( if not config.enabled or not config.tracker: return None - provider = await AIProviderFactory.create(config, default_ai_provider) + provider = await RunnerFactory.create_model(config, default_ai_provider) if not provider: return None diff --git a/packages/sdk/server-ai/src/ldai/providers/__init__.py b/packages/sdk/server-ai/src/ldai/providers/__init__.py index 71efb6c..4cebeea 100644 --- a/packages/sdk/server-ai/src/ldai/providers/__init__.py +++ b/packages/sdk/server-ai/src/ldai/providers/__init__.py @@ -1,9 +1,9 @@ -"""AI Provider interfaces and factory for LaunchDarkly AI SDK.""" +"""AI Connector interfaces and factory for LaunchDarkly AI SDK.""" from ldai.providers.ai_provider import AIProvider -from ldai.providers.ai_provider_factory import AIProviderFactory +from ldai.providers.runner_factory import RunnerFactory __all__ = [ 'AIProvider', - 'AIProviderFactory', + 'RunnerFactory', ] diff --git a/packages/sdk/server-ai/src/ldai/providers/ai_provider.py b/packages/sdk/server-ai/src/ldai/providers/ai_provider.py index 91c8cb9..a675eda 100644 --- a/packages/sdk/server-ai/src/ldai/providers/ai_provider.py +++ b/packages/sdk/server-ai/src/ldai/providers/ai_provider.py @@ -1,38 +1,37 @@ -"""Abstract base class for AI providers.""" +"""Abstract base class for AI connectors.""" -from abc import ABC, abstractmethod -from typing import Any, Dict, List, Optional, Union +from abc import ABC +from typing import Any, Dict, List, Optional from ldai import log -from ldai.models import AIConfigKind, LDMessage +from ldai.models import LDMessage from ldai.providers.types import ChatResponse, StructuredResponse class AIProvider(ABC): """ - Abstract base class for AI providers that implement chat model functionality. + Abstract base class for AI provider connectors. - This class provides the contract that all provider implementations must follow - to integrate with LaunchDarkly's tracking and configuration capabilities. + An AIProvider is a per-provider factory: it is instantiated once per provider + (with no arguments — credentials are read from environment variables) and is + responsible for constructing focused runtime capability objects via + create_model(), create_agent(), and create_agent_graph(). - Following the AICHAT spec recommendation to use base classes with non-abstract methods - for better extensibility and backwards compatibility. + The invoke_model() / invoke_structured_model() methods remain on this base + class for compatibility and will migrate to ModelExecutor in PR 2. """ async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse: """ Invoke the chat model with an array of messages. - This method should convert messages to provider format, invoke the model, - and return a ChatResponse with the result and metrics. - Default implementation takes no action and returns a placeholder response. - Provider implementations should override this method. + Connector implementations should override this method. 
:param messages: Array of LDMessage objects representing the conversation :return: ChatResponse containing the model's response """ - log.warn('invokeModel not implemented by this provider') + log.warn('invoke_model not implemented by this connector') from ldai.models import LDMessage from ldai.providers.types import LDAIMetrics @@ -50,17 +49,14 @@ async def invoke_structured_model( """ Invoke the chat model with structured output support. - This method should convert messages to provider format, invoke the model with - structured output configuration, and return a structured response. - Default implementation takes no action and returns a placeholder response. - Provider implementations should override this method. + Connector implementations should override this method. :param messages: Array of LDMessage objects representing the conversation :param response_structure: Dictionary of output configurations keyed by output name :return: StructuredResponse containing the structured data """ - log.warn('invokeStructuredModel not implemented by this provider') + log.warn('invoke_structured_model not implemented by this connector') from ldai.providers.types import LDAIMetrics @@ -70,16 +66,41 @@ async def invoke_structured_model( metrics=LDAIMetrics(success=False, usage=None), ) - @staticmethod - @abstractmethod - async def create(ai_config: AIConfigKind) -> 'AIProvider': + def create_model(self, config: Any) -> Optional['AIProvider']: + """ + Create a configured model executor for the given AI config. + + Default implementation warns. Provider connectors should override this method. + + :param config: The LaunchDarkly AI configuration + :return: Configured AIProvider instance, or None if unsupported + """ + log.warn('create_model not implemented by this connector') + return None + + def create_agent(self, config: Any, tools: Any) -> Optional[Any]: """ - Static method that constructs an instance of the provider. + Create a configured agent executor for the given AI config and tool registry. - Each provider implementation must provide their own static create method - that accepts an AIConfigKind and returns a configured instance. + Default implementation warns. Provider connectors should override this method. - :param ai_config: The LaunchDarkly AI configuration - :return: Configured provider instance + :param config: The LaunchDarkly AI agent configuration + :param tools: Tool registry mapping tool names to callables + :return: AgentExecutor instance, or None if unsupported """ - raise NotImplementedError('Provider implementations must override the static create method') + log.warn('create_agent not implemented by this connector') + return None + + def create_agent_graph(self, graph_def: Any, tools: Any) -> Optional[Any]: + """ + Create a configured agent graph executor for the given graph definition and tools. + + Default implementation warns. Provider connectors should override this method. 
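To see how these hooks compose, here is a minimal hypothetical provider; EchoProvider is purely illustrative, and the ChatResponse constructor fields are assumptions inferred from the placeholder responses in this diff:

```python
from typing import Any, List, Optional

from ldai.models import LDMessage
from ldai.providers.ai_provider import AIProvider
from ldai.providers.types import ChatResponse, LDAIMetrics


class EchoProvider(AIProvider):
    """Toy provider that echoes the last user message back."""

    def create_model(self, config: Any) -> Optional['EchoProvider']:
        # A real provider would read the model name and parameters
        # out of the config; the toy version needs neither.
        return EchoProvider()

    async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse:
        content = messages[-1].content if messages else ''
        # Field names below are assumed, mirroring the default
        # placeholder responses shown in the surrounding diff.
        return ChatResponse(
            message=LDMessage(role='assistant', content=content),
            metrics=LDAIMetrics(success=True, usage=None),
        )
```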
+ + :param graph_def: The agent graph definition + :param tools: Tool registry mapping tool names to callables + :return: AgentGraphExecutor instance, or None if unsupported + """ + log.warn('create_agent_graph not implemented by this connector') + return None + diff --git a/packages/sdk/server-ai/src/ldai/providers/ai_provider_factory.py b/packages/sdk/server-ai/src/ldai/providers/ai_provider_factory.py deleted file mode 100644 index 74e55d5..0000000 --- a/packages/sdk/server-ai/src/ldai/providers/ai_provider_factory.py +++ /dev/null @@ -1,125 +0,0 @@ -"""Factory for creating AIProvider instances based on the provider configuration.""" - -from importlib import util -from typing import Any, Dict, List, Optional, Tuple, Type - -from ldai import log -from ldai.models import AIConfigKind -from ldai.providers.ai_provider import AIProvider - -# Supported AI providers -# Multi-provider packages should be last in the list -SUPPORTED_AI_PROVIDERS = ('openai', 'langchain') - - -class AIProviderFactory: - """ - Factory for creating AIProvider instances based on the provider configuration. - """ - - @staticmethod - async def create( - ai_config: AIConfigKind, - default_ai_provider: Optional[str] = None, - ) -> Optional[AIProvider]: - """ - Create an AIProvider instance based on the AI configuration. - - This method attempts to load provider-specific implementations dynamically. - Returns None if the provider is not supported. - - :param ai_config: The AI configuration - :param default_ai_provider: Optional default AI provider to use - :return: AIProvider instance or None if not supported - """ - provider_name = ai_config.provider.name.lower() if ai_config.provider else None - providers_to_try = AIProviderFactory._get_providers_to_try(default_ai_provider, provider_name) - - for provider_type in providers_to_try: - provider = await AIProviderFactory._try_create_provider(provider_type, ai_config) - if provider: - log.debug( - f"Successfully created AIProvider for: {provider_name} " - f"with provider type: {provider_type} for AIConfig: {ai_config.key}" - ) - return provider - - log.warn( - f"Provider is not supported or failed to initialize: {provider_name}" - ) - return None - - @staticmethod - def _get_providers_to_try( - default_ai_provider: Optional[str], - provider_name: Optional[str], - ) -> List[str]: - """ - Determine which providers to try based on default_ai_provider and provider_name. - - :param default_ai_provider: Optional default provider to use - :param provider_name: Optional provider name from config - :return: List of providers to try in order - """ - if default_ai_provider: - return [default_ai_provider] - - providers = [] - - if provider_name and provider_name in SUPPORTED_AI_PROVIDERS: - providers.append(provider_name) - - # Then try multi-provider packages, but avoid duplicates - multi_provider_packages: List[str] = ['langchain'] - for provider in multi_provider_packages: - if provider not in providers: - providers.append(provider) - - return providers - - @staticmethod - async def _try_create_provider( - provider_type: str, - ai_config: AIConfigKind, - ) -> Optional[AIProvider]: - """ - Try to create a provider of the specified type. 
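Both the deleted factory and its replacement below gate each optional integration on importlib.util.find_spec, which checks importability without executing the package; the idiom in isolation:

```python
from importlib import util


def pkg_exists(package_name: str) -> bool:
    # find_spec returns None when the package cannot be located,
    # without running any of its import-time code.
    return util.find_spec(package_name) is not None


if pkg_exists('ldai_openai'):
    from ldai_openai import OpenAIProvider  # safe: the package is present
```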
- - :param provider_type: Type of provider to create - :param ai_config: AI configuration - :return: AIProvider instance or None if creation failed - """ - try: - if provider_type == 'langchain': - AIProviderFactory._pkg_exists('ldai_langchain') - from ldai_langchain import LangChainProvider - return await LangChainProvider.create(ai_config) - - if provider_type == 'openai': - AIProviderFactory._pkg_exists('ldai_openai') - from ldai_openai import OpenAIProvider - return await OpenAIProvider.create(ai_config) - - log.warn( - f"Provider {provider_type} is not supported. " - f"Supported providers are: {SUPPORTED_AI_PROVIDERS}" - ) - - return None - except ImportError as error: - log.warn( - f"Error creating {provider_type} provider: {error}. " - f"Make sure the {provider_type} package is installed." - ) - return None - - @staticmethod - def _pkg_exists(package_name: str) -> None: - """ - Check if a package exists. - - :param package_name: Name of the package to check - :return: None if the package exists, otherwise raises an ImportError - """ - if util.find_spec(package_name) is None: - raise ImportError(f"Package {package_name} not found") diff --git a/packages/sdk/server-ai/src/ldai/providers/runner_factory.py b/packages/sdk/server-ai/src/ldai/providers/runner_factory.py new file mode 100644 index 0000000..f2e5fca --- /dev/null +++ b/packages/sdk/server-ai/src/ldai/providers/runner_factory.py @@ -0,0 +1,181 @@ +"""Factory for creating AIProvider instances and capability runners.""" + +from importlib import util +from typing import Any, Callable, List, Optional, TypeVar + +from ldai import log +from ldai.models import AIConfigKind +from ldai.providers.ai_provider import AIProvider + +T = TypeVar('T') + +# Supported AI providers. +# Multi-provider packages should be last in the list. +SUPPORTED_AI_PROVIDERS = ('openai', 'langchain') + + +class RunnerFactory: + """ + Sole entry point for capability creation. + + RunnerFactory instantiates the appropriate AIProvider for the configured + provider and delegates runner construction to it. The shared fallback + loop (_with_fallback) tries each candidate provider in order and returns + the first successful result. + """ + + @staticmethod + def _get_ai_adapter(provider_type: str) -> Optional[AIProvider]: + """ + Import and instantiate the AIProvider for the given provider type. + + This is the only place in the SDK that knows about connector package names. + + :param provider_type: Provider identifier, e.g. 'openai' or 'langchain' + :return: AIProvider instance, or None if the package is not installed + """ + try: + if provider_type == 'langchain': + RunnerFactory._pkg_exists('ldai_langchain') + from ldai_langchain import LangChainRunnerFactory + return LangChainRunnerFactory() + + if provider_type == 'openai': + RunnerFactory._pkg_exists('ldai_openai') + from ldai_openai import OpenAIProvider + return OpenAIProvider() + + log.warn( + f"Provider '{provider_type}' is not supported. " + f"Supported providers: {SUPPORTED_AI_PROVIDERS}" + ) + return None + except ImportError as error: + log.warn( + f"Could not load provider '{provider_type}': {error}. " + f"Make sure the corresponding package is installed." + ) + return None + + @staticmethod + def _with_fallback( + providers: List[str], + fn: Callable[[AIProvider], Optional[T]], + ) -> Optional[T]: + """ + Try each provider in order; return the first successful result. + + Shared by all create_* methods so the fallback loop is written once. 
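Stripped of the SDK types, the fallback helper is an ordinary first-success combinator; a standalone sketch of the same pattern:

```python
from typing import Callable, List, Optional, TypeVar

T = TypeVar('T')


def with_fallback(candidates: List[str],
                  fn: Callable[[str], Optional[T]]) -> Optional[T]:
    """Apply fn to each candidate in order; return the first non-None result."""
    for candidate in candidates:
        try:
            result = fn(candidate)
            if result is not None:
                return result
        except Exception:
            continue  # one failing candidate must not abort the rest
    return None


# Only 'b' produces a value, so it wins:
assert with_fallback(['a', 'b'], lambda n: n.upper() if n == 'b' else None) == 'B'
```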
+ + :param providers: Ordered list of provider identifiers to try + :param fn: Callable that receives an AIProvider and returns a result or None + :return: First non-None result, or None if all providers fail + """ + for provider_type in providers: + try: + connector = RunnerFactory._get_ai_adapter(provider_type) + if connector is None: + continue + result = fn(connector) + if result is not None: + log.debug(f"Successfully created capability using provider '{provider_type}'") + return result + except Exception as exc: + log.warn(f"Provider '{provider_type}' failed: {exc}") + + log.warn("All providers failed or are unavailable") + return None + + @staticmethod + def _get_providers_to_try( + default_ai_provider: Optional[str], + provider_name: Optional[str], + ) -> List[str]: + """ + Determine which providers to try, in priority order. + + :param default_ai_provider: Caller-specified override (tried exclusively if set) + :param provider_name: Provider name from the AI config + :return: Ordered list of provider identifiers + """ + if default_ai_provider: + return [default_ai_provider] + + providers: List[str] = [] + + if provider_name and provider_name in SUPPORTED_AI_PROVIDERS: + providers.append(provider_name) + + # Multi-provider packages act as a fallback + for multi in ['langchain']: + if multi not in providers: + providers.append(multi) + + return providers + + # --- Public API --- + + @staticmethod + async def create_model( + config: AIConfigKind, + default_ai_provider: Optional[str] = None, + ) -> Optional[AIProvider]: + """ + Create a model executor for the given AI completion config. + + :param config: LaunchDarkly AI config (completion or judge) + :param default_ai_provider: Optional provider override ('openai', 'langchain', …) + :return: Configured AIProvider that can invoke_model(), or None + """ + provider_name = config.provider.name.lower() if config.provider else None + providers = RunnerFactory._get_providers_to_try(default_ai_provider, provider_name) + return RunnerFactory._with_fallback(providers, lambda p: p.create_model(config)) + + @staticmethod + async def create_agent( + config: Any, + tools: Any, + default_ai_provider: Optional[str] = None, + ) -> Optional[Any]: + """ + Create an agent executor for the given AI agent config and tool registry. + + :param config: LaunchDarkly AI agent config + :param tools: Tool registry mapping tool names to callables + :param default_ai_provider: Optional provider override + :return: AgentExecutor instance, or None + """ + provider_name = config.provider.name.lower() if config.provider else None + providers = RunnerFactory._get_providers_to_try(default_ai_provider, provider_name) + return RunnerFactory._with_fallback(providers, lambda p: p.create_agent(config, tools)) + + @staticmethod + async def create_agent_graph( + graph_def: Any, + tools: Any, + default_ai_provider: Optional[str] = None, + ) -> Optional[Any]: + """ + Create an agent graph executor for the given graph definition and tool registry. 
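As the factory stands in this patch, its public methods are still coroutines (a later patch in the series makes them synchronous); a hedged usage sketch, where ai_config is assumed to come from the LaunchDarkly AI client:

```python
from ldai.providers.runner_factory import RunnerFactory


async def build_model_runner(ai_config):
    # ai_config: assumed to be an AIConfigKind from the LD AI client.
    # Force the LangChain path regardless of the provider named in the config.
    provider = await RunnerFactory.create_model(ai_config, default_ai_provider='langchain')
    if provider is None:
        raise RuntimeError('no usable AI provider package is installed')
    return provider
```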
+ + :param graph_def: AgentGraphDefinition instance + :param tools: Tool registry mapping tool names to callables + :param default_ai_provider: Optional provider override + :return: AgentGraphExecutor instance, or None + """ + provider_name = None + if graph_def.root() and graph_def.root().get_config() and graph_def.root().get_config().provider: + provider_name = graph_def.root().get_config().provider.name.lower() + providers = RunnerFactory._get_providers_to_try(default_ai_provider, provider_name) + return RunnerFactory._with_fallback(providers, lambda p: p.create_agent_graph(graph_def, tools)) + + @staticmethod + def _pkg_exists(package_name: str) -> None: + """ + Raise ImportError if the given package is not importable. + + :param package_name: Name of the package to check + """ + if util.find_spec(package_name) is None: + raise ImportError(f"Package '{package_name}' not found") + From f5ac6cc13ab4510c467490e1b6b7b71909c1cf76 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Wed, 25 Mar 2026 09:56:06 -0500 Subject: [PATCH 07/11] fix: Remove unnecessary async from RunnerFactory public methods Co-Authored-By: Claude Sonnet 4.6 --- packages/sdk/server-ai/src/ldai/client.py | 4 ++-- packages/sdk/server-ai/src/ldai/providers/runner_factory.py | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/packages/sdk/server-ai/src/ldai/client.py b/packages/sdk/server-ai/src/ldai/client.py index a87bef0..d680199 100644 --- a/packages/sdk/server-ai/src/ldai/client.py +++ b/packages/sdk/server-ai/src/ldai/client.py @@ -245,7 +245,7 @@ async def create_judge( if not judge_config.enabled or not judge_config.tracker: return None - provider = await RunnerFactory.create_model(judge_config, default_ai_provider) + provider = RunnerFactory.create_model(judge_config, default_ai_provider) if not provider: return None @@ -346,7 +346,7 @@ async def create_chat( if not config.enabled or not config.tracker: return None - provider = await RunnerFactory.create_model(config, default_ai_provider) + provider = RunnerFactory.create_model(config, default_ai_provider) if not provider: return None diff --git a/packages/sdk/server-ai/src/ldai/providers/runner_factory.py b/packages/sdk/server-ai/src/ldai/providers/runner_factory.py index f2e5fca..b4282dc 100644 --- a/packages/sdk/server-ai/src/ldai/providers/runner_factory.py +++ b/packages/sdk/server-ai/src/ldai/providers/runner_factory.py @@ -116,7 +116,7 @@ def _get_providers_to_try( # --- Public API --- @staticmethod - async def create_model( + def create_model( config: AIConfigKind, default_ai_provider: Optional[str] = None, ) -> Optional[AIProvider]: @@ -132,7 +132,7 @@ async def create_model( return RunnerFactory._with_fallback(providers, lambda p: p.create_model(config)) @staticmethod - async def create_agent( + def create_agent( config: Any, tools: Any, default_ai_provider: Optional[str] = None, @@ -150,7 +150,7 @@ async def create_agent( return RunnerFactory._with_fallback(providers, lambda p: p.create_agent(config, tools)) @staticmethod - async def create_agent_graph( + def create_agent_graph( graph_def: Any, tools: Any, default_ai_provider: Optional[str] = None, From 3baae96b9cbf8e459984a7dc9ad2eb14a53f0b11 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Wed, 25 Mar 2026 11:18:32 -0500 Subject: [PATCH 08/11] Remove non-idiomatic module docstrings; replace connector with provider MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Remove module-level docstring strings from all modified files (not idiomatic 
Python) - Replace all "connector" terminology with "provider" in class docstrings, method docstrings, and log warning messages - Fix stale "ExecutorFactory" references in provider class docstrings → RunnerFactory - Add assert guards for Optional[BaseChatModel] before invocation in LangChainRunnerFactory to satisfy mypy - Fix trailing blank lines (W391) across modified files Co-Authored-By: Claude Sonnet 4.6 --- .../src/ldai_langchain/__init__.py | 2 -- .../langchain_runner_factory.py | 17 ++++++------- .../src/ldai_openai/__init__.py | 2 -- .../src/ldai_openai/openai_runner_factory.py | 15 +++++------ .../server-ai/src/ldai/providers/__init__.py | 2 -- .../src/ldai/providers/ai_provider.py | 25 ++++++++----------- .../src/ldai/providers/runner_factory.py | 21 +++++++--------- 7 files changed, 34 insertions(+), 50 deletions(-) diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/__init__.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/__init__.py index ee58f4e..fab6cc5 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/__init__.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/__init__.py @@ -1,5 +1,3 @@ -"""LaunchDarkly AI SDK - LangChain Connector.""" - from ldai_langchain.langchain_runner_factory import LangChainRunnerFactory __version__ = "0.1.0" diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_runner_factory.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_runner_factory.py index 78803a0..d6ad6fb 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_runner_factory.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_runner_factory.py @@ -1,5 +1,3 @@ -"""LangChain connector for LaunchDarkly AI SDK.""" - from typing import Any, Dict, List, Optional, Union from langchain_core.language_models.chat_models import BaseChatModel @@ -13,10 +11,10 @@ class LangChainRunnerFactory(AIProvider): """ - LangChain connector for the LaunchDarkly AI SDK. + LangChain provider for the LaunchDarkly AI SDK. Can be used in two ways: - - Transparently via ExecutorFactory (pass ``default_ai_provider='langchain'`` to + - Transparently via RunnerFactory (pass ``default_ai_provider='langchain'`` to ``create_model()`` / ``create_chat()``). - Directly for full control: instantiate with a ``BaseChatModel``, then call ``invoke_model()`` yourself and use the static convenience methods @@ -26,12 +24,12 @@ class LangChainRunnerFactory(AIProvider): def __init__(self, llm: Optional[BaseChatModel] = None): """ - Initialize the LangChain connector. + Initialize the LangChain provider. - When called with no arguments the connector acts as a per-provider factory + When called with no arguments the provider acts as a per-provider factory — call ``create_model(config)`` to obtain a configured instance. - When called with an explicit ``llm`` the connector is ready to invoke + When called with an explicit ``llm`` the provider is ready to invoke the model immediately. :param llm: A LangChain BaseChatModel instance (optional) @@ -42,7 +40,7 @@ def __init__(self, llm: Optional[BaseChatModel] = None): def create_model(self, config: AIConfigKind) -> 'LangChainRunnerFactory': """ - Create a configured LangChain model connector for the given AI config. + Create a configured LangChain model provider for the given AI config. 
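The commit message above calls out assert guards added to satisfy mypy; shown generically rather than with SDK types, the idiom is that an assert narrows an Optional attribute for the type checker while failing fast at runtime:

```python
from typing import Optional


class Greeter:
    def __init__(self, name: Optional[str] = None):
        self._name = name

    def greet(self) -> str:
        # mypy narrows Optional[str] to str past this guard; at runtime it
        # raises AssertionError if the object was built without a name.
        assert self._name is not None
        return f'hello, {self._name}'
```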
:param config: The LaunchDarkly AI configuration :return: Configured LangChainRunnerFactory ready to invoke the model @@ -60,6 +58,7 @@ async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse: :return: ChatResponse containing the model's response and metrics """ try: + assert self._llm is not None langchain_messages = LangChainRunnerFactory.convert_messages_to_langchain(messages) response: BaseMessage = await self._llm.ainvoke(langchain_messages) metrics = LangChainRunnerFactory.get_ai_metrics_from_response(response) @@ -104,6 +103,7 @@ async def invoke_structured_model( metrics=LDAIMetrics(success=False, usage=None), ) try: + assert self._llm is not None langchain_messages = LangChainRunnerFactory.convert_messages_to_langchain(messages) structured_llm = self._llm.with_structured_output(response_structure, include_raw=True) response = await structured_llm.ainvoke(langchain_messages) @@ -260,4 +260,3 @@ def create_langchain_model(ai_config: AIConfigKind) -> BaseChatModel: model_provider=mapped_provider, **parameters, ) - diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/__init__.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/__init__.py index 1284f48..8881353 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/__init__.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/__init__.py @@ -1,5 +1,3 @@ -"""LaunchDarkly AI SDK OpenAI Connector.""" - from ldai_openai.openai_runner_factory import OpenAIProvider __all__ = [ diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py index 1313404..19f0042 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py @@ -1,5 +1,3 @@ -"""OpenAI connector for LaunchDarkly AI SDK.""" - import json import os from typing import Any, Dict, Iterable, List, Optional, cast @@ -15,10 +13,10 @@ class OpenAIProvider(AIProvider): """ - OpenAI connector for the LaunchDarkly AI SDK. + OpenAI provider for the LaunchDarkly AI SDK. Can be used in two ways: - - Transparently via ExecutorFactory (pass ``default_ai_provider='openai'`` to + - Transparently via RunnerFactory (pass ``default_ai_provider='openai'`` to ``create_model()`` / ``create_chat()``). - Directly for full control: instantiate with an ``AsyncOpenAI`` client, model name, and parameters, then call ``invoke_model()`` yourself. @@ -31,13 +29,13 @@ def __init__( parameters: Optional[Dict[str, Any]] = None, ): """ - Initialize the OpenAI connector. + Initialize the OpenAI provider. - When called with no arguments the connector reads credentials from the + When called with no arguments the provider reads credentials from the environment (``OPENAI_API_KEY``) and acts as a per-provider factory — call ``create_model(config)`` to obtain a configured instance. - When called with explicit arguments the connector is ready to invoke + When called with explicit arguments the provider is ready to invoke the model immediately. :param client: An AsyncOpenAI client instance (created from env if omitted) @@ -54,7 +52,7 @@ def __init__( def create_model(self, config: AIConfigKind) -> 'OpenAIProvider': """ - Create a configured OpenAI model connector for the given AI config. + Create a configured OpenAI model provider for the given AI config. 
Reuses the underlying AsyncOpenAI client so that connection pooling is preserved across calls. @@ -220,4 +218,3 @@ def get_ai_metrics_from_response(response: Any) -> LDAIMetrics: ) return LDAIMetrics(success=True, usage=usage) - diff --git a/packages/sdk/server-ai/src/ldai/providers/__init__.py b/packages/sdk/server-ai/src/ldai/providers/__init__.py index 4cebeea..cbf2b5f 100644 --- a/packages/sdk/server-ai/src/ldai/providers/__init__.py +++ b/packages/sdk/server-ai/src/ldai/providers/__init__.py @@ -1,5 +1,3 @@ -"""AI Connector interfaces and factory for LaunchDarkly AI SDK.""" - from ldai.providers.ai_provider import AIProvider from ldai.providers.runner_factory import RunnerFactory diff --git a/packages/sdk/server-ai/src/ldai/providers/ai_provider.py b/packages/sdk/server-ai/src/ldai/providers/ai_provider.py index a675eda..af1fb91 100644 --- a/packages/sdk/server-ai/src/ldai/providers/ai_provider.py +++ b/packages/sdk/server-ai/src/ldai/providers/ai_provider.py @@ -1,5 +1,3 @@ -"""Abstract base class for AI connectors.""" - from abc import ABC from typing import Any, Dict, List, Optional @@ -10,7 +8,7 @@ class AIProvider(ABC): """ - Abstract base class for AI provider connectors. + Abstract base class for AI providers. An AIProvider is a per-provider factory: it is instantiated once per provider (with no arguments — credentials are read from environment variables) and is @@ -26,12 +24,12 @@ async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse: Invoke the chat model with an array of messages. Default implementation takes no action and returns a placeholder response. - Connector implementations should override this method. + Provider implementations should override this method. :param messages: Array of LDMessage objects representing the conversation :return: ChatResponse containing the model's response """ - log.warn('invoke_model not implemented by this connector') + log.warn('invoke_model not implemented by this provider') from ldai.models import LDMessage from ldai.providers.types import LDAIMetrics @@ -50,13 +48,13 @@ async def invoke_structured_model( Invoke the chat model with structured output support. Default implementation takes no action and returns a placeholder response. - Connector implementations should override this method. + Provider implementations should override this method. :param messages: Array of LDMessage objects representing the conversation :param response_structure: Dictionary of output configurations keyed by output name :return: StructuredResponse containing the structured data """ - log.warn('invoke_structured_model not implemented by this connector') + log.warn('invoke_structured_model not implemented by this provider') from ldai.providers.types import LDAIMetrics @@ -70,37 +68,36 @@ def create_model(self, config: Any) -> Optional['AIProvider']: """ Create a configured model executor for the given AI config. - Default implementation warns. Provider connectors should override this method. + Default implementation warns. Provider implementations should override this method. :param config: The LaunchDarkly AI configuration :return: Configured AIProvider instance, or None if unsupported """ - log.warn('create_model not implemented by this connector') + log.warn('create_model not implemented by this provider') return None def create_agent(self, config: Any, tools: Any) -> Optional[Any]: """ Create a configured agent executor for the given AI config and tool registry. - Default implementation warns. 
Provider connectors should override this method. + Default implementation warns. Provider implementations should override this method. :param config: The LaunchDarkly AI agent configuration :param tools: Tool registry mapping tool names to callables :return: AgentExecutor instance, or None if unsupported """ - log.warn('create_agent not implemented by this connector') + log.warn('create_agent not implemented by this provider') return None def create_agent_graph(self, graph_def: Any, tools: Any) -> Optional[Any]: """ Create a configured agent graph executor for the given graph definition and tools. - Default implementation warns. Provider connectors should override this method. + Default implementation warns. Provider implementations should override this method. :param graph_def: The agent graph definition :param tools: Tool registry mapping tool names to callables :return: AgentGraphExecutor instance, or None if unsupported """ - log.warn('create_agent_graph not implemented by this connector') + log.warn('create_agent_graph not implemented by this provider') return None - diff --git a/packages/sdk/server-ai/src/ldai/providers/runner_factory.py b/packages/sdk/server-ai/src/ldai/providers/runner_factory.py index b4282dc..7973347 100644 --- a/packages/sdk/server-ai/src/ldai/providers/runner_factory.py +++ b/packages/sdk/server-ai/src/ldai/providers/runner_factory.py @@ -1,5 +1,3 @@ -"""Factory for creating AIProvider instances and capability runners.""" - from importlib import util from typing import Any, Callable, List, Optional, TypeVar @@ -25,11 +23,11 @@ class RunnerFactory: """ @staticmethod - def _get_ai_adapter(provider_type: str) -> Optional[AIProvider]: + def _get_provider_factory(provider_type: str) -> Optional[AIProvider]: """ Import and instantiate the AIProvider for the given provider type. - This is the only place in the SDK that knows about connector package names. + This is the only place in the SDK that knows about provider package names. :param provider_type: Provider identifier, e.g. 'openai' or 'langchain' :return: AIProvider instance, or None if the package is not installed @@ -45,13 +43,13 @@ def _get_ai_adapter(provider_type: str) -> Optional[AIProvider]: from ldai_openai import OpenAIProvider return OpenAIProvider() - log.warn( + log.warning( f"Provider '{provider_type}' is not supported. " f"Supported providers: {SUPPORTED_AI_PROVIDERS}" ) return None except ImportError as error: - log.warn( + log.warning( f"Could not load provider '{provider_type}': {error}. " f"Make sure the corresponding package is installed." 
) @@ -73,17 +71,17 @@ def _with_fallback( """ for provider_type in providers: try: - connector = RunnerFactory._get_ai_adapter(provider_type) - if connector is None: + provider_factory = RunnerFactory._get_provider_factory(provider_type) + if provider_factory is None: continue - result = fn(connector) + result = fn(provider_factory) if result is not None: log.debug(f"Successfully created capability using provider '{provider_type}'") return result except Exception as exc: - log.warn(f"Provider '{provider_type}' failed: {exc}") + log.warning(f"Provider '{provider_type}' failed: {exc}") - log.warn("All providers failed or are unavailable") + log.warning("All providers failed or are unavailable") return None @staticmethod @@ -178,4 +176,3 @@ def _pkg_exists(package_name: str) -> None: """ if util.find_spec(package_name) is None: raise ImportError(f"Package '{package_name}' not found") - From faef4e7328ffbfd0f09867e6f5bba698489bba42 Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Wed, 25 Mar 2026 11:47:08 -0500 Subject: [PATCH 09/11] =?UTF-8?q?Rename=20OpenAIProvider=20=E2=86=92=20Ope?= =?UTF-8?q?nAIRunnerFactory?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Completes the rename that was done for the file (openai_provider.py → openai_runner_factory.py) but missed the class itself. Updates all references in __init__.py, runner_factory.py, and tests. Co-Authored-By: Claude Sonnet 4.6 --- .../src/ldai_openai/__init__.py | 4 +-- .../src/ldai_openai/openai_runner_factory.py | 14 ++++---- .../tests/test_openai_provider.py | 36 +++++++++---------- .../src/ldai/providers/runner_factory.py | 4 +-- 4 files changed, 29 insertions(+), 29 deletions(-) diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/__init__.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/__init__.py index 8881353..a24989f 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/__init__.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/__init__.py @@ -1,5 +1,5 @@ -from ldai_openai.openai_runner_factory import OpenAIProvider +from ldai_openai.openai_runner_factory import OpenAIRunnerFactory __all__ = [ - 'OpenAIProvider', + 'OpenAIRunnerFactory', ] diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py index 19f0042..86bef3f 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py @@ -11,7 +11,7 @@ from openai.types.chat import ChatCompletionMessageParam -class OpenAIProvider(AIProvider): +class OpenAIRunnerFactory(AIProvider): """ OpenAI provider for the LaunchDarkly AI SDK. @@ -50,7 +50,7 @@ def __init__( # --- AIProvider factory methods --- - def create_model(self, config: AIConfigKind) -> 'OpenAIProvider': + def create_model(self, config: AIConfigKind) -> 'OpenAIRunnerFactory': """ Create a configured OpenAI model provider for the given AI config. @@ -58,13 +58,13 @@ def create_model(self, config: AIConfigKind) -> 'OpenAIProvider': preserved across calls. 
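Because create_model hands the factory's existing AsyncOpenAI client to each new instance, every runner produced by one factory shares a connection pool; a sketch using mock configs shaped like the ones in the tests below (the dict layout is taken from those tests):

```python
from unittest.mock import MagicMock

from ldai_openai import OpenAIRunnerFactory

# Stand-in config, shaped like the to_dict() payloads used in the tests.
config = MagicMock()
config.to_dict.return_value = {'model': {'name': 'gpt-4', 'parameters': {}}}

factory = OpenAIRunnerFactory()  # builds one client; needs OPENAI_API_KEY set
runner_a = factory.create_model(config)
runner_b = factory.create_model(config)

# Both runners reuse the factory's client, preserving connection pooling.
assert runner_a.get_client() is runner_b.get_client()
```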
:param config: The LaunchDarkly AI configuration - :return: Configured OpenAIProvider ready to invoke the model + :return: Configured OpenAIRunnerFactory ready to invoke the model """ config_dict = config.to_dict() model_dict = config_dict.get('model') or {} model_name = model_dict.get('name', '') parameters = model_dict.get('parameters') or {} - return OpenAIProvider(self._client, model_name, parameters) + return OpenAIRunnerFactory(self._client, model_name, parameters) # --- Model invocation --- @@ -87,7 +87,7 @@ async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse: **self._parameters, ) - metrics = OpenAIProvider.get_ai_metrics_from_response(response) + metrics = OpenAIRunnerFactory.get_ai_metrics_from_response(response) content = '' if response.choices and len(response.choices) > 0: @@ -143,7 +143,7 @@ async def invoke_structured_model( **self._parameters, ) - metrics = OpenAIProvider.get_ai_metrics_from_response(response) + metrics = OpenAIRunnerFactory.get_ai_metrics_from_response(response) content = '' if response.choices and len(response.choices) > 0: @@ -206,7 +206,7 @@ def get_ai_metrics_from_response(response: Any) -> LDAIMetrics: response = await tracker.track_metrics_of( lambda: client.chat.completions.create(config), - OpenAIProvider.get_ai_metrics_from_response + OpenAIRunnerFactory.get_ai_metrics_from_response ) """ usage: Optional[TokenUsage] = None diff --git a/packages/ai-providers/server-ai-openai/tests/test_openai_provider.py b/packages/ai-providers/server-ai-openai/tests/test_openai_provider.py index d684df0..385f74a 100644 --- a/packages/ai-providers/server-ai-openai/tests/test_openai_provider.py +++ b/packages/ai-providers/server-ai-openai/tests/test_openai_provider.py @@ -5,7 +5,7 @@ from ldai import LDMessage -from ldai_openai import OpenAIProvider +from ldai_openai import OpenAIRunnerFactory class TestGetAIMetricsFromResponse: @@ -19,7 +19,7 @@ def test_creates_metrics_with_success_true_and_token_usage(self): mock_response.usage.completion_tokens = 50 mock_response.usage.total_tokens = 100 - result = OpenAIProvider.get_ai_metrics_from_response(mock_response) + result = OpenAIRunnerFactory.get_ai_metrics_from_response(mock_response) assert result.success is True assert result.usage is not None @@ -32,7 +32,7 @@ def test_creates_metrics_with_success_true_and_no_usage_when_usage_missing(self) mock_response = MagicMock() mock_response.usage = None - result = OpenAIProvider.get_ai_metrics_from_response(mock_response) + result = OpenAIRunnerFactory.get_ai_metrics_from_response(mock_response) assert result.success is True assert result.usage is None @@ -45,7 +45,7 @@ def test_handles_partial_usage_data(self): mock_response.usage.completion_tokens = None mock_response.usage.total_tokens = None - result = OpenAIProvider.get_ai_metrics_from_response(mock_response) + result = OpenAIRunnerFactory.get_ai_metrics_from_response(mock_response) assert result.success is True assert result.usage is not None @@ -78,7 +78,7 @@ async def test_invokes_openai_chat_completions_and_returns_response(self, mock_c mock_client.chat.completions = MagicMock() mock_client.chat.completions.create = AsyncMock(return_value=mock_response) - provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}) + provider = OpenAIRunnerFactory(mock_client, 'gpt-3.5-turbo', {}) messages = [LDMessage(role='user', content='Hello!')] result = await provider.invoke_model(messages) @@ -108,7 +108,7 @@ async def test_returns_unsuccessful_response_when_no_content(self, mock_client): 
mock_client.chat.completions = MagicMock() mock_client.chat.completions.create = AsyncMock(return_value=mock_response) - provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}) + provider = OpenAIRunnerFactory(mock_client, 'gpt-3.5-turbo', {}) messages = [LDMessage(role='user', content='Hello!')] result = await provider.invoke_model(messages) @@ -127,7 +127,7 @@ async def test_returns_unsuccessful_response_when_choices_empty(self, mock_clien mock_client.chat.completions = MagicMock() mock_client.chat.completions.create = AsyncMock(return_value=mock_response) - provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}) + provider = OpenAIRunnerFactory(mock_client, 'gpt-3.5-turbo', {}) messages = [LDMessage(role='user', content='Hello!')] result = await provider.invoke_model(messages) @@ -142,7 +142,7 @@ async def test_returns_unsuccessful_response_when_exception_thrown(self, mock_cl mock_client.chat.completions = MagicMock() mock_client.chat.completions.create = AsyncMock(side_effect=Exception('API Error')) - provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}) + provider = OpenAIRunnerFactory(mock_client, 'gpt-3.5-turbo', {}) messages = [LDMessage(role='user', content='Hello!')] result = await provider.invoke_model(messages) @@ -175,7 +175,7 @@ async def test_invokes_openai_with_structured_output(self, mock_client): mock_client.chat.completions = MagicMock() mock_client.chat.completions.create = AsyncMock(return_value=mock_response) - provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}) + provider = OpenAIRunnerFactory(mock_client, 'gpt-3.5-turbo', {}) messages = [LDMessage(role='user', content='Tell me about a person')] response_structure = { 'type': 'object', @@ -210,7 +210,7 @@ async def test_returns_unsuccessful_when_no_content_in_structured_response(self, mock_client.chat.completions = MagicMock() mock_client.chat.completions.create = AsyncMock(return_value=mock_response) - provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}) + provider = OpenAIRunnerFactory(mock_client, 'gpt-3.5-turbo', {}) messages = [LDMessage(role='user', content='Tell me about a person')] response_structure = {'type': 'object'} @@ -236,7 +236,7 @@ async def test_handles_json_parsing_errors(self, mock_client): mock_client.chat.completions = MagicMock() mock_client.chat.completions.create = AsyncMock(return_value=mock_response) - provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}) + provider = OpenAIRunnerFactory(mock_client, 'gpt-3.5-turbo', {}) messages = [LDMessage(role='user', content='Tell me about a person')] response_structure = {'type': 'object'} @@ -255,7 +255,7 @@ async def test_returns_unsuccessful_response_when_exception_thrown(self, mock_cl mock_client.chat.completions = MagicMock() mock_client.chat.completions.create = AsyncMock(side_effect=Exception('API Error')) - provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}) + provider = OpenAIRunnerFactory(mock_client, 'gpt-3.5-turbo', {}) messages = [LDMessage(role='user', content='Tell me about a person')] response_structure = {'type': 'object'} @@ -272,7 +272,7 @@ class TestGetClient: def test_returns_underlying_client(self): """Should return the underlying OpenAI client.""" mock_client = MagicMock() - provider = OpenAIProvider(mock_client, 'gpt-3.5-turbo', {}) + provider = OpenAIRunnerFactory(mock_client, 'gpt-3.5-turbo', {}) assert provider.get_client() is mock_client @@ -281,7 +281,7 @@ class TestCreateModel: """Tests for create_model instance method.""" def 
test_creates_connector_with_correct_model_and_parameters(self): - """Should create OpenAIProvider with correct model and parameters.""" + """Should create OpenAIRunnerFactory with correct model and parameters.""" mock_ai_config = MagicMock() mock_ai_config.to_dict.return_value = { 'model': { @@ -298,9 +298,9 @@ def test_creates_connector_with_correct_model_and_parameters(self): mock_client = MagicMock() mock_openai_class.return_value = mock_client - result = OpenAIProvider().create_model(mock_ai_config) + result = OpenAIRunnerFactory().create_model(mock_ai_config) - assert isinstance(result, OpenAIProvider) + assert isinstance(result, OpenAIRunnerFactory) assert result._model_name == 'gpt-4' assert result._parameters == {'temperature': 0.7, 'max_tokens': 1000} @@ -313,9 +313,9 @@ def test_handles_missing_model_config(self): mock_client = MagicMock() mock_openai_class.return_value = mock_client - result = OpenAIProvider().create_model(mock_ai_config) + result = OpenAIRunnerFactory().create_model(mock_ai_config) - assert isinstance(result, OpenAIProvider) + assert isinstance(result, OpenAIRunnerFactory) assert result._model_name == '' assert result._parameters == {} diff --git a/packages/sdk/server-ai/src/ldai/providers/runner_factory.py b/packages/sdk/server-ai/src/ldai/providers/runner_factory.py index 7973347..34028a8 100644 --- a/packages/sdk/server-ai/src/ldai/providers/runner_factory.py +++ b/packages/sdk/server-ai/src/ldai/providers/runner_factory.py @@ -40,8 +40,8 @@ def _get_provider_factory(provider_type: str) -> Optional[AIProvider]: if provider_type == 'openai': RunnerFactory._pkg_exists('ldai_openai') - from ldai_openai import OpenAIProvider - return OpenAIProvider() + from ldai_openai import OpenAIRunnerFactory + return OpenAIRunnerFactory() log.warning( f"Provider '{provider_type}' is not supported. " From cb64cd376e449c73fea0d325bb6417ab20f4384b Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Wed, 25 Mar 2026 11:49:35 -0500 Subject: [PATCH 10/11] Remove non-idiomatic section header comments Drop # --- AIProvider factory methods ---, # --- Model invocation ---, # --- Convenience accessors ---, and # --- Public API --- from the factory and provider classes. Co-Authored-By: Claude Sonnet 4.6 --- .../src/ldai_langchain/langchain_runner_factory.py | 6 ------ .../src/ldai_openai/openai_runner_factory.py | 6 ------ packages/sdk/server-ai/src/ldai/providers/runner_factory.py | 2 -- 3 files changed, 14 deletions(-) diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_runner_factory.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_runner_factory.py index d6ad6fb..d8ac57f 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_runner_factory.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_runner_factory.py @@ -36,8 +36,6 @@ def __init__(self, llm: Optional[BaseChatModel] = None): """ self._llm = llm - # --- AIProvider factory methods --- - def create_model(self, config: AIConfigKind) -> 'LangChainRunnerFactory': """ Create a configured LangChain model provider for the given AI config. @@ -48,8 +46,6 @@ def create_model(self, config: AIConfigKind) -> 'LangChainRunnerFactory': llm = LangChainRunnerFactory.create_langchain_model(config) return LangChainRunnerFactory(llm) - # --- Model invocation --- - async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse: """ Invoke the LangChain model with an array of messages. 
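The test updates above lean on AsyncMock so the awaited OpenAI call can be scripted; a condensed standalone version of that pattern (requires pytest-asyncio; the success-flag assertion mirrors the unsuccessful-response behavior exercised in the diffs above):

```python
from unittest.mock import AsyncMock, MagicMock

import pytest

from ldai import LDMessage
from ldai_openai import OpenAIRunnerFactory


@pytest.mark.asyncio
async def test_invoke_model_reports_api_errors():
    client = MagicMock()
    # AsyncMock makes the awaited call raise exactly like the real client.
    client.chat.completions.create = AsyncMock(side_effect=Exception('boom'))

    provider = OpenAIRunnerFactory(client, 'gpt-3.5-turbo', {})
    result = await provider.invoke_model([LDMessage(role='user', content='hi')])

    assert result.metrics.success is False
```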
@@ -132,8 +128,6 @@ async def invoke_structured_model( log.warning(f'LangChain structured model invocation failed: {error}') return structured_response - # --- Convenience accessors --- - def get_chat_model(self) -> Optional[BaseChatModel]: """ Get the underlying LangChain model instance. diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py index 86bef3f..839b487 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py @@ -48,8 +48,6 @@ def __init__( self._model_name = model_name self._parameters = parameters or {} - # --- AIProvider factory methods --- - def create_model(self, config: AIConfigKind) -> 'OpenAIRunnerFactory': """ Create a configured OpenAI model provider for the given AI config. @@ -66,8 +64,6 @@ def create_model(self, config: AIConfigKind) -> 'OpenAIRunnerFactory': parameters = model_dict.get('parameters') or {} return OpenAIRunnerFactory(self._client, model_name, parameters) - # --- Model invocation --- - async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse: """ Invoke the OpenAI model with an array of messages. @@ -184,8 +180,6 @@ async def invoke_structured_model( metrics=LDAIMetrics(success=False, usage=None), ) - # --- Convenience accessors --- - def get_client(self) -> AsyncOpenAI: """ Get the underlying OpenAI client instance. diff --git a/packages/sdk/server-ai/src/ldai/providers/runner_factory.py b/packages/sdk/server-ai/src/ldai/providers/runner_factory.py index 34028a8..adbcb25 100644 --- a/packages/sdk/server-ai/src/ldai/providers/runner_factory.py +++ b/packages/sdk/server-ai/src/ldai/providers/runner_factory.py @@ -111,8 +111,6 @@ def _get_providers_to_try( return providers - # --- Public API --- - @staticmethod def create_model( config: AIConfigKind, From 22f67de755b786a56643cef58c2cfe36071ada5b Mon Sep 17 00:00:00 2001 From: jsonbailey Date: Wed, 25 Mar 2026 12:01:24 -0500 Subject: [PATCH 11/11] Clean up docstrings in provider classes - Remove stale PR 2 migration note from AIProvider docstring - Remove 'two ways' prose from runner factory class and __init__ docstrings - Replace log.warn with log.warning in ai_provider.py Co-Authored-By: Claude Sonnet 4.6 --- .../langchain_runner_factory.py | 20 +------------------ .../src/ldai_openai/openai_runner_factory.py | 19 +----------------- .../src/ldai/providers/ai_provider.py | 13 +++++------- 3 files changed, 7 insertions(+), 45 deletions(-) diff --git a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_runner_factory.py b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_runner_factory.py index d8ac57f..afb5c8b 100644 --- a/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_runner_factory.py +++ b/packages/ai-providers/server-ai-langchain/src/ldai_langchain/langchain_runner_factory.py @@ -10,28 +10,10 @@ class LangChainRunnerFactory(AIProvider): - """ - LangChain provider for the LaunchDarkly AI SDK. - - Can be used in two ways: - - Transparently via RunnerFactory (pass ``default_ai_provider='langchain'`` to - ``create_model()`` / ``create_chat()``). 
- - Directly for full control: instantiate with a ``BaseChatModel``, then call - ``invoke_model()`` yourself and use the static convenience methods - (``get_ai_metrics_from_response``, ``convert_messages_to_langchain``, - ``map_provider``, ``create_langchain_model``). - """ + """LangChain provider for the LaunchDarkly AI SDK.""" def __init__(self, llm: Optional[BaseChatModel] = None): """ - Initialize the LangChain provider. - - When called with no arguments the provider acts as a per-provider factory - — call ``create_model(config)`` to obtain a configured instance. - - When called with an explicit ``llm`` the provider is ready to invoke - the model immediately. - :param llm: A LangChain BaseChatModel instance (optional) """ self._llm = llm diff --git a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py index 839b487..a5ae0ff 100644 --- a/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py +++ b/packages/ai-providers/server-ai-openai/src/ldai_openai/openai_runner_factory.py @@ -12,15 +12,7 @@ class OpenAIRunnerFactory(AIProvider): - """ - OpenAI provider for the LaunchDarkly AI SDK. - - Can be used in two ways: - - Transparently via RunnerFactory (pass ``default_ai_provider='openai'`` to - ``create_model()`` / ``create_chat()``). - - Directly for full control: instantiate with an ``AsyncOpenAI`` client, - model name, and parameters, then call ``invoke_model()`` yourself. - """ + """OpenAI provider for the LaunchDarkly AI SDK.""" def __init__( self, @@ -29,15 +21,6 @@ def __init__( parameters: Optional[Dict[str, Any]] = None, ): """ - Initialize the OpenAI provider. - - When called with no arguments the provider reads credentials from the - environment (``OPENAI_API_KEY``) and acts as a per-provider factory — - call ``create_model(config)`` to obtain a configured instance. - - When called with explicit arguments the provider is ready to invoke - the model immediately. - :param client: An AsyncOpenAI client instance (created from env if omitted) :param model_name: The name of the model to use :param parameters: Additional model parameters diff --git a/packages/sdk/server-ai/src/ldai/providers/ai_provider.py b/packages/sdk/server-ai/src/ldai/providers/ai_provider.py index af1fb91..fe2d0fb 100644 --- a/packages/sdk/server-ai/src/ldai/providers/ai_provider.py +++ b/packages/sdk/server-ai/src/ldai/providers/ai_provider.py @@ -14,9 +14,6 @@ class AIProvider(ABC): (with no arguments — credentials are read from environment variables) and is responsible for constructing focused runtime capability objects via create_model(), create_agent(), and create_agent_graph(). - - The invoke_model() / invoke_structured_model() methods remain on this base - class for compatibility and will migrate to ModelExecutor in PR 2. 
""" async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse: @@ -29,7 +26,7 @@ async def invoke_model(self, messages: List[LDMessage]) -> ChatResponse: :param messages: Array of LDMessage objects representing the conversation :return: ChatResponse containing the model's response """ - log.warn('invoke_model not implemented by this provider') + log.warning('invoke_model not implemented by this provider') from ldai.models import LDMessage from ldai.providers.types import LDAIMetrics @@ -54,7 +51,7 @@ async def invoke_structured_model( :param response_structure: Dictionary of output configurations keyed by output name :return: StructuredResponse containing the structured data """ - log.warn('invoke_structured_model not implemented by this provider') + log.warning('invoke_structured_model not implemented by this provider') from ldai.providers.types import LDAIMetrics @@ -73,7 +70,7 @@ def create_model(self, config: Any) -> Optional['AIProvider']: :param config: The LaunchDarkly AI configuration :return: Configured AIProvider instance, or None if unsupported """ - log.warn('create_model not implemented by this provider') + log.warning('create_model not implemented by this provider') return None def create_agent(self, config: Any, tools: Any) -> Optional[Any]: @@ -86,7 +83,7 @@ def create_agent(self, config: Any, tools: Any) -> Optional[Any]: :param tools: Tool registry mapping tool names to callables :return: AgentExecutor instance, or None if unsupported """ - log.warn('create_agent not implemented by this provider') + log.warning('create_agent not implemented by this provider') return None def create_agent_graph(self, graph_def: Any, tools: Any) -> Optional[Any]: @@ -99,5 +96,5 @@ def create_agent_graph(self, graph_def: Any, tools: Any) -> Optional[Any]: :param tools: Tool registry mapping tool names to callables :return: AgentGraphExecutor instance, or None if unsupported """ - log.warn('create_agent_graph not implemented by this provider') + log.warning('create_agent_graph not implemented by this provider') return None