diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 7f554478..7f1fa0b4 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.3.3-rc1" + ".": "2.4.0-rc1" } \ No newline at end of file diff --git a/.stats.yml b/.stats.yml index 33ea12a4..a774d25b 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 33 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/writerai%2Fwriter-ea6ec4b34f6b7fdecc564f59b2e31482eee05830bf8dc1f389461b158de1548e.yml openapi_spec_hash: ea89c1faed473908be2740efe6da255f -config_hash: 886645f89dc98f04b8931eaf02854e5f +config_hash: 247c2ce23a36ef7446d356308329c87b diff --git a/CHANGELOG.md b/CHANGELOG.md index 9d557021..8da6b00c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,36 @@ # Changelog +## 2.4.0-rc1 (2026-01-12) + +Full Changelog: [v2.3.3-rc1...v2.4.0-rc1](https://github.com/writer/writer-python/compare/v2.3.3-rc1...v2.4.0-rc1) + +### Features + +* **api:** manual updates ([26cd543](https://github.com/writer/writer-python/commit/26cd543764608a8d44ac5415a89ea98953672e08)) +* **api:** manual updates ([f95b625](https://github.com/writer/writer-python/commit/f95b6253dbc413b744cc5943c383b2ecbc0ccd44)) + + +### Bug Fixes + +* **types:** allow pyright to infer TypedDict types within SequenceNotStr ([d1f6535](https://github.com/writer/writer-python/commit/d1f6535dcef1167e31b53c4e0c030e9f7459821b)) +* use async_to_httpx_files in patch method ([a6fbc3c](https://github.com/writer/writer-python/commit/a6fbc3c52c4afe6ce8ce234ac5c9d8464bae2463)) + + +### Chores + +* add missing docstrings ([a3c938c](https://github.com/writer/writer-python/commit/a3c938c5ad76f43870d9f48145d5a10a7757bcd1)) +* **docs:** use environment variables for authentication in code snippets ([534af15](https://github.com/writer/writer-python/commit/534af15961290aaae496780d712eaffa82d569e8)) +* **internal:** add `--fix` argument to lint script ([4b3b067](https://github.com/writer/writer-python/commit/4b3b06799d69c1bf79a1cc92b3d4083a1f0317bc)) +* **internal:** add missing files argument to base client ([9672ca2](https://github.com/writer/writer-python/commit/9672ca2a93a1f8b2545da624a88109e9d7f7bcc3)) +* **internal:** codegen related update ([ddab89d](https://github.com/writer/writer-python/commit/ddab89ddfaf2901c80213110584ed6509001c96b)) +* speedup initial import ([4ae973c](https://github.com/writer/writer-python/commit/4ae973c1bd3100e4a071f18f0d46ab22684eb9ef)) +* update lockfile ([abd7b3d](https://github.com/writer/writer-python/commit/abd7b3dd4ab2ab0f64359a806fd8aafc16f476dc)) + + +### Documentation + +* prominently feature MCP server setup in root SDK readmes ([8ed50d2](https://github.com/writer/writer-python/commit/8ed50d2ee1c708254dc4d8c34fc563eed7b946dd)) + ## 2.3.3-rc1 (2025-12-01) Full Changelog: [v2.3.2...v2.3.3-rc1](https://github.com/writer/writer-python/compare/v2.3.2...v2.3.3-rc1) diff --git a/LICENSE b/LICENSE index 38b16626..a1e82cd6 100644 --- a/LICENSE +++ b/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2025 Writer + Copyright 2026 Writer Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/README.md b/README.md index a1a33706..a03f8eee 100644 --- a/README.md +++ b/README.md @@ -9,6 +9,15 @@ of Writer into your projects. 
It is generated with [Stainless](https://www.stainless.com/). +## MCP Server + +Use the Writer MCP Server to enable AI assistants to interact with this API, allowing them to explore endpoints, make test requests, and use documentation to help integrate this SDK into your application. + +[![Add to Cursor](https://cursor.com/deeplink/mcp-install-dark.svg)](https://cursor.com/en-US/install-mcp?name=writer-sdk-mcp&config=eyJjb21tYW5kIjoibnB4IiwiYXJncyI6WyIteSIsIndyaXRlci1zZGstbWNwIl19) +[![Install in VS Code](https://img.shields.io/badge/_-Add_to_VS_Code-blue?style=for-the-badge&logo=data:image/svg%2bxml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIGZpbGw9Im5vbmUiIHZpZXdCb3g9IjAgMCA0MCA0MCI+PHBhdGggZmlsbD0iI0VFRSIgZmlsbC1ydWxlPSJldmVub2RkIiBkPSJNMzAuMjM1IDM5Ljg4NGEyLjQ5MSAyLjQ5MSAwIDAgMS0xLjc4MS0uNzNMMTIuNyAyNC43OGwtMy40NiAyLjYyNC0zLjQwNiAyLjU4MmExLjY2NSAxLjY2NSAwIDAgMS0xLjA4Mi4zMzggMS42NjQgMS42NjQgMCAwIDEtMS4wNDYtLjQzMWwtMi4yLTJhMS42NjYgMS42NjYgMCAwIDEgMC0yLjQ2M0w3LjQ1OCAyMCA0LjY3IDE3LjQ1MyAxLjUwNyAxNC41N2ExLjY2NSAxLjY2NSAwIDAgMSAwLTIuNDYzbDIuMi0yYTEuNjY1IDEuNjY1IDAgMCAxIDIuMTMtLjA5N2w2Ljg2MyA1LjIwOUwyOC40NTIuODQ0YTIuNDg4IDIuNDg4IDAgMCAxIDEuODQxLS43MjljLjM1MS4wMDkuNjk5LjA5MSAxLjAxOS4yNDVsOC4yMzYgMy45NjFhMi41IDIuNSAwIDAgMSAxLjQxNSAyLjI1M3YuMDk5LS4wNDVWMzMuMzd2LS4wNDUuMDk1YTIuNTAxIDIuNTAxIDAgMCAxLTEuNDE2IDIuMjU3bC04LjIzNSAzLjk2MWEyLjQ5MiAyLjQ5MiAwIDAgMS0xLjA3Ny4yNDZabS43MTYtMjguOTQ3LTExLjk0OCA5LjA2MiAxMS45NTIgOS4wNjUtLjAwNC0xOC4xMjdaIi8+PC9zdmc+)](https://vscode.stainless.com/mcp/%7B%22name%22%3A%22writer-sdk-mcp%22%2C%22command%22%3A%22npx%22%2C%22args%22%3A%5B%22-y%22%2C%22writer-sdk-mcp%22%5D%7D) + +> Note: You may need to set environment variables in your MCP client. + ## Documentation The REST API documentation can be found on [dev.writer.com](https://dev.writer.com/api-guides/introduction). The full API of this library can be found in [api.md](api.md). @@ -122,6 +131,7 @@ pip install --pre writer-sdk[aiohttp] Then you can enable it by instantiating the client with `http_client=DefaultAioHttpClient()`: ```python +import os import asyncio from writerai import DefaultAioHttpClient from writerai import AsyncWriter @@ -129,7 +139,7 @@ from writerai import AsyncWriter async def main() -> None: async with AsyncWriter( - api_key="My API Key", + api_key=os.environ.get("WRITER_API_KEY"), # This is the default and can be omitted http_client=DefaultAioHttpClient(), ) as client: chat_completion = await client.chat.chat( @@ -373,9 +383,9 @@ Error codes are as follows: ### Retries -The library automatically retries certain errors two times by default, with a short exponential backoff. -Connection errors (for example, due to a network connectivity problem), `408 Request Timeout`, `409 Conflict`, -`429 Rate Limit`, and `>=500 Internal errors` are all retried by default. +Certain errors are automatically retried 7 times by default, with a short exponential backoff. +Connection errors (for example, due to a network connectivity problem), 408 Request Timeout, 409 Conflict, +429 Rate Limit, and >=500 Internal errors are all retried by default. 
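For reference alongside this README hunk (not part of the diff itself): a minimal sketch of overriding the retry behavior described above, assuming the standard client constructor plus the `with_options()` helper this SDK exposes; the model name and prompt are placeholders.

```python
import os

from writerai import Writer

# Disable retries entirely, overriding the new default of 7.
client = Writer(
    api_key=os.environ.get("WRITER_API_KEY"),  # This is the default and can be omitted
    max_retries=0,
)

# Or override the retry count for a single request only.
completion = client.with_options(max_retries=3).chat.chat(
    messages=[{"role": "user", "content": "Write a haiku about retries."}],
    model="palmyra-x5",
)
print(completion.choices[0].message.content)
```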
You can use the `max_retries` option to configure or disable retry settings: diff --git a/pyproject.toml b/pyproject.toml index dd7ef0f5..4a01ce3c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,22 +1,24 @@ [project] name = "writer-sdk" -version = "2.3.3-rc1" +version = "2.4.0-rc1" description = "The official Python library for the writer API" dynamic = ["readme"] license = "Apache-2.0" authors = [ { name = "Writer", email = "dev-feedback@writer.com" }, ] + dependencies = [ - "httpx>=0.23.0, <1", - "pydantic>=1.9.0, <3", - "typing-extensions>=4.10, <5", - "anyio>=3.5.0, <5", - "distro>=1.7.0, <2", - "sniffio", + "httpx>=0.23.0, <1", + "pydantic>=1.9.0, <3", + "typing-extensions>=4.10, <5", + "anyio>=3.5.0, <5", + "distro>=1.7.0, <2", + "sniffio", "cached-property; python_version < '3.8'", "jiter>=0.4.0, <1", ] + requires-python = ">= 3.9" classifiers = [ "Typing :: Typed", diff --git a/requirements-dev.lock b/requirements-dev.lock index 642272ac..b416f80c 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -12,44 +12,47 @@ -e file:. aiohappyeyeballs==2.6.1 # via aiohttp -aiohttp==3.12.8 +aiohttp==3.13.2 # via httpx-aiohttp # via writer-sdk -aiosignal==1.3.2 +aiosignal==1.4.0 # via aiohttp -annotated-types==0.6.0 +annotated-types==0.7.0 # via pydantic -anyio==4.4.0 +anyio==4.12.0 # via httpx # via writer-sdk -argcomplete==3.1.2 +argcomplete==3.6.3 # via nox asttokens==3.0.0 # via inline-snapshot async-timeout==5.0.1 # via aiohttp -attrs==25.3.0 +attrs==25.4.0 # via aiohttp -certifi==2023.7.22 + # via nox +backports-asyncio-runner==1.2.0 + # via pytest-asyncio +certifi==2025.11.12 # via httpcore # via httpx -colorlog==6.7.0 +colorlog==6.10.1 # via nox -dirty-equals==0.6.0 -distlib==0.3.7 +dependency-groups==1.3.1 + # via nox +dirty-equals==0.11 +distlib==0.4.0 # via virtualenv -distro==1.8.0 +distro==1.9.0 # via writer-sdk -exceptiongroup==1.2.2 +exceptiongroup==1.3.1 # via anyio # via pytest -execnet==2.1.1 +execnet==2.1.2 # via pytest-xdist -executing==2.2.0 - # via inline-snapshot -filelock==3.12.4 +filelock==3.19.1 # via virtualenv -frozenlist==1.6.2 +frozenlist==1.8.0 # via aiohttp # via aiosignal h11==0.16.0 @@ -62,12 +65,14 @@ httpx==0.28.1 # via writer-sdk httpx-aiohttp==0.1.9 # via writer-sdk -idna==3.4 +humanize==4.13.0 + # via nox +idna==3.11 # via anyio # via httpx # via yarl -importlib-metadata==7.0.0 -iniconfig==2.0.0 +importlib-metadata==8.7.0 +iniconfig==2.1.0 # via pytest inline-snapshot==0.20.5 jiter==0.8.2 @@ -76,74 +81,74 @@ markdown-it-py==3.0.0 # via rich mdurl==0.1.2 # via markdown-it-py -multidict==6.4.4 +multidict==6.7.0 # via aiohttp # via yarl mypy==1.17.0 -mypy-extensions==1.0.0 +mypy-extensions==1.1.0 # via mypy -nest-asyncio==1.6.0 -nodeenv==1.8.0 +nodeenv==1.9.1 # via pyright -nox==2023.4.22 -packaging==23.2 +nox==2025.11.12 +packaging==25.0 + # via dependency-groups # via nox # via pytest pathspec==0.12.1 # via mypy -platformdirs==3.11.0 +platformdirs==4.4.0 # via virtualenv -pluggy==1.5.0 +pluggy==1.6.0 # via pytest -propcache==0.3.1 +propcache==0.4.1 # via aiohttp # via yarl -pydantic==2.11.9 +pydantic==2.12.5 # via writer-sdk -pydantic-core==2.33.2 +pydantic-core==2.41.5 # via pydantic -pygments==2.18.0 +pygments==2.19.2 + # via pytest # via rich pyright==1.1.399 -pytest==8.3.3 +pytest==8.4.2 # via pytest-asyncio # via pytest-xdist -pytest-asyncio==0.24.0 -pytest-xdist==3.7.0 -python-dateutil==2.8.2 +pytest-asyncio==1.2.0 +pytest-xdist==3.8.0 +python-dateutil==2.9.0.post0 # via time-machine -pytz==2023.3.post1 - # via dirty-equals 
respx==0.22.0 -rich==13.7.1 - # via inline-snapshot -ruff==0.9.4 -setuptools==68.2.2 - # via nodeenv -six==1.16.0 +rich==14.2.0 +ruff==0.14.7 +six==1.17.0 # via python-dateutil -sniffio==1.3.0 - # via anyio +sniffio==1.3.1 # via writer-sdk -time-machine==2.9.0 -tomli==2.0.2 - # via inline-snapshot +time-machine==2.19.0 +tomli==2.3.0 + # via dependency-groups # via mypy + # via nox # via pytest -typing-extensions==4.12.2 +typing-extensions==4.15.0 + # via aiosignal # via anyio + # via exceptiongroup # via multidict # via mypy # via pydantic # via pydantic-core # via pyright + # via pytest-asyncio # via typing-inspection + # via virtualenv # via writer-sdk -typing-inspection==0.4.1 +typing-inspection==0.4.2 # via pydantic -virtualenv==20.24.5 +virtualenv==20.35.4 # via nox -yarl==1.20.0 +yarl==1.22.0 # via aiohttp -zipp==3.17.0 +zipp==3.23.0 # via importlib-metadata diff --git a/requirements.lock b/requirements.lock index 5d1ba1fa..90343d3b 100644 --- a/requirements.lock +++ b/requirements.lock @@ -12,28 +12,28 @@ -e file:. aiohappyeyeballs==2.6.1 # via aiohttp -aiohttp==3.12.8 +aiohttp==3.13.2 # via httpx-aiohttp # via writer-sdk -aiosignal==1.3.2 +aiosignal==1.4.0 # via aiohttp -annotated-types==0.6.0 +annotated-types==0.7.0 # via pydantic -anyio==4.4.0 +anyio==4.12.0 # via httpx # via writer-sdk async-timeout==5.0.1 # via aiohttp -attrs==25.3.0 +attrs==25.4.0 # via aiohttp -certifi==2023.7.22 +certifi==2025.11.12 # via httpcore # via httpx -distro==1.8.0 +distro==1.9.0 # via writer-sdk -exceptiongroup==1.2.2 +exceptiongroup==1.3.1 # via anyio -frozenlist==1.6.2 +frozenlist==1.8.0 # via aiohttp # via aiosignal h11==0.16.0 @@ -45,27 +45,26 @@ httpx==0.28.1 # via writer-sdk httpx-aiohttp==0.1.9 # via writer-sdk -idna==3.4 +idna==3.11 # via anyio # via httpx # via yarl -jiter==0.8.2 - # via writer-sdk -multidict==6.4.4 +multidict==6.7.0 # via aiohttp # via yarl -propcache==0.3.1 +propcache==0.4.1 # via aiohttp # via yarl pydantic==2.12.5 # via writer-sdk pydantic-core==2.41.5 # via pydantic -sniffio==1.3.0 - # via anyio +sniffio==1.3.1 # via writer-sdk typing-extensions==4.15.0 + # via aiosignal # via anyio + # via exceptiongroup # via multidict # via pydantic # via pydantic-core @@ -73,5 +72,5 @@ typing-extensions==4.15.0 # via writer-sdk typing-inspection==0.4.2 # via pydantic -yarl==1.20.0 +yarl==1.22.0 # via aiohttp diff --git a/scripts/lint b/scripts/lint index a3bd86b1..6ac647f4 100755 --- a/scripts/lint +++ b/scripts/lint @@ -4,8 +4,13 @@ set -e cd "$(dirname "$0")/.." 
-echo "==> Running lints" -rye run lint +if [ "$1" = "--fix" ]; then + echo "==> Running lints with --fix" + rye run fix:ruff +else + echo "==> Running lints" + rye run lint +fi echo "==> Making sure it imports" rye run python -c 'import writerai' diff --git a/src/writerai/_base_client.py b/src/writerai/_base_client.py index bf5efcbe..04853fca 100644 --- a/src/writerai/_base_client.py +++ b/src/writerai/_base_client.py @@ -1259,9 +1259,12 @@ def patch( *, cast_to: Type[ResponseT], body: Body | None = None, + files: RequestFiles | None = None, options: RequestOptions = {}, ) -> ResponseT: - opts = FinalRequestOptions.construct(method="patch", url=path, json_data=body, **options) + opts = FinalRequestOptions.construct( + method="patch", url=path, json_data=body, files=to_httpx_files(files), **options + ) return self.request(cast_to, opts) def put( @@ -1788,9 +1791,12 @@ async def patch( *, cast_to: Type[ResponseT], body: Body | None = None, + files: RequestFiles | None = None, options: RequestOptions = {}, ) -> ResponseT: - opts = FinalRequestOptions.construct(method="patch", url=path, json_data=body, **options) + opts = FinalRequestOptions.construct( + method="patch", url=path, json_data=body, files=await async_to_httpx_files(files), **options + ) return await self.request(cast_to, opts) async def put( diff --git a/src/writerai/_client.py b/src/writerai/_client.py index 5d9aec84..0f4a5ca0 100644 --- a/src/writerai/_client.py +++ b/src/writerai/_client.py @@ -3,7 +3,7 @@ from __future__ import annotations import os -from typing import Any, Mapping +from typing import TYPE_CHECKING, Any, Mapping from typing_extensions import Self, override import httpx @@ -21,8 +21,8 @@ not_given, ) from ._utils import is_given, get_async_library +from ._compat import cached_property from ._version import __version__ -from .resources import chat, files, graphs, models, vision, completions, translation from ._streaming import Stream as Stream, AsyncStream as AsyncStream from ._exceptions import WriterError, APIStatusError from ._base_client import ( @@ -30,8 +30,18 @@ SyncAPIClient, AsyncAPIClient, ) -from .resources.tools import tools -from .resources.applications import applications + +if TYPE_CHECKING: + from .resources import chat, files, tools, graphs, models, vision, completions, translation, applications + from .resources.chat import ChatResource, AsyncChatResource + from .resources.files import FilesResource, AsyncFilesResource + from .resources.graphs import GraphsResource, AsyncGraphsResource + from .resources.models import ModelsResource, AsyncModelsResource + from .resources.vision import VisionResource, AsyncVisionResource + from .resources.completions import CompletionsResource, AsyncCompletionsResource + from .resources.tools.tools import ToolsResource, AsyncToolsResource + from .resources.translation import TranslationResource, AsyncTranslationResource + from .resources.applications.applications import ApplicationsResource, AsyncApplicationsResource __all__ = ["Timeout", "Transport", "ProxiesTypes", "RequestOptions", "Writer", "AsyncWriter", "Client", "AsyncClient"] @@ -67,18 +77,6 @@ def _extract_sdk_env_headers() -> dict[str, str]: class Writer(SyncAPIClient): - applications: applications.ApplicationsResource - chat: chat.ChatResource - completions: completions.CompletionsResource - models: models.ModelsResource - graphs: graphs.GraphsResource - files: files.FilesResource - tools: tools.ToolsResource - translation: translation.TranslationResource - vision: vision.VisionResource - 
with_raw_response: WriterWithRawResponse - with_streaming_response: WriterWithStreamedResponse - # client options api_key: str @@ -138,17 +136,67 @@ def __init__( self._default_stream_cls = Stream - self.applications = applications.ApplicationsResource(self) - self.chat = chat.ChatResource(self) - self.completions = completions.CompletionsResource(self) - self.models = models.ModelsResource(self) - self.graphs = graphs.GraphsResource(self) - self.files = files.FilesResource(self) - self.tools = tools.ToolsResource(self) - self.translation = translation.TranslationResource(self) - self.vision = vision.VisionResource(self) - self.with_raw_response = WriterWithRawResponse(self) - self.with_streaming_response = WriterWithStreamedResponse(self) + @cached_property + def applications(self) -> ApplicationsResource: + from .resources.applications import ApplicationsResource + + return ApplicationsResource(self) + + @cached_property + def chat(self) -> ChatResource: + from .resources.chat import ChatResource + + return ChatResource(self) + + @cached_property + def completions(self) -> CompletionsResource: + from .resources.completions import CompletionsResource + + return CompletionsResource(self) + + @cached_property + def models(self) -> ModelsResource: + from .resources.models import ModelsResource + + return ModelsResource(self) + + @cached_property + def graphs(self) -> GraphsResource: + from .resources.graphs import GraphsResource + + return GraphsResource(self) + + @cached_property + def files(self) -> FilesResource: + from .resources.files import FilesResource + + return FilesResource(self) + + @cached_property + def tools(self) -> ToolsResource: + from .resources.tools import ToolsResource + + return ToolsResource(self) + + @cached_property + def translation(self) -> TranslationResource: + from .resources.translation import TranslationResource + + return TranslationResource(self) + + @cached_property + def vision(self) -> VisionResource: + from .resources.vision import VisionResource + + return VisionResource(self) + + @cached_property + def with_raw_response(self) -> WriterWithRawResponse: + return WriterWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> WriterWithStreamedResponse: + return WriterWithStreamedResponse(self) @property @override @@ -256,18 +304,6 @@ def _make_status_error( class AsyncWriter(AsyncAPIClient): - applications: applications.AsyncApplicationsResource - chat: chat.AsyncChatResource - completions: completions.AsyncCompletionsResource - models: models.AsyncModelsResource - graphs: graphs.AsyncGraphsResource - files: files.AsyncFilesResource - tools: tools.AsyncToolsResource - translation: translation.AsyncTranslationResource - vision: vision.AsyncVisionResource - with_raw_response: AsyncWriterWithRawResponse - with_streaming_response: AsyncWriterWithStreamedResponse - # client options api_key: str @@ -327,17 +363,67 @@ def __init__( self._default_stream_cls = AsyncStream - self.applications = applications.AsyncApplicationsResource(self) - self.chat = chat.AsyncChatResource(self) - self.completions = completions.AsyncCompletionsResource(self) - self.models = models.AsyncModelsResource(self) - self.graphs = graphs.AsyncGraphsResource(self) - self.files = files.AsyncFilesResource(self) - self.tools = tools.AsyncToolsResource(self) - self.translation = translation.AsyncTranslationResource(self) - self.vision = vision.AsyncVisionResource(self) - self.with_raw_response = AsyncWriterWithRawResponse(self) - self.with_streaming_response = 
AsyncWriterWithStreamedResponse(self) + @cached_property + def applications(self) -> AsyncApplicationsResource: + from .resources.applications import AsyncApplicationsResource + + return AsyncApplicationsResource(self) + + @cached_property + def chat(self) -> AsyncChatResource: + from .resources.chat import AsyncChatResource + + return AsyncChatResource(self) + + @cached_property + def completions(self) -> AsyncCompletionsResource: + from .resources.completions import AsyncCompletionsResource + + return AsyncCompletionsResource(self) + + @cached_property + def models(self) -> AsyncModelsResource: + from .resources.models import AsyncModelsResource + + return AsyncModelsResource(self) + + @cached_property + def graphs(self) -> AsyncGraphsResource: + from .resources.graphs import AsyncGraphsResource + + return AsyncGraphsResource(self) + + @cached_property + def files(self) -> AsyncFilesResource: + from .resources.files import AsyncFilesResource + + return AsyncFilesResource(self) + + @cached_property + def tools(self) -> AsyncToolsResource: + from .resources.tools import AsyncToolsResource + + return AsyncToolsResource(self) + + @cached_property + def translation(self) -> AsyncTranslationResource: + from .resources.translation import AsyncTranslationResource + + return AsyncTranslationResource(self) + + @cached_property + def vision(self) -> AsyncVisionResource: + from .resources.vision import AsyncVisionResource + + return AsyncVisionResource(self) + + @cached_property + def with_raw_response(self) -> AsyncWriterWithRawResponse: + return AsyncWriterWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncWriterWithStreamedResponse: + return AsyncWriterWithStreamedResponse(self) @property @override @@ -445,55 +531,247 @@ def _make_status_error( class WriterWithRawResponse: + _client: Writer + def __init__(self, client: Writer) -> None: - self.applications = applications.ApplicationsResourceWithRawResponse(client.applications) - self.chat = chat.ChatResourceWithRawResponse(client.chat) - self.completions = completions.CompletionsResourceWithRawResponse(client.completions) - self.models = models.ModelsResourceWithRawResponse(client.models) - self.graphs = graphs.GraphsResourceWithRawResponse(client.graphs) - self.files = files.FilesResourceWithRawResponse(client.files) - self.tools = tools.ToolsResourceWithRawResponse(client.tools) - self.translation = translation.TranslationResourceWithRawResponse(client.translation) - self.vision = vision.VisionResourceWithRawResponse(client.vision) + self._client = client + + @cached_property + def applications(self) -> applications.ApplicationsResourceWithRawResponse: + from .resources.applications import ApplicationsResourceWithRawResponse + + return ApplicationsResourceWithRawResponse(self._client.applications) + + @cached_property + def chat(self) -> chat.ChatResourceWithRawResponse: + from .resources.chat import ChatResourceWithRawResponse + + return ChatResourceWithRawResponse(self._client.chat) + + @cached_property + def completions(self) -> completions.CompletionsResourceWithRawResponse: + from .resources.completions import CompletionsResourceWithRawResponse + + return CompletionsResourceWithRawResponse(self._client.completions) + + @cached_property + def models(self) -> models.ModelsResourceWithRawResponse: + from .resources.models import ModelsResourceWithRawResponse + + return ModelsResourceWithRawResponse(self._client.models) + + @cached_property + def graphs(self) -> graphs.GraphsResourceWithRawResponse: + from 
.resources.graphs import GraphsResourceWithRawResponse + + return GraphsResourceWithRawResponse(self._client.graphs) + + @cached_property + def files(self) -> files.FilesResourceWithRawResponse: + from .resources.files import FilesResourceWithRawResponse + + return FilesResourceWithRawResponse(self._client.files) + + @cached_property + def tools(self) -> tools.ToolsResourceWithRawResponse: + from .resources.tools import ToolsResourceWithRawResponse + + return ToolsResourceWithRawResponse(self._client.tools) + + @cached_property + def translation(self) -> translation.TranslationResourceWithRawResponse: + from .resources.translation import TranslationResourceWithRawResponse + + return TranslationResourceWithRawResponse(self._client.translation) + + @cached_property + def vision(self) -> vision.VisionResourceWithRawResponse: + from .resources.vision import VisionResourceWithRawResponse + + return VisionResourceWithRawResponse(self._client.vision) class AsyncWriterWithRawResponse: + _client: AsyncWriter + def __init__(self, client: AsyncWriter) -> None: - self.applications = applications.AsyncApplicationsResourceWithRawResponse(client.applications) - self.chat = chat.AsyncChatResourceWithRawResponse(client.chat) - self.completions = completions.AsyncCompletionsResourceWithRawResponse(client.completions) - self.models = models.AsyncModelsResourceWithRawResponse(client.models) - self.graphs = graphs.AsyncGraphsResourceWithRawResponse(client.graphs) - self.files = files.AsyncFilesResourceWithRawResponse(client.files) - self.tools = tools.AsyncToolsResourceWithRawResponse(client.tools) - self.translation = translation.AsyncTranslationResourceWithRawResponse(client.translation) - self.vision = vision.AsyncVisionResourceWithRawResponse(client.vision) + self._client = client + + @cached_property + def applications(self) -> applications.AsyncApplicationsResourceWithRawResponse: + from .resources.applications import AsyncApplicationsResourceWithRawResponse + + return AsyncApplicationsResourceWithRawResponse(self._client.applications) + + @cached_property + def chat(self) -> chat.AsyncChatResourceWithRawResponse: + from .resources.chat import AsyncChatResourceWithRawResponse + + return AsyncChatResourceWithRawResponse(self._client.chat) + + @cached_property + def completions(self) -> completions.AsyncCompletionsResourceWithRawResponse: + from .resources.completions import AsyncCompletionsResourceWithRawResponse + + return AsyncCompletionsResourceWithRawResponse(self._client.completions) + + @cached_property + def models(self) -> models.AsyncModelsResourceWithRawResponse: + from .resources.models import AsyncModelsResourceWithRawResponse + + return AsyncModelsResourceWithRawResponse(self._client.models) + + @cached_property + def graphs(self) -> graphs.AsyncGraphsResourceWithRawResponse: + from .resources.graphs import AsyncGraphsResourceWithRawResponse + + return AsyncGraphsResourceWithRawResponse(self._client.graphs) + + @cached_property + def files(self) -> files.AsyncFilesResourceWithRawResponse: + from .resources.files import AsyncFilesResourceWithRawResponse + + return AsyncFilesResourceWithRawResponse(self._client.files) + + @cached_property + def tools(self) -> tools.AsyncToolsResourceWithRawResponse: + from .resources.tools import AsyncToolsResourceWithRawResponse + + return AsyncToolsResourceWithRawResponse(self._client.tools) + + @cached_property + def translation(self) -> translation.AsyncTranslationResourceWithRawResponse: + from .resources.translation import 
AsyncTranslationResourceWithRawResponse + + return AsyncTranslationResourceWithRawResponse(self._client.translation) + + @cached_property + def vision(self) -> vision.AsyncVisionResourceWithRawResponse: + from .resources.vision import AsyncVisionResourceWithRawResponse + + return AsyncVisionResourceWithRawResponse(self._client.vision) class WriterWithStreamedResponse: + _client: Writer + def __init__(self, client: Writer) -> None: - self.applications = applications.ApplicationsResourceWithStreamingResponse(client.applications) - self.chat = chat.ChatResourceWithStreamingResponse(client.chat) - self.completions = completions.CompletionsResourceWithStreamingResponse(client.completions) - self.models = models.ModelsResourceWithStreamingResponse(client.models) - self.graphs = graphs.GraphsResourceWithStreamingResponse(client.graphs) - self.files = files.FilesResourceWithStreamingResponse(client.files) - self.tools = tools.ToolsResourceWithStreamingResponse(client.tools) - self.translation = translation.TranslationResourceWithStreamingResponse(client.translation) - self.vision = vision.VisionResourceWithStreamingResponse(client.vision) + self._client = client + + @cached_property + def applications(self) -> applications.ApplicationsResourceWithStreamingResponse: + from .resources.applications import ApplicationsResourceWithStreamingResponse + + return ApplicationsResourceWithStreamingResponse(self._client.applications) + + @cached_property + def chat(self) -> chat.ChatResourceWithStreamingResponse: + from .resources.chat import ChatResourceWithStreamingResponse + + return ChatResourceWithStreamingResponse(self._client.chat) + + @cached_property + def completions(self) -> completions.CompletionsResourceWithStreamingResponse: + from .resources.completions import CompletionsResourceWithStreamingResponse + + return CompletionsResourceWithStreamingResponse(self._client.completions) + + @cached_property + def models(self) -> models.ModelsResourceWithStreamingResponse: + from .resources.models import ModelsResourceWithStreamingResponse + + return ModelsResourceWithStreamingResponse(self._client.models) + + @cached_property + def graphs(self) -> graphs.GraphsResourceWithStreamingResponse: + from .resources.graphs import GraphsResourceWithStreamingResponse + + return GraphsResourceWithStreamingResponse(self._client.graphs) + + @cached_property + def files(self) -> files.FilesResourceWithStreamingResponse: + from .resources.files import FilesResourceWithStreamingResponse + + return FilesResourceWithStreamingResponse(self._client.files) + + @cached_property + def tools(self) -> tools.ToolsResourceWithStreamingResponse: + from .resources.tools import ToolsResourceWithStreamingResponse + + return ToolsResourceWithStreamingResponse(self._client.tools) + + @cached_property + def translation(self) -> translation.TranslationResourceWithStreamingResponse: + from .resources.translation import TranslationResourceWithStreamingResponse + + return TranslationResourceWithStreamingResponse(self._client.translation) + + @cached_property + def vision(self) -> vision.VisionResourceWithStreamingResponse: + from .resources.vision import VisionResourceWithStreamingResponse + + return VisionResourceWithStreamingResponse(self._client.vision) class AsyncWriterWithStreamedResponse: + _client: AsyncWriter + def __init__(self, client: AsyncWriter) -> None: - self.applications = applications.AsyncApplicationsResourceWithStreamingResponse(client.applications) - self.chat = chat.AsyncChatResourceWithStreamingResponse(client.chat) - 
self.completions = completions.AsyncCompletionsResourceWithStreamingResponse(client.completions) - self.models = models.AsyncModelsResourceWithStreamingResponse(client.models) - self.graphs = graphs.AsyncGraphsResourceWithStreamingResponse(client.graphs) - self.files = files.AsyncFilesResourceWithStreamingResponse(client.files) - self.tools = tools.AsyncToolsResourceWithStreamingResponse(client.tools) - self.translation = translation.AsyncTranslationResourceWithStreamingResponse(client.translation) - self.vision = vision.AsyncVisionResourceWithStreamingResponse(client.vision) + self._client = client + + @cached_property + def applications(self) -> applications.AsyncApplicationsResourceWithStreamingResponse: + from .resources.applications import AsyncApplicationsResourceWithStreamingResponse + + return AsyncApplicationsResourceWithStreamingResponse(self._client.applications) + + @cached_property + def chat(self) -> chat.AsyncChatResourceWithStreamingResponse: + from .resources.chat import AsyncChatResourceWithStreamingResponse + + return AsyncChatResourceWithStreamingResponse(self._client.chat) + + @cached_property + def completions(self) -> completions.AsyncCompletionsResourceWithStreamingResponse: + from .resources.completions import AsyncCompletionsResourceWithStreamingResponse + + return AsyncCompletionsResourceWithStreamingResponse(self._client.completions) + + @cached_property + def models(self) -> models.AsyncModelsResourceWithStreamingResponse: + from .resources.models import AsyncModelsResourceWithStreamingResponse + + return AsyncModelsResourceWithStreamingResponse(self._client.models) + + @cached_property + def graphs(self) -> graphs.AsyncGraphsResourceWithStreamingResponse: + from .resources.graphs import AsyncGraphsResourceWithStreamingResponse + + return AsyncGraphsResourceWithStreamingResponse(self._client.graphs) + + @cached_property + def files(self) -> files.AsyncFilesResourceWithStreamingResponse: + from .resources.files import AsyncFilesResourceWithStreamingResponse + + return AsyncFilesResourceWithStreamingResponse(self._client.files) + + @cached_property + def tools(self) -> tools.AsyncToolsResourceWithStreamingResponse: + from .resources.tools import AsyncToolsResourceWithStreamingResponse + + return AsyncToolsResourceWithStreamingResponse(self._client.tools) + + @cached_property + def translation(self) -> translation.AsyncTranslationResourceWithStreamingResponse: + from .resources.translation import AsyncTranslationResourceWithStreamingResponse + + return AsyncTranslationResourceWithStreamingResponse(self._client.translation) + + @cached_property + def vision(self) -> vision.AsyncVisionResourceWithStreamingResponse: + from .resources.vision import AsyncVisionResourceWithStreamingResponse + + return AsyncVisionResourceWithStreamingResponse(self._client.vision) Client = Writer diff --git a/src/writerai/_constants.py b/src/writerai/_constants.py index eeeb0942..9a4c97ab 100644 --- a/src/writerai/_constants.py +++ b/src/writerai/_constants.py @@ -7,8 +7,8 @@ # default timeout is 3 minutes DEFAULT_TIMEOUT = httpx.Timeout(timeout=180, connect=5.0) -DEFAULT_MAX_RETRIES = 2 +DEFAULT_MAX_RETRIES = 7 DEFAULT_CONNECTION_LIMITS = httpx.Limits(max_connections=100, max_keepalive_connections=20) -INITIAL_RETRY_DELAY = 0.5 -MAX_RETRY_DELAY = 8.0 +INITIAL_RETRY_DELAY = 1.0 +MAX_RETRY_DELAY = 60.0 diff --git a/src/writerai/_types.py b/src/writerai/_types.py index 146fcfb6..ed3a7f53 100644 --- a/src/writerai/_types.py +++ b/src/writerai/_types.py @@ -243,6 +243,9 @@ class 
HttpxSendArgs(TypedDict, total=False): if TYPE_CHECKING: # This works because str.__contains__ does not accept object (either in typeshed or at runtime) # https://github.com/hauntsaninja/useful_types/blob/5e9710f3875107d068e7679fd7fec9cfab0eff3b/useful_types/__init__.py#L285 + # + # Note: index() and count() methods are intentionally omitted to allow pyright to properly + # infer TypedDict types when dict literals are used in lists assigned to SequenceNotStr. class SequenceNotStr(Protocol[_T_co]): @overload def __getitem__(self, index: SupportsIndex, /) -> _T_co: ... @@ -251,8 +254,6 @@ def __getitem__(self, index: slice, /) -> Sequence[_T_co]: ... def __contains__(self, value: object, /) -> bool: ... def __len__(self) -> int: ... def __iter__(self) -> Iterator[_T_co]: ... - def index(self, value: Any, start: int = 0, stop: int = ..., /) -> int: ... - def count(self, value: Any, /) -> int: ... def __reversed__(self) -> Iterator[_T_co]: ... else: # just point this to a normal `Sequence` at runtime to avoid having to special case diff --git a/src/writerai/_version.py b/src/writerai/_version.py index 6ff9a512..51018589 100644 --- a/src/writerai/_version.py +++ b/src/writerai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "writerai" -__version__ = "2.3.3-rc1" # x-release-please-version +__version__ = "2.4.0-rc1" # x-release-please-version diff --git a/src/writerai/types/application_list_response.py b/src/writerai/types/application_list_response.py index e80b6e90..d6eb0734 100644 --- a/src/writerai/types/application_list_response.py +++ b/src/writerai/types/application_list_response.py @@ -18,11 +18,15 @@ class InputOptionsApplicationInputDropdownOptions(BaseModel): + """Configuration options specific to dropdown-type input fields.""" + list: List[str] """List of available options in the dropdown menu.""" class InputOptionsApplicationInputFileOptions(BaseModel): + """Configuration options specific to file upload input fields.""" + file_types: List[str] """List of allowed file extensions.""" @@ -40,6 +44,8 @@ class InputOptionsApplicationInputFileOptions(BaseModel): class InputOptionsApplicationInputMediaOptions(BaseModel): + """Configuration options specific to media upload input fields.""" + file_types: List[str] """List of allowed media file types.""" @@ -48,6 +54,8 @@ class InputOptionsApplicationInputMediaOptions(BaseModel): class InputOptionsApplicationInputTextOptions(BaseModel): + """Configuration options specific to text input fields.""" + max_fields: int """Maximum number of text fields allowed.""" @@ -64,6 +72,8 @@ class InputOptionsApplicationInputTextOptions(BaseModel): class Input(BaseModel): + """Configuration for an individual input field in the application.""" + input_type: Literal["text", "dropdown", "file", "media"] """Type of input field determining its behavior and validation rules.""" @@ -81,6 +91,8 @@ class Input(BaseModel): class ApplicationListResponse(BaseModel): + """Detailed application object including its input configuration.""" + id: str """Unique identifier for the application.""" diff --git a/src/writerai/types/application_retrieve_response.py b/src/writerai/types/application_retrieve_response.py index 00827b7a..6fec026a 100644 --- a/src/writerai/types/application_retrieve_response.py +++ b/src/writerai/types/application_retrieve_response.py @@ -18,11 +18,15 @@ class InputOptionsApplicationInputDropdownOptions(BaseModel): + """Configuration options specific to dropdown-type input 
fields.""" + list: List[str] """List of available options in the dropdown menu.""" class InputOptionsApplicationInputFileOptions(BaseModel): + """Configuration options specific to file upload input fields.""" + file_types: List[str] """List of allowed file extensions.""" @@ -40,6 +44,8 @@ class InputOptionsApplicationInputFileOptions(BaseModel): class InputOptionsApplicationInputMediaOptions(BaseModel): + """Configuration options specific to media upload input fields.""" + file_types: List[str] """List of allowed media file types.""" @@ -48,6 +54,8 @@ class InputOptionsApplicationInputMediaOptions(BaseModel): class InputOptionsApplicationInputTextOptions(BaseModel): + """Configuration options specific to text input fields.""" + max_fields: int """Maximum number of text fields allowed.""" @@ -64,6 +72,8 @@ class InputOptionsApplicationInputTextOptions(BaseModel): class Input(BaseModel): + """Configuration for an individual input field in the application.""" + input_type: Literal["text", "dropdown", "file", "media"] """Type of input field determining its behavior and validation rules.""" @@ -81,6 +91,8 @@ class Input(BaseModel): class ApplicationRetrieveResponse(BaseModel): + """Detailed application object including its input configuration.""" + id: str """Unique identifier for the application.""" diff --git a/src/writerai/types/chat_chat_params.py b/src/writerai/types/chat_chat_params.py index 522248af..b7b3d830 100644 --- a/src/writerai/types/chat_chat_params.py +++ b/src/writerai/types/chat_chat_params.py @@ -122,6 +122,8 @@ class ChatChatParamsBase(TypedDict, total=False): class MessageContentMixedContentTextFragment(TypedDict, total=False): + """Represents a text content fragment within a chat message.""" + text: Required[str] """The actual text content of the message fragment.""" @@ -130,6 +132,8 @@ class MessageContentMixedContentTextFragment(TypedDict, total=False): class MessageContentMixedContentImageFragmentImageURL(TypedDict, total=False): + """The image URL object containing the location of the image.""" + url: Required[str] """The URL pointing to the image file. @@ -138,6 +142,11 @@ class MessageContentMixedContentImageFragmentImageURL(TypedDict, total=False): class MessageContentMixedContentImageFragment(TypedDict, total=False): + """Represents an image content fragment within a chat message. + + Note: This content type is only supported with the Palmyra X5 model. + """ + image_url: Required[MessageContentMixedContentImageFragmentImageURL] """The image URL object containing the location of the image.""" @@ -184,6 +193,12 @@ class Message(TypedDict, total=False): class ResponseFormat(TypedDict, total=False): + """ + The response format to use for the chat completion, available with `palmyra-x4` and `palmyra-x5`. + + `text` is the default response format. [JSON Schema](https://json-schema.org/) is supported for structured responses. If you specify `json_schema`, you must also provide a `json_schema` object. 
+ """ + type: Required[Literal["text", "json_schema"]] """The type of response format to use.""" @@ -192,6 +207,8 @@ class ResponseFormat(TypedDict, total=False): class StreamOptions(TypedDict, total=False): + """Additional options for streaming.""" + include_usage: Required[bool] """Indicate whether to include usage information.""" diff --git a/src/writerai/types/chat_completion_chunk.py b/src/writerai/types/chat_completion_chunk.py index 6d873355..bc3bf85b 100644 --- a/src/writerai/types/chat_completion_chunk.py +++ b/src/writerai/types/chat_completion_chunk.py @@ -33,6 +33,8 @@ class ChoiceDeltaTranslationData(BaseModel): class ChoiceDelta(BaseModel): + """A chat completion delta generated by streamed model responses.""" + content: Optional[str] = None """The text content produced by the model. diff --git a/src/writerai/types/chat_completion_message.py b/src/writerai/types/chat_completion_message.py index 5b4cb30b..97045c61 100644 --- a/src/writerai/types/chat_completion_message.py +++ b/src/writerai/types/chat_completion_message.py @@ -40,6 +40,11 @@ class WebSearchData(BaseModel): class ChatCompletionMessage(BaseModel): + """The chat completion message from the model. + + Note: this field is deprecated for streaming. Use `delta` instead. + """ + content: str """The text content produced by the model. diff --git a/src/writerai/types/chat_completion_usage.py b/src/writerai/types/chat_completion_usage.py index 6fc89018..ec7b7991 100644 --- a/src/writerai/types/chat_completion_usage.py +++ b/src/writerai/types/chat_completion_usage.py @@ -16,6 +16,11 @@ class PromptTokenDetails(BaseModel): class ChatCompletionUsage(BaseModel): + """Usage information for the chat completion response. + + Please note that at this time Knowledge Graph tool usage is not included in this object. + """ + completion_tokens: int prompt_tokens: int diff --git a/src/writerai/types/graph.py b/src/writerai/types/graph.py index 469c0da6..7721c972 100644 --- a/src/writerai/types/graph.py +++ b/src/writerai/types/graph.py @@ -10,6 +10,8 @@ class FileStatus(BaseModel): + """The processing status of files in the Knowledge Graph.""" + completed: int """The number of files that have been successfully processed.""" @@ -24,6 +26,8 @@ class FileStatus(BaseModel): class URLStatus(BaseModel): + """The current status of the URL processing.""" + status: Literal["validating", "success", "error"] """The current status of the URL processing.""" diff --git a/src/writerai/types/graph_create_response.py b/src/writerai/types/graph_create_response.py index 11dcb958..87b471c0 100644 --- a/src/writerai/types/graph_create_response.py +++ b/src/writerai/types/graph_create_response.py @@ -10,6 +10,8 @@ class URLStatus(BaseModel): + """The current status of the URL processing.""" + status: Literal["validating", "success", "error"] """The current status of the URL processing.""" diff --git a/src/writerai/types/graph_question_params.py b/src/writerai/types/graph_question_params.py index a49563c7..dc61ae1d 100644 --- a/src/writerai/types/graph_question_params.py +++ b/src/writerai/types/graph_question_params.py @@ -28,6 +28,10 @@ class GraphQuestionParamsBase(TypedDict, total=False): class QueryConfig(TypedDict, total=False): + """ + Configuration options for Knowledge Graph queries, including search parameters and citation settings. 
+ """ + grounding_level: float """ Level of grounding required for responses, controlling how closely answers must diff --git a/src/writerai/types/graph_update_response.py b/src/writerai/types/graph_update_response.py index bc17ebf3..6910f9d4 100644 --- a/src/writerai/types/graph_update_response.py +++ b/src/writerai/types/graph_update_response.py @@ -10,6 +10,8 @@ class URLStatus(BaseModel): + """The current status of the URL processing.""" + status: Literal["validating", "success", "error"] """The current status of the URL processing.""" diff --git a/src/writerai/types/question.py b/src/writerai/types/question.py index 9f3095bb..58a656cb 100644 --- a/src/writerai/types/question.py +++ b/src/writerai/types/question.py @@ -11,6 +11,10 @@ class ReferencesFile(BaseModel): + """ + A file-based reference containing text snippets from uploaded documents in the Knowledge Graph. + """ + file_id: str = FieldInfo(alias="fileId") """The unique identifier of the file in your Writer account.""" @@ -37,6 +41,10 @@ class ReferencesFile(BaseModel): class ReferencesWeb(BaseModel): + """ + A web-based reference containing text snippets from online sources accessed during the query. + """ + score: float """ Internal score used during the retrieval process for ranking and selecting @@ -57,6 +65,10 @@ class ReferencesWeb(BaseModel): class References(BaseModel): + """ + Detailed source information organized by reference type, providing comprehensive metadata about the sources used to generate the response. + """ + files: Optional[List[ReferencesFile]] = None """Array of file-based references from uploaded documents in the Knowledge Graph.""" @@ -65,6 +77,10 @@ class References(BaseModel): class Subquery(BaseModel): + """ + A sub-question generated to break down complex queries into more manageable parts, along with its answer and supporting sources. + """ + answer: str """The answer to the subquery based on Knowledge Graph content.""" diff --git a/src/writerai/types/shared/function_definition.py b/src/writerai/types/shared/function_definition.py index 357434f5..e71dd5fc 100644 --- a/src/writerai/types/shared/function_definition.py +++ b/src/writerai/types/shared/function_definition.py @@ -9,6 +9,8 @@ class FunctionDefinition(BaseModel): + """A tool that uses a custom function.""" + name: str """Name of the function.""" diff --git a/src/writerai/types/shared/graph_data.py b/src/writerai/types/shared/graph_data.py index 6d8eb341..bac0293d 100644 --- a/src/writerai/types/shared/graph_data.py +++ b/src/writerai/types/shared/graph_data.py @@ -12,6 +12,10 @@ class ReferencesFile(BaseModel): + """ + A file-based reference containing text snippets from uploaded documents in the Knowledge Graph. + """ + file_id: str = FieldInfo(alias="fileId") """The unique identifier of the file in your Writer account.""" @@ -38,6 +42,10 @@ class ReferencesFile(BaseModel): class ReferencesWeb(BaseModel): + """ + A web-based reference containing text snippets from online sources accessed during the query. + """ + score: float """ Internal score used during the retrieval process for ranking and selecting @@ -58,6 +66,10 @@ class ReferencesWeb(BaseModel): class References(BaseModel): + """ + Detailed source information organized by reference type, providing comprehensive metadata about the sources used to generate the response. 
+ """ + files: Optional[List[ReferencesFile]] = None """Array of file-based references from uploaded documents in the Knowledge Graph.""" @@ -66,6 +78,10 @@ class References(BaseModel): class Subquery(BaseModel): + """ + A sub-question generated to break down complex queries into more manageable parts, along with its answer and supporting sources. + """ + answer: str """The answer to the subquery based on Knowledge Graph content.""" diff --git a/src/writerai/types/shared/logprobs_token.py b/src/writerai/types/shared/logprobs_token.py index 40c58d67..cf122336 100644 --- a/src/writerai/types/shared/logprobs_token.py +++ b/src/writerai/types/shared/logprobs_token.py @@ -8,6 +8,10 @@ class TopLogprob(BaseModel): + """ + An array of mappings for each token to its top log probabilities, showing detailed prediction probabilities. + """ + token: str logprob: float diff --git a/src/writerai/types/shared/source.py b/src/writerai/types/shared/source.py index 326d01ad..65debe9c 100644 --- a/src/writerai/types/shared/source.py +++ b/src/writerai/types/shared/source.py @@ -6,6 +6,8 @@ class Source(BaseModel): + """A source snippet containing text and fileId from Knowledge Graph content.""" + file_id: str """The unique identifier of the file in your Writer account.""" diff --git a/src/writerai/types/shared/tool_param.py b/src/writerai/types/shared/tool_param.py index 934e6bf4..1391010a 100644 --- a/src/writerai/types/shared/tool_param.py +++ b/src/writerai/types/shared/tool_param.py @@ -34,6 +34,10 @@ class FunctionTool(BaseModel): class GraphToolFunctionQueryConfig(BaseModel): + """ + Configuration options for Knowledge Graph queries, including search parameters and citation settings. + """ + grounding_level: Optional[float] = None """ Level of grounding required for responses, controlling how closely answers must @@ -101,6 +105,8 @@ class GraphToolFunctionQueryConfig(BaseModel): class GraphToolFunction(BaseModel): + """A tool that uses Knowledge Graphs as context for responses.""" + graph_ids: List[str] """An array of graph IDs to use in the tool.""" @@ -126,6 +132,8 @@ class GraphTool(BaseModel): class LlmToolFunction(BaseModel): + """A tool that uses another Writer model to generate a response.""" + description: str """A description of the model to use.""" @@ -142,6 +150,8 @@ class LlmTool(BaseModel): class TranslationToolFunction(BaseModel): + """A tool that uses Palmyra Translate to translate text.""" + formality: bool """Whether to use formal or informal language in the translation. @@ -194,6 +204,11 @@ class TranslationToolFunction(BaseModel): class TranslationTool(BaseModel): + """A tool that uses Palmyra Translate to translate text. + + Note that this tool does not stream results. The response is returned after the translation is complete. + """ + function: TranslationToolFunction """A tool that uses Palmyra Translate to translate text.""" @@ -221,6 +236,11 @@ class VisionToolFunctionVariable(BaseModel): class VisionToolFunction(BaseModel): + """A tool that uses Palmyra Vision to analyze images and documents. + + Supports JPG, PNG, PDF, and TXT files up to 7MB each. 
+ """ + model: Literal["palmyra-vision"] """The model to use for image analysis.""" @@ -239,6 +259,8 @@ class VisionTool(BaseModel): class WebSearchToolFunction(BaseModel): + """A tool that uses web search to find information.""" + exclude_domains: List[str] """An array of domains to exclude from the search results.""" diff --git a/src/writerai/types/shared_params/function_definition.py b/src/writerai/types/shared_params/function_definition.py index 53606cb6..44dcd0c7 100644 --- a/src/writerai/types/shared_params/function_definition.py +++ b/src/writerai/types/shared_params/function_definition.py @@ -10,6 +10,8 @@ class FunctionDefinition(TypedDict, total=False): + """A tool that uses a custom function.""" + name: Required[str] """Name of the function.""" diff --git a/src/writerai/types/shared_params/graph_data.py b/src/writerai/types/shared_params/graph_data.py index 6e162c1d..70f0caad 100644 --- a/src/writerai/types/shared_params/graph_data.py +++ b/src/writerai/types/shared_params/graph_data.py @@ -12,6 +12,10 @@ class ReferencesFile(TypedDict, total=False): + """ + A file-based reference containing text snippets from uploaded documents in the Knowledge Graph. + """ + file_id: Required[Annotated[str, PropertyInfo(alias="fileId")]] """The unique identifier of the file in your Writer account.""" @@ -38,6 +42,10 @@ class ReferencesFile(TypedDict, total=False): class ReferencesWeb(TypedDict, total=False): + """ + A web-based reference containing text snippets from online sources accessed during the query. + """ + score: Required[float] """ Internal score used during the retrieval process for ranking and selecting @@ -58,6 +66,10 @@ class ReferencesWeb(TypedDict, total=False): class References(TypedDict, total=False): + """ + Detailed source information organized by reference type, providing comprehensive metadata about the sources used to generate the response. + """ + files: Iterable[ReferencesFile] """Array of file-based references from uploaded documents in the Knowledge Graph.""" @@ -66,6 +78,10 @@ class References(TypedDict, total=False): class Subquery(TypedDict, total=False): + """ + A sub-question generated to break down complex queries into more manageable parts, along with its answer and supporting sources. + """ + answer: Required[str] """The answer to the subquery based on Knowledge Graph content.""" diff --git a/src/writerai/types/shared_params/source.py b/src/writerai/types/shared_params/source.py index dc6726ba..a1397fb5 100644 --- a/src/writerai/types/shared_params/source.py +++ b/src/writerai/types/shared_params/source.py @@ -8,6 +8,8 @@ class Source(TypedDict, total=False): + """A source snippet containing text and fileId from Knowledge Graph content.""" + file_id: Required[str] """The unique identifier of the file in your Writer account.""" diff --git a/src/writerai/types/shared_params/tool_param.py b/src/writerai/types/shared_params/tool_param.py index 2ab4c094..1a9c4dd7 100644 --- a/src/writerai/types/shared_params/tool_param.py +++ b/src/writerai/types/shared_params/tool_param.py @@ -35,6 +35,10 @@ class FunctionTool(TypedDict, total=False): class GraphToolFunctionQueryConfig(TypedDict, total=False): + """ + Configuration options for Knowledge Graph queries, including search parameters and citation settings. 
+ """ + grounding_level: float """ Level of grounding required for responses, controlling how closely answers must @@ -102,6 +106,8 @@ class GraphToolFunctionQueryConfig(TypedDict, total=False): class GraphToolFunction(TypedDict, total=False): + """A tool that uses Knowledge Graphs as context for responses.""" + graph_ids: Required[SequenceNotStr[str]] """An array of graph IDs to use in the tool.""" @@ -127,6 +133,8 @@ class GraphTool(TypedDict, total=False): class LlmToolFunction(TypedDict, total=False): + """A tool that uses another Writer model to generate a response.""" + description: Required[str] """A description of the model to use.""" @@ -143,6 +151,8 @@ class LlmTool(TypedDict, total=False): class TranslationToolFunction(TypedDict, total=False): + """A tool that uses Palmyra Translate to translate text.""" + formality: Required[bool] """Whether to use formal or informal language in the translation. @@ -195,6 +205,11 @@ class TranslationToolFunction(TypedDict, total=False): class TranslationTool(TypedDict, total=False): + """A tool that uses Palmyra Translate to translate text. + + Note that this tool does not stream results. The response is returned after the translation is complete. + """ + function: Required[TranslationToolFunction] """A tool that uses Palmyra Translate to translate text.""" @@ -222,6 +237,11 @@ class VisionToolFunctionVariable(TypedDict, total=False): class VisionToolFunction(TypedDict, total=False): + """A tool that uses Palmyra Vision to analyze images and documents. + + Supports JPG, PNG, PDF, and TXT files up to 7MB each. + """ + model: Required[Literal["palmyra-vision"]] """The model to use for image analysis.""" @@ -240,6 +260,8 @@ class VisionTool(TypedDict, total=False): class WebSearchToolFunction(TypedDict, total=False): + """A tool that uses web search to find information.""" + exclude_domains: Required[SequenceNotStr[str]] """An array of domains to exclude from the search results.""" diff --git a/src/writerai/types/vision_analyze_params.py b/src/writerai/types/vision_analyze_params.py index 1bcd12a4..2e0adf2d 100644 --- a/src/writerai/types/vision_analyze_params.py +++ b/src/writerai/types/vision_analyze_params.py @@ -24,6 +24,13 @@ class VisionAnalyzeParams(TypedDict, total=False): class Variable(TypedDict, total=False): + """An array of file variables required for the analysis. + + The files must be uploaded to the Writer platform before they can be used in a vision request. Learn how to upload files using the [Files API](https://dev.writer.com/api-reference/file-api/upload-files). + + Supported file types: JPG, PNG, PDF, TXT. The maximum allowed file size for each file is 7MB. + """ + file_id: Required[str] """The File ID of the file to analyze. 
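To ground the new `Variable` docstring above (again, not part of the diff): a hedged sketch of a vision request. It assumes `client.vision.analyze(...)` accepts `model`, `prompt`, and `variables`, and that each variable carries a `name` that the prompt references as `{{name}}`; the file ID is a placeholder and the file must already be uploaded via the Files API.

```python
import os

from writerai import Writer

client = Writer(api_key=os.environ.get("WRITER_API_KEY"))

# "invoice" and the file ID below are placeholders for illustration only.
response = client.vision.analyze(
    model="palmyra-vision",
    prompt="Summarize the line items in {{invoice}}.",
    variables=[{"name": "invoice", "file_id": "<uploaded-file-id>"}],
)
print(response)
```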
diff --git a/tests/test_client.py b/tests/test_client.py index 4066755f..f90d3a04 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -92,7 +92,7 @@ def test_copy_default_options(self, client: Writer) -> None: # options that have a default are overridden correctly copied = client.copy(max_retries=7) assert copied.max_retries == 7 - assert client.max_retries == 2 + assert client.max_retries == 7 copied2 = copied.copy(max_retries=6) assert copied2.max_retries == 6 @@ -715,21 +715,21 @@ class Model(BaseModel): "remaining_retries,retry_after,timeout", [ [3, "20", 20], - [3, "0", 0.5], - [3, "-10", 0.5], + [3, "0", 1], + [3, "-10", 1], [3, "60", 60], - [3, "61", 0.5], + [3, "61", 1], [3, "Fri, 29 Sep 2023 16:26:57 GMT", 20], - [3, "Fri, 29 Sep 2023 16:26:37 GMT", 0.5], - [3, "Fri, 29 Sep 2023 16:26:27 GMT", 0.5], + [3, "Fri, 29 Sep 2023 16:26:37 GMT", 1], + [3, "Fri, 29 Sep 2023 16:26:27 GMT", 1], [3, "Fri, 29 Sep 2023 16:27:37 GMT", 60], - [3, "Fri, 29 Sep 2023 16:27:38 GMT", 0.5], - [3, "99999999999999999999999999999999999", 0.5], - [3, "Zun, 29 Sep 2023 16:26:27 GMT", 0.5], - [3, "", 0.5], - [2, "", 0.5 * 2.0], - [1, "", 0.5 * 4.0], - [-1100, "", 8], # test large number potentially overflowing + [3, "Fri, 29 Sep 2023 16:27:38 GMT", 1], + [3, "99999999999999999999999999999999999", 1], + [3, "Zun, 29 Sep 2023 16:26:27 GMT", 1], + [3, "", 1], + [2, "", 1 * 2.0], + [1, "", 1 * 4.0], + [-1100, "", 60], # test large number potentially overflowing ], ) @mock.patch("time.time", mock.MagicMock(return_value=1696004797)) @@ -739,7 +739,7 @@ def test_parse_retry_after_header( headers = httpx.Headers({"retry-after": retry_after}) options = FinalRequestOptions(method="get", url="/foo", max_retries=3) calculated = client._calculate_retry_timeout(remaining_retries, options, headers) - assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType] + assert calculated == pytest.approx(timeout, 1 * 0.875) # pyright: ignore[reportUnknownMemberType] @mock.patch("writerai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @@ -922,7 +922,7 @@ def test_copy_default_options(self, async_client: AsyncWriter) -> None: # options that have a default are overridden correctly copied = async_client.copy(max_retries=7) assert copied.max_retries == 7 - assert async_client.max_retries == 2 + assert async_client.max_retries == 7 copied2 = copied.copy(max_retries=6) assert copied2.max_retries == 6 @@ -1562,21 +1562,21 @@ class Model(BaseModel): "remaining_retries,retry_after,timeout", [ [3, "20", 20], - [3, "0", 0.5], - [3, "-10", 0.5], + [3, "0", 1], + [3, "-10", 1], [3, "60", 60], - [3, "61", 0.5], + [3, "61", 1], [3, "Fri, 29 Sep 2023 16:26:57 GMT", 20], - [3, "Fri, 29 Sep 2023 16:26:37 GMT", 0.5], - [3, "Fri, 29 Sep 2023 16:26:27 GMT", 0.5], + [3, "Fri, 29 Sep 2023 16:26:37 GMT", 1], + [3, "Fri, 29 Sep 2023 16:26:27 GMT", 1], [3, "Fri, 29 Sep 2023 16:27:37 GMT", 60], - [3, "Fri, 29 Sep 2023 16:27:38 GMT", 0.5], - [3, "99999999999999999999999999999999999", 0.5], - [3, "Zun, 29 Sep 2023 16:26:27 GMT", 0.5], - [3, "", 0.5], - [2, "", 0.5 * 2.0], - [1, "", 0.5 * 4.0], - [-1100, "", 8], # test large number potentially overflowing + [3, "Fri, 29 Sep 2023 16:27:38 GMT", 1], + [3, "99999999999999999999999999999999999", 1], + [3, "Zun, 29 Sep 2023 16:26:27 GMT", 1], + [3, "", 1], + [2, "", 1 * 2.0], + [1, "", 1 * 4.0], + [-1100, "", 60], # test large number potentially overflowing ], ) @mock.patch("time.time", 
mock.MagicMock(return_value=1696004797)) @@ -1586,7 +1586,7 @@ async def test_parse_retry_after_header( headers = httpx.Headers({"retry-after": retry_after}) options = FinalRequestOptions(method="get", url="/foo", max_retries=3) calculated = async_client._calculate_retry_timeout(remaining_retries, options, headers) - assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType] + assert calculated == pytest.approx(timeout, 1 * 0.875) # pyright: ignore[reportUnknownMemberType] @mock.patch("writerai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url)
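A closing note on the updated retry tests: with `INITIAL_RETRY_DELAY = 1.0` and `MAX_RETRY_DELAY = 60.0` from `_constants.py`, the expected timeouts above follow from a capped exponential backoff with jitter. The sketch below is an approximation of that calculation for the rows with no usable `retry-after` header, not the base client's exact code.

```python
import random

INITIAL_RETRY_DELAY = 1.0  # new default in _constants.py
MAX_RETRY_DELAY = 60.0  # new default in _constants.py


def approx_retry_timeout(remaining_retries: int, max_retries: int = 3) -> float:
    """Approximate sleep before the next retry when the retry-after header is absent or unusable."""
    nb_retries = max_retries - remaining_retries
    # Exponential backoff capped at MAX_RETRY_DELAY; the cap is why remaining_retries=-1100 maps to 60.
    sleep_seconds = min(INITIAL_RETRY_DELAY * pow(2.0, nb_retries), MAX_RETRY_DELAY)
    # Jitter keeps concurrent clients from retrying in lockstep; the tests absorb it
    # via pytest.approx(timeout, 1 * 0.875).
    jitter = 1 - 0.25 * random.random()
    return sleep_seconds * jitter


# remaining_retries = 3, 2, 1 give roughly 1s, 2s, 4s, matching the "" rows in the parametrize table above.
for remaining in (3, 2, 1, -1100):
    print(remaining, round(approx_retry_timeout(remaining), 2))
```

Rows with a parseable `retry-after` in the range (0, 60] bypass this formula and use the header value directly, which is why `"20"` and `"60"` pass through unchanged while `"61"`, `"0"`, and negative or malformed values fall back to the backoff.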