From 5e0f1b6c3e931d15919e1595a3b23291dd5a3239 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Fri, 9 May 2025 19:01:20 +0000 Subject: [PATCH 01/47] Adding more documentation (#45) * Adding more documentation * Minor edit * No longer using testcode * Completed transactions documentation * Completed async documentation * Completed user documentation * Completed overview documentation * Completed overview documentation * Completed cursor documentation * Completed errno documentation * Completed errors documentation * Completed compression documentation * Completed logging documentation * Completed helpers documentation * Fixing test and lint * Added authentication documentation * Minor fixes * TLS docs * HTTP docs * Serialization docs * Migration docs --- README.md | 4 +- arangoasync/aql.py | 5 + arangoasync/client.py | 4 +- arangoasync/collection.py | 29 ++++- arangoasync/connection.py | 3 + arangoasync/database.py | 4 - arangoasync/http.py | 22 ++++ docs/aql.rst | 165 ++++++++++++++++++++++++++++- docs/async.rst | 148 +++++++++++++++++++++++++- docs/authentication.rst | 117 ++++++++++++++++++++ docs/certificates.rst | 110 +++++++++++++++++++ docs/collection.rst | 9 +- docs/compression.rst | 56 ++++++++++ docs/cursor.rst | 217 ++++++++++++++++++++++++++++++++++++++ docs/database.rst | 2 + docs/document.rst | 30 +++--- docs/errno.rst | 11 +- docs/errors.rst | 101 +++++++++++++++++- docs/helpers.rst | 88 ++++++++++++++++ docs/http.rst | 136 ++++++++++++++++++++++++ docs/index.rst | 26 +++-- docs/indexes.rst | 10 +- docs/logging.rst | 30 ++++++ docs/migration.rst | 94 +++++++++++++++++ docs/overview.rst | 23 ++++ docs/serialization.rst | 183 ++++++++++++++++++++++++++++++++ docs/specs.rst | 6 -- docs/transaction.rst | 76 +++++++++++++ docs/user.rst | 90 +++++++++++++++- tests/test_async.py | 2 +- tests/test_client.py | 18 ++-- 31 files changed, 1756 insertions(+), 63 deletions(-) create mode 100644 docs/authentication.rst create mode 100644 docs/certificates.rst create mode 100644 docs/compression.rst create mode 100644 docs/cursor.rst create mode 100644 docs/helpers.rst create mode 100644 docs/http.rst create mode 100644 docs/logging.rst create mode 100644 docs/migration.rst create mode 100644 docs/serialization.rst diff --git a/README.md b/README.md index 9e124b6..ea91bbf 100644 --- a/README.md +++ b/README.md @@ -17,7 +17,7 @@ Python driver for [ArangoDB](https://www.arangodb.com), a scalable multi-model database natively supporting documents, graphs and search. -This is the _asyncio_ alternative of the officially supported [python-arango](https://github.com/arangodb/python-arango) +This is the _asyncio_ alternative of the [python-arango](https://github.com/arangodb/python-arango) driver. **Note: This project is still in active development, features might be added or removed.** @@ -25,7 +25,7 @@ driver. 
## Requirements - ArangoDB version 3.11+ -- Python version 3.9+ +- Python version 3.10+ ## Installation diff --git a/arangoasync/aql.py b/arangoasync/aql.py index c0e1b29..57d57e1 100644 --- a/arangoasync/aql.py +++ b/arangoasync/aql.py @@ -238,6 +238,11 @@ def name(self) -> str: """Return the name of the current database.""" return self._executor.db_name + @property + def context(self) -> str: + """Return the current API execution context.""" + return self._executor.context + @property def serializer(self) -> Serializer[Json]: """Return the serializer.""" diff --git a/arangoasync/client.py b/arangoasync/client.py index 1b1159f..235cfae 100644 --- a/arangoasync/client.py +++ b/arangoasync/client.py @@ -139,7 +139,9 @@ def version(self) -> str: async def close(self) -> None: """Close HTTP sessions.""" - await asyncio.gather(*(session.close() for session in self._sessions)) + await asyncio.gather( + *(self._http_client.close_session(session) for session in self._sessions) + ) async def db( self, diff --git a/arangoasync/collection.py b/arangoasync/collection.py index b6bb483..3b4e5a9 100644 --- a/arangoasync/collection.py +++ b/arangoasync/collection.py @@ -251,6 +251,15 @@ def name(self) -> str: """ return self._name + @property + def context(self) -> str: + """Return the context of the collection. + + Returns: + str: Context. + """ + return self._executor.context + @property def db_name(self) -> str: """Return the name of the current database. @@ -270,9 +279,17 @@ def deserializer(self) -> Deserializer[Json, Jsons]: """Return the deserializer.""" return self._executor.deserializer - async def indexes(self) -> Result[List[IndexProperties]]: + async def indexes( + self, + with_stats: Optional[bool] = None, + with_hidden: Optional[bool] = None, + ) -> Result[List[IndexProperties]]: """Fetch all index descriptions for the given collection. + Args: + with_stats (bool | None): Whether to include figures and estimates in the result. + with_hidden (bool | None): Whether to include hidden indexes in the result. + Returns: list: List of index properties. @@ -282,10 +299,16 @@ async def indexes(self) -> Result[List[IndexProperties]]: References: - `list-all-indexes-of-a-collection `__ """ # noqa: E501 + params: Params = dict(collection=self._name) + if with_stats is not None: + params["withStats"] = with_stats + if with_hidden is not None: + params["withHidden"] = with_hidden + request = Request( method=Method.GET, endpoint="/_api/index", - params=dict(collection=self._name), + params=params, ) def response_handler(resp: Response) -> List[IndexProperties]: @@ -564,6 +587,7 @@ async def get( Raises: DocumentRevisionError: If the revision is incorrect. DocumentGetError: If retrieval fails. + DocumentParseError: If the document is malformed. References: - `get-a-document `__ @@ -707,6 +731,7 @@ async def insert( Raises: DocumentInsertError: If insertion fails. + DocumentParseError: If the document is malformed. 
References: - `create-a-document `__ diff --git a/arangoasync/connection.py b/arangoasync/connection.py index cac1b01..f404248 100644 --- a/arangoasync/connection.py +++ b/arangoasync/connection.py @@ -177,6 +177,9 @@ async def process_request(self, request: Request) -> Response: host_index = self._host_resolver.get_host_index() for tries in range(self._host_resolver.max_tries): try: + logger.debug( + f"Sending request to host {host_index} ({tries}): {request}" + ) resp = await self._http_client.send_request( self._sessions[host_index], request ) diff --git a/arangoasync/database.py b/arangoasync/database.py index 3022cc4..e1200df 100644 --- a/arangoasync/database.py +++ b/arangoasync/database.py @@ -596,7 +596,6 @@ async def create_collection( ) def response_handler(resp: Response) -> StandardCollection[T, U, V]: - nonlocal doc_serializer, doc_deserializer if not resp.is_success: raise CollectionCreateError(resp, request) if doc_serializer is None: @@ -648,7 +647,6 @@ async def delete_collection( ) def response_handler(resp: Response) -> bool: - nonlocal ignore_missing if resp.is_success: return True if resp.status_code == HTTP_NOT_FOUND and ignore_missing: @@ -1001,7 +999,6 @@ async def update_permission( ) def response_handler(resp: Response) -> bool: - nonlocal ignore_failure if resp.is_success: return True if ignore_failure: @@ -1046,7 +1043,6 @@ async def reset_permission( ) def response_handler(resp: Response) -> bool: - nonlocal ignore_failure if resp.is_success: return True if ignore_failure: diff --git a/arangoasync/http.py b/arangoasync/http.py index 02b88da..7fb4724 100644 --- a/arangoasync/http.py +++ b/arangoasync/http.py @@ -33,6 +33,8 @@ class HTTPClient(ABC): # pragma: no cover class MyCustomHTTPClient(HTTPClient): def create_session(self, host): pass + async def close_session(self, session): + pass async def send_request(self, session, request): pass """ @@ -52,6 +54,18 @@ def create_session(self, host: str) -> Any: """ raise NotImplementedError + @abstractmethod + async def close_session(self, session: Any) -> None: + """Close the session. + + Note: + This method must be overridden by the user. + + Args: + session (Any): Client session object. + """ + raise NotImplementedError + @abstractmethod async def send_request( self, @@ -129,6 +143,14 @@ def create_session(self, host: str) -> ClientSession: read_bufsize=self._read_bufsize, ) + async def close_session(self, session: ClientSession) -> None: + """Close the session. + + Args: + session (Any): Client session object. + """ + await session.close() + async def send_request( self, session: ClientSession, diff --git a/docs/aql.rst b/docs/aql.rst index 914c982..69a9bf6 100644 --- a/docs/aql.rst +++ b/docs/aql.rst @@ -5,6 +5,167 @@ AQL to SQL for relational databases, but without the support for data definition operations such as creating or deleting :doc:`databases `, :doc:`collections ` or :doc:`indexes `. For more -information, refer to `ArangoDB manual`_. +information, refer to `ArangoDB Manual`_. -.. _ArangoDB manual: https://docs.arangodb.com +.. _ArangoDB Manual: https://docs.arangodb.com + +AQL Queries +=========== + +AQL queries are invoked from AQL wrapper. Executing queries returns +:doc:`cursors `. + +**Example:** + +.. code-block:: python + + from arangoasync import ArangoClient, AQLQueryKillError + from arangoasync.auth import Auth + + # Initialize the client for ArangoDB. 
    async with ArangoClient(hosts="http://localhost:8529") as client:
        auth = Auth(username="root", password="passwd")

        # Connect to "test" database as root user.
        db = await client.db("test", auth=auth)

        # Get the API wrapper for "students" collection.
        students = db.collection("students")

        # Insert some test documents into "students" collection.
        await students.insert_many([
            {"_key": "Abby", "age": 22},
            {"_key": "John", "age": 18},
            {"_key": "Mary", "age": 21}
        ])

        # Get the AQL API wrapper.
        aql = db.aql

        # Retrieve the execution plan without running the query.
        plan = await aql.explain("FOR doc IN students RETURN doc")

        # Validate the query without executing it.
        validate = await aql.validate("FOR doc IN students RETURN doc")

        # Execute the query.
        cursor = await db.aql.execute(
            "FOR doc IN students FILTER doc.age < @value RETURN doc",
            bind_vars={"value": 19}
        )

        # Iterate through the result cursor.
        student_keys = []
        async for doc in cursor:
            student_keys.append(doc["_key"])

        # List currently running queries.
        queries = await aql.queries()

        # List any slow queries.
        slow_queries = await aql.slow_queries()

        # Clear slow AQL queries if any.
        await aql.clear_slow_queries()

        # Retrieve AQL query tracking properties.
        await aql.tracking()

        # Configure AQL query tracking properties.
        await aql.set_tracking(
            max_slow_queries=10,
            track_bind_vars=True,
            track_slow_queries=True
        )

        # Kill a running query (this should fail due to invalid ID).
        try:
            await aql.kill("some_query_id")
        except AQLQueryKillError as err:
            assert err.http_code == 404

See :class:`arangoasync.aql.AQL` for API specification.


AQL User Functions
==================

**AQL User Functions** are custom functions you define in JavaScript to extend
AQL functionality. They are somewhat similar to SQL procedures.

**Example:**

.. code-block:: python

    from arangoasync import ArangoClient
    from arangoasync.auth import Auth

    # Initialize the client for ArangoDB.
    async with ArangoClient(hosts="http://localhost:8529") as client:
        auth = Auth(username="root", password="passwd")

        # Connect to "test" database as root user.
        db = await client.db("test", auth=auth)

        # Get the AQL API wrapper.
        aql = db.aql

        # Create a new AQL user function.
        await aql.create_function(
            # Grouping by name prefix is supported.
            name="functions::temperature::converter",
            code="function (celsius) { return celsius * 1.8 + 32; }"
        )

        # List AQL user functions.
        functions = await aql.functions()

        # Delete an existing AQL user function.
        await aql.delete_function("functions::temperature::converter")

See :class:`arangoasync.aql.AQL` for API specification.


AQL Query Cache
===============

**AQL Query Cache** is used to minimize redundant calculation of the same query
results. It is useful when read queries are issued frequently and write queries
are not.

**Example:**

.. code-block:: python

    from arangoasync import ArangoClient
    from arangoasync.auth import Auth

    # Initialize the client for ArangoDB.
    async with ArangoClient(hosts="http://localhost:8529") as client:
        auth = Auth(username="root", password="passwd")

        # Connect to "test" database as root user.
        db = await client.db("test", auth=auth)

        # Get the AQL API wrapper.
        aql = db.aql

        # Retrieve AQL query cache properties.
        await aql.cache.properties()

        # Configure AQL query cache properties.
        await aql.cache.configure(mode="demand", max_results=10000)

        # List results cache entries.
        entries = await aql.cache.entries()

        # List plan cache entries.
        plan_entries = await aql.cache.plan_entries()

        # Clear results in AQL query cache.
        await aql.cache.clear()

        # Clear results in AQL query plan cache.
        await aql.cache.clear_plan()

See :class:`arangoasync.aql.AQLQueryCache` for API specification.
diff --git a/docs/async.rst b/docs/async.rst
index a47b131..3fe31ff 100644
--- a/docs/async.rst
+++ b/docs/async.rst
@@ -1,6 +1,148 @@
Async API Execution
-------------------

In **asynchronous API executions**, the driver sends API requests to ArangoDB in
fire-and-forget style. The server processes them in the background, and
the results can be retrieved once available via :class:`arangoasync.job.AsyncJob` objects.

**Example:**

.. code-block:: python

    import asyncio
    from arangoasync import ArangoClient
    from arangoasync.auth import Auth
    from arangoasync.errno import HTTP_BAD_PARAMETER
    from arangoasync.exceptions import (
        AQLQueryExecuteError,
        AsyncJobCancelError,
        AsyncJobClearError,
    )

    # Initialize the client for ArangoDB.
    async with ArangoClient(hosts="http://localhost:8529") as client:
        auth = Auth(username="root", password="passwd")

        # Connect to "test" database as root user.
        db = await client.db("test", auth=auth)

        # Begin async execution. This returns an instance of AsyncDatabase, a
        # database-level API wrapper tailored specifically for async execution.
        async_db = db.begin_async_execution(return_result=True)

        # Child wrappers are also tailored for async execution.
        async_aql = async_db.aql
        async_col = async_db.collection("students")

        # API execution context is always set to "async".
        assert async_db.context == "async"
        assert async_aql.context == "async"
        assert async_col.context == "async"

        # On API execution, AsyncJob objects are returned instead of results.
        job1 = await async_col.insert({"_key": "Neal"})
        job2 = await async_col.insert({"_key": "Lily"})
        job3 = await async_aql.execute("RETURN 100000")
        job4 = await async_aql.execute("INVALID QUERY")  # Fails due to syntax error.

        # Retrieve the status of each async job.
        for job in [job1, job2, job3, job4]:
            # Job status can be "pending" or "done".
            assert await job.status() in {"pending", "done"}

            # Let's wait until the jobs are finished, without blocking the event loop.
            while await job.status() != "done":
                await asyncio.sleep(0.1)

        # Retrieve the results of successful jobs.
        metadata = await job1.result()
        assert metadata["_id"] == "students/Neal"

        metadata = await job2.result()
        assert metadata["_id"] == "students/Lily"

        cursor = await job3.result()
        assert await cursor.next() == 100000

        # If a job fails, the exception is propagated up during result retrieval.
        try:
            result = await job4.result()
        except AQLQueryExecuteError as err:
            assert err.http_code == HTTP_BAD_PARAMETER

        # Cancel a job. Only pending jobs still in the queue may be cancelled.
        # Since job3 is done, there is nothing to cancel and an exception is raised.
        try:
            await job3.cancel()
        except AsyncJobCancelError as err:
            print(err.message)

        # Clear the result of a job from the ArangoDB server to free up resources.
        # The result of job4 was removed from the server automatically upon
        # retrieval, so attempting to clear it raises an exception.
        try:
            await job4.clear()
        except AsyncJobClearError as err:
            print(err.message)

        # List the IDs of the first 100 async jobs completed.
        jobs_done = await db.async_jobs(status="done", count=100)

        # List the IDs of the first 100 async jobs still pending.
        jobs_pending = await db.async_jobs(status="pending", count=100)

        # Clear all async jobs still sitting on the server.
        await db.clear_async_jobs()

Cursors returned from async API wrappers will no longer send async requests when they fetch more results, but behave
like regular cursors instead. This makes sense, because the point of cursors is iteration, whereas async jobs are meant
for one-shot requests. However, the first result retrieval is still async, and only then is the cursor returned, making
async AQL requests effective for queries with a long execution time.

**Example:**

.. code-block:: python

    from arangoasync import ArangoClient
    from arangoasync.auth import Auth

    # Initialize the client for ArangoDB.
    async with ArangoClient(hosts="http://localhost:8529") as client:
        auth = Auth(username="root", password="passwd")

        # Connect to "test" database as root user.
        db = await client.db("test", auth=auth)

        # Get the API wrapper for "students" collection.
        students = db.collection("students")

        # Insert some documents into the collection.
        await students.insert_many([{"_key": "Neal"}, {"_key": "Lily"}])

        # Begin async execution.
        async_db = db.begin_async_execution(return_result=True)

        aql = async_db.aql
        job = await aql.execute(
            f"FOR d IN {students.name} SORT d._key RETURN d",
            count=True,
            batch_size=1,
            ttl=1000,
        )
        await job.wait()

        # Iterate through the cursor.
        # Although the request to fetch the cursor is async, its underlying
        # executor is no longer async. Next batches will be fetched in real-time.
        doc_cnt = 0
        cursor = await job.result()
        async with cursor as ctx:
            async for _ in ctx:
                doc_cnt += 1
        assert doc_cnt == 2

.. note::
    Be mindful of server-side memory capacity when issuing a large number of
    async requests in a short time interval.

See :class:`arangoasync.database.AsyncDatabase` and :class:`arangoasync.job.AsyncJob` for API specification.
diff --git a/docs/authentication.rst b/docs/authentication.rst
new file mode 100644
index 0000000..b7dff45
--- /dev/null
+++ b/docs/authentication.rst
@@ -0,0 +1,117 @@
Authentication
--------------

Two HTTP authentication methods are supported out of the box:

1. Basic username and password authentication
2. JSON Web Tokens (JWT)

Basic Authentication
====================

This is the default authentication method.

**Example:**

.. code-block:: python

    from arangoasync import ArangoClient
    from arangoasync.auth import Auth

    # Initialize the client for ArangoDB.
    async with ArangoClient(hosts="http://localhost:8529") as client:
        auth = Auth(
            username="root",
            password="passwd",
            encoding="utf-8"  # Encoding for the password, default is utf-8.
        )

        # Connect to "test" database as root user.
        db = await client.db(
            "test",               # database name
            auth_method="basic",  # use basic authentication (default)
            auth=auth,            # authentication details
            verify=True,          # verify the connection (optional)
        )

JSON Web Tokens (JWT)
=====================

You can obtain a JWT token from the server using your username and password.
Upon expiration, the token gets refreshed automatically and requests are retried.
The client and server clocks must be synchronized for the automatic refresh
to work correctly.

**Example:**

.. code-block:: python

    from arangoasync import ArangoClient
    from arangoasync.auth import Auth

    # Initialize the client for ArangoDB.
    async with ArangoClient(hosts="http://localhost:8529") as client:
        auth = Auth(username="root", password="passwd")

        # Successful authentication with auth only.
        db = await client.db(
            "test",
            auth_method="jwt",
            auth=auth,
            verify=True,
        )

        # Now you have the token on hand.
        token = db.connection.token

        # You can use the token directly.
        db = await client.db("test", auth_method="jwt", token=token, verify=True)

        # In order to allow the token to be automatically refreshed, you should
        # use both auth and token.
        db = await client.db(
            "test",
            auth_method="jwt",
            auth=auth,
            token=token,
            verify=True,
        )

        # Force a token refresh.
        await db.connection.refresh_token()
        new_token = db.connection.token

        # Log in with the first token.
        db2 = await client.db(
            "test",
            auth_method="jwt",
            token=token,
            verify=True,
        )

        # You can manually set tokens.
        db2.connection.token = new_token
        await db2.connection.ping()


If you have configured a superuser token, you don't need to provide any credentials.

**Example:**

.. code-block:: python

    from arangoasync import ArangoClient
    from arangoasync.auth import JwtToken

    # Initialize the client for ArangoDB.
    async with ArangoClient(hosts="http://localhost:8529") as client:

        # Generate a JWT token for authentication. You must know the "secret".
        token = JwtToken.generate_token("secret")

        # Superuser authentication, no need for the auth parameter.
        db = await client.db(
            "test",
            auth_method="superuser",
            token=token,
            verify=True
        )
diff --git a/docs/certificates.rst b/docs/certificates.rst
new file mode 100644
index 0000000..c0665fa
--- /dev/null
+++ b/docs/certificates.rst
@@ -0,0 +1,110 @@
TLS
---

When you need fine-grained control over TLS settings, you can build a Python
:class:`ssl.SSLContext` and hand it to the :class:`arangoasync.http.DefaultHTTPClient` class.
Here are the most common patterns.


Basic client-side HTTPS with default settings
=============================================

Create a “secure by default” client context. This will verify server certificates against your
OS trust store and check hostnames.

**Example:**

.. code-block:: python

    from arangoasync import ArangoClient
    from arangoasync.auth import Auth
    from arangoasync.http import DefaultHTTPClient
    import ssl

    # Create a default client context.
    ssl_ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)
    http_client = DefaultHTTPClient(ssl_context=ssl_ctx)

    # Initialize the client for ArangoDB.
    client = ArangoClient(
        hosts="https://localhost:8529",
        http_client=http_client,
    )

Custom CA bundle
================

If you have a custom CA bundle, you can use it to trust a private CA.

**Example:**

.. code-block:: python

    from arangoasync import ArangoClient
    from arangoasync.auth import Auth
    from arangoasync.http import DefaultHTTPClient
    import ssl

    # Use a custom CA bundle.
    ssl_ctx = ssl.create_default_context(cafile="path/to/ca.pem")
    http_client = DefaultHTTPClient(ssl_context=ssl_ctx)

    # Initialize the client for ArangoDB.
    client = ArangoClient(
        hosts="https://localhost:8529",
        http_client=http_client,
    )

Disabling certificate verification
==================================

If you want to disable *all* certificate checks (not recommended), create an unverified
context.

**Example:**

.. code-block:: python

    from arangoasync import ArangoClient
    from arangoasync.auth import Auth
    from arangoasync.http import DefaultHTTPClient
    import ssl

    # Disable certificate verification.
    ssl_ctx = ssl._create_unverified_context()
    http_client = DefaultHTTPClient(ssl_context=ssl_ctx)

    # Initialize the client for ArangoDB.
    client = ArangoClient(
        hosts="https://localhost:8529",
        http_client=http_client,
    )

Use a client certificate chain
==============================

**Example:**

.. code-block:: python

    from arangoasync import ArangoClient
    from arangoasync.auth import Auth
    from arangoasync.http import DefaultHTTPClient
    import ssl

    # Load a certificate chain.
    ssl_ctx = ssl.create_default_context(cafile="path/to/ca.pem")
    ssl_ctx.load_cert_chain(certfile="path/to/cert.pem", keyfile="path/to/key.pem")
    http_client = DefaultHTTPClient(ssl_context=ssl_ctx)

    # Initialize the client for ArangoDB.
    client = ArangoClient(
        hosts="https://localhost:8529",
        http_client=http_client,
    )

.. note::
    For best performance, re-use one SSLContext across many requests/sessions to amortize handshake cost.

If you want to have fine-grained control over the HTTP connection, you should define
your HTTP client as described in the :ref:`HTTP` section.
diff --git a/docs/collection.rst b/docs/collection.rst
index 42487f6..e6a846f 100644
--- a/docs/collection.rst
+++ b/docs/collection.rst
@@ -3,7 +3,12 @@ Collections

A **collection** contains :doc:`documents `. It is uniquely identified
by its name which must consist only of hyphen, underscore and alphanumeric
characters. There are three types of collections in python-arango-async:

* **Standard Collection:** contains regular documents.
* **Vertex Collection:** contains vertex documents for graphs (not supported yet).
* **Edge Collection:** contains edge documents for graphs (not supported yet).


Here is an example showing how you can manage standard collections:

@@ -40,3 +45,5 @@ Here is an example showing how you can manage standard collections:

    # Delete the collection.
    await db.delete_collection("students")

See :class:`arangoasync.collection.StandardCollection` for API specification.
diff --git a/docs/compression.rst b/docs/compression.rst
new file mode 100644
index 0000000..114f83e
--- /dev/null
+++ b/docs/compression.rst
@@ -0,0 +1,56 @@
Compression
-----------

The :class:`arangoasync.client.ArangoClient` lets you define the preferred compression policy for requests and responses. By default
compression is disabled. You can change this by passing the `compression` parameter when creating the client. You may use
:class:`arangoasync.compression.DefaultCompressionManager` or a custom subclass of :class:`arangoasync.compression.CompressionManager`.

.. code-block:: python

    from arangoasync import ArangoClient
    from arangoasync.compression import DefaultCompressionManager

    client = ArangoClient(
        hosts="http://localhost:8529",
        compression=DefaultCompressionManager(),
    )

Furthermore, you can customize the request compression policy by defining the minimum size of the request body that
should be compressed and the desired compression level.
Or, in order to explicitly disable compression, you can set the +threshold parameter to -1. + +.. code-block:: python + + from arangoasync import ArangoClient + from arangoasync.compression import DefaultCompressionManager + + # Disable request compression. + client1 = ArangoClient( + hosts="http://localhost:8529", + compression=DefaultCompressionManager(threshold=-1), + ) + + # Enable request compression with a minimum size of 2 KB and a compression level of 8. + client2 = ArangoClient( + hosts="http://localhost:8529", + compression=DefaultCompressionManager(threshold=2048, level=8), + ) + +You can set the `accept` parameter in order to inform the server that the client prefers compressed responses (in the form +of an *Accept-Encoding* header). By default the `DefaultCompressionManager` is configured to accept responses compressed using +the *deflate* algorithm. Note that the server may or may not honor this preference, depending on how it is +configured. This can be controlled by setting the `--http.compress-response-threshold` option to a value greater than 0 +when starting the ArangoDB server. + +.. code-block:: python + + from arangoasync import ArangoClient + from arangoasync.compression import AcceptEncoding, DefaultCompressionManager + + # Accept compressed responses explicitly. + client = ArangoClient( + hosts="http://localhost:8529", + compression=DefaultCompressionManager(accept=AcceptEncoding.DEFLATE), + ) + +See the :class:`arangoasync.compression.CompressionManager` class for more details on how to customize the compression policy. diff --git a/docs/cursor.rst b/docs/cursor.rst new file mode 100644 index 0000000..9d2d2bf --- /dev/null +++ b/docs/cursor.rst @@ -0,0 +1,217 @@ +Cursors +------- + +Many operations provided by the driver (e.g. executing :doc:`aql` queries) +return result **cursors** to batch the network communication between ArangoDB +server and the client. Each HTTP request from a cursor fetches the +next batch of results (usually documents). Depending on the query, the total +number of items in the result set may or may not be known in advance. + +**Example:** + +.. code-block:: python + + from arangoasync import ArangoClient + from arangoasync.auth import Auth + + # Initialize the client for ArangoDB. + async with ArangoClient(hosts="http://localhost:8529") as client: + auth = Auth(username="root", password="passwd") + + # Connect to "test" database as root user. + db = await client.db("test", auth=auth) + + # Set up some test data to query against. + await db.collection("students").insert_many([ + {"_key": "Abby", "age": 22}, + {"_key": "John", "age": 18}, + {"_key": "Mary", "age": 21}, + {"_key": "Suzy", "age": 23}, + {"_key": "Dave", "age": 20} + ]) + + # Execute an AQL query which returns a cursor object. + cursor = await db.aql.execute( + "FOR doc IN students FILTER doc.age > @val RETURN doc", + bind_vars={"val": 17}, + batch_size=2, + count=True + ) + + # Get the cursor ID. + cid = cursor.id + + # Get the items in the current batch. + batch = cursor.batch + + # Check if the current batch is empty. + is_empty = cursor.empty() + + # Get the total count of the result set. + cnt = cursor.count + + # Flag indicating if there are more to be fetched from server. + has_more = cursor.has_more + + # Flag indicating if the results are cached. + is_cached = cursor.cached + + # Get the cursor statistics. + stats = cursor.statistics + + # Get the performance profile. + profile = cursor.profile + + # Get any warnings produced from the query. 
    warnings = cursor.warnings

    # Return the next item from the cursor. If the current batch is depleted, the
    # next batch is fetched from the server automatically.
    await cursor.next()

    # Return the next item from the cursor. If the current batch is depleted, an
    # exception is thrown. You need to fetch the next batch manually.
    cursor.pop()

    # Fetch the next batch and add it to the cursor object.
    await cursor.fetch()

    # Delete the cursor from the server.
    await cursor.close()

See :class:`arangoasync.cursor.Cursor` for API specification.

Cursors can be used together with a context manager to ensure that the resources get freed up
when the cursor is no longer needed. Asynchronous iteration is also supported, allowing you to
iterate over the cursor results without blocking the event loop.

**Example:**

.. code-block:: python

    from arangoasync import ArangoClient
    from arangoasync.auth import Auth
    from arangoasync.exceptions import CursorCloseError

    # Initialize the client for ArangoDB.
    async with ArangoClient(hosts="http://localhost:8529") as client:
        auth = Auth(username="root", password="passwd")

        # Connect to "test" database as root user.
        db = await client.db("test", auth=auth)

        # Set up some test data to query against.
        await db.collection("students").insert_many([
            {"_key": "Abby", "age": 22},
            {"_key": "John", "age": 18},
            {"_key": "Mary", "age": 21},
            {"_key": "Suzy", "age": 23},
            {"_key": "Dave", "age": 20}
        ])

        # Execute an AQL query which returns a cursor object.
        cursor = await db.aql.execute(
            "FOR doc IN students FILTER doc.age > @val RETURN doc",
            bind_vars={"val": 17},
            batch_size=2,
            count=True
        )

        # Iterate over the cursor in an async context manager.
        async with cursor as ctx:
            async for student in ctx:
                print(student)

        # The cursor is automatically closed when exiting the context manager.
        try:
            await cursor.close()
        except CursorCloseError:
            print("Cursor already closed!")

If the fetched result batch is depleted while you are iterating over a cursor
(or while calling the method :func:`arangoasync.cursor.Cursor.next`), the driver
automatically sends an HTTP request to the server in order to fetch the next batch
(just-in-time style). To control exactly when the fetches occur, you can use
methods like :func:`arangoasync.cursor.Cursor.fetch` and :func:`arangoasync.cursor.Cursor.pop`
instead.

**Example:**

.. code-block:: python

    from arangoasync import ArangoClient
    from arangoasync.auth import Auth

    # Initialize the client for ArangoDB.
    async with ArangoClient(hosts="http://localhost:8529") as client:
        auth = Auth(username="root", password="passwd")

        # Connect to "test" database as root user.
        db = await client.db("test", auth=auth)

        # Set up some test data to query against.
        await db.collection("students").insert_many([
            {"_key": "Abby", "age": 22},
            {"_key": "John", "age": 18},
            {"_key": "Mary", "age": 21}
        ])

        # You can manually fetch and pop for finer control.
        cursor = await db.aql.execute("FOR doc IN students RETURN doc", batch_size=1)
        while cursor.has_more:  # Fetch until nothing is left on the server.
            await cursor.fetch()
        while not cursor.empty():  # Pop until nothing is left on the cursor.
            student = cursor.pop()
            print(student)

You can use the `allow_retry` parameter of :func:`arangoasync.aql.AQL.execute`
to automatically retry the request if the cursor encountered any issues during
the previous fetch operation.
Note that this feature causes the server to +cache the last batch. To allow re-fetching of the very last batch of the query, +the server cannot automatically delete the cursor. Once you have successfully +received the last batch, you should call :func:`arangoasync.cursor.Cursor.close`, +or use a context manager to ensure the cursor is closed properly. + +**Example:** + +.. code-block:: python + + from arangoasync import ArangoClient + from arangoasync.auth import Auth + from arangoasync.typings import QueryProperties + + # Initialize the client for ArangoDB. + async with ArangoClient(hosts="http://localhost:8529") as client: + auth = Auth(username="root", password="passwd") + + # Connect to "test" database as root user. + db = await client.db("test", auth=auth) + + # Set up some test data to query against. + await db.collection("students").insert_many([ + {"_key": "Abby", "age": 22}, + {"_key": "John", "age": 18}, + {"_key": "Mary", "age": 21} + ]) + + cursor = await db.aql.execute( + "FOR doc IN students RETURN doc", + batch_size=1, + options=QueryProperties(allow_retry=True) + ) + + while cursor.has_more: + try: + await cursor.fetch() + except ConnectionError: + # Retry the request. + continue + + while not cursor.empty(): + student = cursor.pop() + print(student) + + # Delete the cursor from the server. + await cursor.close() + +For more information about various query properties, see :class:`arangoasync.typings.QueryProperties`. diff --git a/docs/database.rst b/docs/database.rst index f510cb2..851cc9d 100644 --- a/docs/database.rst +++ b/docs/database.rst @@ -59,3 +59,5 @@ information. # Delete the database. Note that the new users will remain. await sys_db.delete_database("test") + +See :class:`arangoasync.client.ArangoClient` and :class:`arangoasync.database.StandardDatabase` for API specification. diff --git a/docs/document.rst b/docs/document.rst index 3398bf9..ff9121e 100644 --- a/docs/document.rst +++ b/docs/document.rst @@ -20,26 +20,26 @@ properties: to validate a document against its current revision. For more information on documents and associated terminologies, refer to -`ArangoDB manual`_. Here is an example of a valid document in "students" +`ArangoDB Manual`_. Here is an example of a valid document in "students" collection: -.. _ArangoDB manual: https://docs.arangodb.com +.. _ArangoDB Manual: https://docs.arangodb.com -.. testcode:: +.. code-block:: json { - '_id': 'students/bruce', - '_key': 'bruce', - '_rev': '_Wm3dzEi--_', - 'first_name': 'Bruce', - 'last_name': 'Wayne', - 'address': { - 'street' : '1007 Mountain Dr.', - 'city': 'Gotham', - 'state': 'NJ' + "_id": "students/bruce", + "_key": "bruce", + "_rev": "_Wm3dzEi--_", + "first_name": "Bruce", + "last_name": "Wayne", + "address": { + "street" : "1007 Mountain Dr.", + "city": "Gotham", + "state": "NJ" }, - 'is_rich': True, - 'friends': ['robin', 'gordon'] + "is_rich": true, + "friends": ["robin", "gordon"] } Standard documents are managed via collection API wrapper: @@ -129,3 +129,5 @@ Standard documents are managed via collection API wrapper: # Delete one or more matching documents. await students.delete_match({"first": "Emma"}) + +See :class:`arangoasync.database.StandardDatabase` and :class:`arangoasync.collection.StandardCollection` for API specification. diff --git a/docs/errno.rst b/docs/errno.rst index f4ee457..06011fd 100644 --- a/docs/errno.rst +++ b/docs/errno.rst @@ -1,11 +1,11 @@ Error Codes ----------- -Python-Arango-Async provides ArangoDB error code constants for convenience. 
+ArangoDB error code constants are provided for convenience.

**Example:**

.. code-block:: python

    from arangoasync import errno

    # Some error constants.
    assert errno.DOCUMENT_REV_BAD == 1239
    assert errno.DOCUMENT_NOT_FOUND == 1202

You can see the full list of error codes in the `errno.py`_ file.

For more information, refer to the `ArangoDB Manual`_.

.. _ArangoDB Manual: https://www.arangodb.com/docs/stable/appendix-error-codes.html
.. _errno.py: https://github.com/arangodb/python-arango-async/blob/main/arangoasync/errno.py
diff --git a/docs/errors.rst b/docs/errors.rst
index cba6d92..87036f0 100644
--- a/docs/errors.rst
+++ b/docs/errors.rst
@@ -5,6 +5,20 @@
All python-arango-async exceptions inherit :class:`arangoasync.exceptions.ArangoError`,
which splits into subclasses :class:`arangoasync.exceptions.ArangoServerError` and
:class:`arangoasync.exceptions.ArangoClientError`.

**Example:**

.. code-block:: python

    from arangoasync.exceptions import ArangoClientError, ArangoServerError

    try:
        ...  # Some operation that may raise an error.
    except ArangoClientError:
        ...  # An error occurred on the client side.
    except ArangoServerError:
        ...  # An error occurred on the server side.


Server Errors
=============

:class:`arangoasync.exceptions.ArangoServerError` exceptions lightly wrap non-2xx
HTTP responses coming from ArangoDB. Each exception object contains the error
message, error code, and HTTP request and response details.

**Example:**

.. code-block:: python

    from arangoasync import ArangoClient, ArangoServerError, DocumentInsertError
    from arangoasync.auth import Auth

    # Initialize the client for ArangoDB.
    async with ArangoClient(hosts="http://localhost:8529") as client:
        auth = Auth(username="root", password="passwd")

        # Connect to "test" database as root user.
        db = await client.db("test", auth=auth)

        # Get the API wrapper for "students" collection.
        students = db.collection("students")

        try:
            await students.insert({"_key": "John"})
            await students.insert({"_key": "John"})  # duplicate key error
        except DocumentInsertError as err:
            assert isinstance(err, ArangoServerError)
            assert err.source == "server"

            msg = err.message            # Exception message usually from ArangoDB
            err_msg = err.error_message  # Raw error message from ArangoDB
            code = err.error_code        # Error code from ArangoDB
            url = err.url                # URL (API endpoint)
            method = err.http_method     # HTTP method (e.g. "POST")
            headers = err.http_headers   # Response headers
            http_code = err.http_code    # Status code (e.g. 200)

            # You can inspect the ArangoDB response directly.
            response = err.response
            method = response.method            # HTTP method
            headers = response.headers          # Response headers
            url = response.url                  # Full request URL
            success = response.is_success       # Set to True if HTTP code is 2XX
            raw_body = response.raw_body        # Raw string response body
            status_txt = response.status_text   # Status text (e.g. "OK")
            status_code = response.status_code  # Status code (e.g. 200)
            err_code = response.error_code      # Error code from ArangoDB

            # You can also inspect the request sent to ArangoDB.
            request = err.request
            method = request.method      # HTTP method
            endpoint = request.endpoint  # API endpoint starting with "/_api"
            headers = request.headers    # Request headers
            params = request.params      # URL parameters
            data = request.data          # Request payload
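The ``error_code`` can be matched against the constants from the :doc:`errno` module to
react to specific server-side failures. A minimal sketch, building on the duplicate-key
case above and assuming the usual ``UNIQUE_CONSTRAINT_VIOLATED`` constant (error 1210):

.. code-block:: python

    from arangoasync import errno

    try:
        await students.insert({"_key": "John"})
    except DocumentInsertError as err:
        # A duplicate key violates the unique constraint of the primary index.
        if err.error_code == errno.UNIQUE_CONSTRAINT_VIOLATED:
            print("A document with this key already exists.")
        else:
            raise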
Client Errors
=============

:class:`arangoasync.exceptions.ArangoClientError` exceptions originate from the
driver itself. They do not contain error codes or HTTP request and response details.

**Example:**

.. code-block:: python

    from arangoasync import ArangoClient, ArangoClientError, DocumentParseError
    from arangoasync.auth import Auth

    # Initialize the client for ArangoDB.
    async with ArangoClient(hosts="http://localhost:8529") as client:
        auth = Auth(username="root", password="passwd")

        # Connect to "test" database as root user.
        db = await client.db("test", auth=auth)

        # Get the API wrapper for "students" collection.
        students = db.collection("students")

        try:
            await students.get({"_id": "invalid_id"})  # malformed document
        except DocumentParseError as err:
            assert isinstance(err, ArangoClientError)
            assert err.source == "client"

            # Only the error message is set.
            print(err.message)

Exceptions
==========

Below is the full list of exceptions.

.. automodule:: arangoasync.exceptions
    :members:
diff --git a/docs/helpers.rst b/docs/helpers.rst
new file mode 100644
index 0000000..e16fe0c
--- /dev/null
+++ b/docs/helpers.rst
@@ -0,0 +1,88 @@
.. _Helpers:

Helper Types
------------

The driver comes with a set of helper types and wrappers to make it easier to work with the ArangoDB API. These are
designed to behave like dictionaries, but with some additional features and methods. See the
:class:`arangoasync.typings.JsonWrapper` class for more details.

**Example:**

.. code-block:: python

    from arangoasync import ArangoClient
    from arangoasync.auth import Auth
    from arangoasync.typings import QueryProperties

    # Initialize the client for ArangoDB.
    async with ArangoClient(hosts="http://localhost:8529") as client:
        auth = Auth(username="root", password="passwd")

        # Connect to "test" database as root user.
        db = await client.db("test", auth=auth)

        properties = QueryProperties(
            allow_dirty_reads=True,
            allow_retry=False,
            fail_on_warning=True,
            fill_block_cache=False,
            full_count=True,
            intermediate_commit_count=1000,
            intermediate_commit_size=1048576,
            max_dnf_condition_members=10,
            max_nodes_per_callstack=100,
            max_number_of_plans=5,
            max_runtime=60.0,
            max_transaction_size=10485760,
            max_warning_count=10,
            optimizer={"rules": ["-all", "+use-indexes"]},
            profile=1,
            satellite_sync_wait=10.0,
            skip_inaccessible_collections=True,
            spill_over_threshold_memory_usage=10485760,
            spill_over_threshold_num_rows=100000,
            stream=True,
            use_plan_cache=True,
        )

        # The types are fully serializable.
        print(properties)

        await db.aql.execute(
            "FOR doc IN students RETURN doc",
            batch_size=1,
            options=properties,
        )
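Because the wrappers behave like dictionaries, the usual mapping operations work on them as
well. A minimal sketch, assuming the dict-style access described above (the ``UserInfo``
fields mirror the formatter example below):

.. code-block:: python

    from arangoasync.typings import UserInfo

    info = UserInfo(user="john", password="secret", active=True)

    # Read fields with the usual mapping syntax.
    assert info["user"] == "john"
    assert "active" in info

    # Iterate over the keys, like a regular dict.
    for key in info:
        print(key, info[key])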
You can easily customize the data representation using formatters. By default, keys are in the format used by the ArangoDB
API, but you can change them to snake_case if you prefer. See :func:`arangoasync.typings.JsonWrapper.format` for more details.

**Example:**

.. code-block:: python

    from arangoasync.typings import Json, UserInfo

    data = {
        "user": "john",
        "password": "secret",
        "active": True,
        "extra": {"role": "admin"},
    }
    user_info = UserInfo(**data)

    def uppercase_formatter(data: Json) -> Json:
        result: Json = {}
        for key, value in data.items():
            result[key.upper()] = value
        return result

    print(user_info.format(uppercase_formatter))

Helpers
=======

Below are all the available helpers.

.. automodule:: arangoasync.typings
    :members:
diff --git a/docs/http.rst b/docs/http.rst
new file mode 100644
index 0000000..53a5480
--- /dev/null
+++ b/docs/http.rst
@@ -0,0 +1,136 @@
.. _HTTP:

HTTP
----

You can define your own HTTP client for sending requests to the
ArangoDB server. The default implementation uses the aiohttp_ library.

Your HTTP client must inherit :class:`arangoasync.http.HTTPClient` and implement the
following abstract methods:

* :func:`arangoasync.http.HTTPClient.create_session`
* :func:`arangoasync.http.HTTPClient.close_session`
* :func:`arangoasync.http.HTTPClient.send_request`

Take the default implementation, :class:`arangoasync.http.AioHTTPClient`, as an example:

* The **create_session** method returns a :class:`aiohttp.ClientSession` instance per
  connected host (coordinator). The session objects are stored in the client.
* The **close_session** method performs the necessary cleanup for a :class:`aiohttp.ClientSession` instance.
  This is usually called only by the client.
* The **send_request** method must use the session to send an HTTP request, and
  return a fully populated instance of :class:`arangoasync.response.Response`.

**Example:**

Suppose you're working on a project that uses httpx_ as a dependency and you want your
own HTTP client implementation on top of :class:`httpx.AsyncClient`. Your ``HttpxHTTPClient``
class might look something like this:

.. code-block:: python

    import httpx
    import ssl
    from typing import Any, Optional
    from arangoasync.exceptions import ClientConnectionError
    from arangoasync.http import HTTPClient
    from arangoasync.request import Request
    from arangoasync.response import Response

    class HttpxHTTPClient(HTTPClient):
        """HTTP client implementation on top of httpx.AsyncClient.

        Args:
            limits (httpx.Limits | None): Connection pool limits.
            timeout (httpx.Timeout | float | None): Request timeout settings.
            ssl_context (ssl.SSLContext | bool): SSL validation mode.
                `True` (default) uses httpx’s default validation (system CAs).
                `False` disables SSL checks.
                Or pass a custom `ssl.SSLContext`.
        """
+ """ + + def __init__( + self, + limits: Optional[httpx.Limits] = None, + timeout: Optional[httpx.Timeout | float] = None, + ssl_context: bool | ssl.SSLContext = True, + ) -> None: + self._limits = limits or httpx.Limits( + max_connections=100, + max_keepalive_connections=20 + ) + self._timeout = timeout or httpx.Timeout(300.0, connect=60.0) + if ssl_context is True: + self._verify: bool | ssl.SSLContext = True + elif ssl_context is False: + self._verify = False + else: + self._verify = ssl_context + + def create_session(self, host: str) -> httpx.AsyncClient: + return httpx.AsyncClient( + base_url=host, + limits=self._limits, + timeout=self._timeout, + verify=self._verify, + ) + + async def close_session(self, session: httpx.AsyncClient) -> None: + await session.aclose() + + async def send_request( + self, + session: httpx.AsyncClient, + request: Request, + ) -> Response: + auth: Any = None + if request.auth is not None: + auth = httpx.BasicAuth( + username=request.auth.username, + password=request.auth.password, + ) + + try: + resp = await session.request( + method=request.method.name, + url=request.endpoint, + headers=request.normalized_headers(), + params=request.normalized_params(), + content=request.data, + auth=auth, + ) + raw_body = resp.content + return Response( + method=request.method, + url=str(resp.url), + headers=resp.headers, + status_code=resp.status_code, + status_text=resp.reason_phrase, + raw_body=raw_body, + ) + except httpx.HTTPError as e: + raise ClientConnectionError(str(e)) from e + +Then you would inject your client as follows: + +.. code-block:: python + + from arangoasync import ArangoClient + from arangoasync.auth import Auth + + # Initialize the client for ArangoDB. + async with ArangoClient( + hosts="http://localhost:8529", + http_client=HttpxHTTPClient(), + ) as client: + auth = Auth(username="root", password="passwd") + + # Connect to "test" database as root user. + db = await client.db("test", auth=auth, verify=True) + + # List all collections. + cols = await db.collections() + +.. _aiohttp: https://docs.aiohttp.org/en/stable/ +.. _httpx: https://www.python-httpx.org/ diff --git a/docs/index.rst b/docs/index.rst index 9e71989..3252629 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -5,7 +5,7 @@ python-arango-async ------------------- -Welcome to the documentation for **python-arango-async**, a Python driver for ArangoDB_. +Welcome to the documentation for python-arango-async_, a Python driver for ArangoDB_. **Note: This project is still in active development, features might be added or removed.** @@ -13,7 +13,7 @@ Requirements ============= - ArangoDB version 3.11+ -- Python version 3.9+ +- Python version 3.10+ Installation ============ @@ -25,7 +25,7 @@ Installation Contents ======== -Basics +**Basics** .. toctree:: :maxdepth: 1 @@ -37,36 +37,45 @@ Basics document aql -Specialized Features +**Specialized Features** .. toctree:: :maxdepth: 1 transaction -API Executions +**API Executions** .. toctree:: :maxdepth: 1 async -Administration +**Administration** .. toctree:: :maxdepth: 1 user -Miscellaneous +**Miscellaneous** .. toctree:: :maxdepth: 1 + cursor + authentication + http + certificates + compression + serialization errors errno + logging + helpers + migration -Development +**Development** .. toctree:: :maxdepth: 1 @@ -74,3 +83,4 @@ Development specs .. _ArangoDB: https://www.arangodb.com +.. 
.. _python-arango-async: https://github.com/arangodb/python-arango-async
diff --git a/docs/indexes.rst b/docs/indexes.rst
index e8ae208..911efaa 100644
--- a/docs/indexes.rst
+++ b/docs/indexes.rst
@@ -5,9 +5,9 @@ Indexes
collection has a primary hash index on ``_key`` field by default. This index
cannot be deleted or modified. Every edge collection has additional indexes
on fields ``_from`` and ``_to``. For more information on indexes, refer to
`ArangoDB Manual`_.

.. _ArangoDB Manual: https://docs.arangodb.com

**Example:**

@@ -30,11 +30,11 @@ on fields ``_from`` and ``_to``. For more information on indexes, refer to

    indexes = await cities.indexes()

    # Add a new persistent index on document fields "continent" and "country".
    # Indexes may be added with a name that can be referred to in AQL queries.
    persistent_index = await cities.add_index(
        type="persistent",
        fields=['continent', 'country'],
        options={"unique": True, "name": "continent_country_index"}
    )

    # Add new fulltext indexes on fields "continent" and "country".

    # Delete the last index from the collection.
    await cities.delete_index(index["id"])

See :class:`arangoasync.collection.StandardCollection` for API specification.
diff --git a/docs/logging.rst b/docs/logging.rst
new file mode 100644
index 0000000..bd7eeb3
--- /dev/null
+++ b/docs/logging.rst
@@ -0,0 +1,30 @@
Logging
-------

If it helps to debug your application, you can enable logging to see all the requests sent by the driver to the ArangoDB server.

.. code-block:: python

    import logging
    from arangoasync import ArangoClient
    from arangoasync.auth import Auth
    from arangoasync.logger import logger

    # Set up logging.
    logging.basicConfig(level=logging.DEBUG)
    logger.setLevel(level=logging.DEBUG)

    # Initialize the client for ArangoDB.
    async with ArangoClient(hosts="http://localhost:8529") as client:
        auth = Auth(username="root", password="passwd")

        # Connect to "test" database as root user.
        db = await client.db("test", auth=auth)

        # Get the API wrapper for "students" collection.
        students = db.collection("students")

        # Insert a document into the collection.
        await students.insert({"name": "John Doe", "age": 25})

The insert generates a log message similar to: `DEBUG:arangoasync:Sending request to host 0 (0): `.
diff --git a/docs/migration.rst b/docs/migration.rst
new file mode 100644
index 0000000..f26e7d6
--- /dev/null
+++ b/docs/migration.rst
@@ -0,0 +1,94 @@
Coming from python-arango
-------------------------

Generally, migrating from `python-arango`_ should be a smooth transition. For the most part, the API is similar,
but there are a few things to note.

Helpers
=======

The current driver comes with :ref:`Helpers`, because we want to:

1. Facilitate better type hinting and auto-completion in IDEs.
2. Ensure an easier 1-to-1 mapping of the ArangoDB API.

For example, coming from the synchronous driver, creating a new user looks like this:

.. code-block:: python

    sys_db.create_user(
        username="johndoe@gmail.com",
        password="first_password",
        active=True,
        extra={"team": "backend", "title": "engineer"}
    )

In the asynchronous driver, it looks like this:
.. code-block:: python

    from arangoasync.typings import UserInfo

    user_info = UserInfo(
        user="johndoe@gmail.com",
        password="first_password",
        active=True,
        extra={"team": "backend", "title": "engineer"}
    )
    await sys_db.create_user(user_info)

CamelCase vs. snake_case
========================

Upon returning results, the synchronous driver mostly tries to stick to snake case. Unfortunately,
this is not always consistent.

.. code-block:: python

    status = db.status()
    assert "host" in status
    assert "operation_mode" in status

The asynchronous driver, however, tries to stick to a simple rule:

* If the API returns a camel case key, it will be returned as is.
* Parameters passed from client to server use the snake case equivalent of the camel case keys required by the API
  (e.g. `userName` becomes `user_name`). This is done to ensure PEP8 compatibility.

.. code-block:: python

    from arangoasync.typings import ServerStatusInformation

    status: ServerStatusInformation = await db.status()
    assert "host" in status
    assert "operationMode" in status
    print(status.host)
    print(status.operation_mode)

You can use the :func:`arangoasync.typings.JsonWrapper.format` method to gain more control over the formatting of
keys, as shown below.
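For instance, a formatter that rewrites camelCase keys to snake_case could look like the
following sketch (the regex-based conversion is an illustration, not part of the driver):

.. code-block:: python

    import re

    from arangoasync.typings import Json

    def snake_case_formatter(data: Json) -> Json:
        # Insert an underscore before every uppercase letter, then lowercase the key.
        result: Json = {}
        for key, value in data.items():
            result[re.sub(r"(?<!^)(?=[A-Z])", "_", key).lower()] = value
        return result

    status = await db.status()
    print(status.format(snake_case_formatter))  # e.g. "operationMode" -> "operation_mode"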
+implement a :func:`arangoasync.serialization.Serializer.dumps` method that can handle both
+single objects and sequences.
+
+Deserializers must inherit from the :class:`arangoasync.serialization.Deserializer` class. These have
+two methods, :func:`arangoasync.serialization.Deserializer.loads` and :func:`arangoasync.serialization.Deserializer.loads_many`,
+which must handle loading of a single document and multiple documents, respectively.
+
+JSON
+====
+
+Usually there's no need to implement your own JSON serializer/deserializer, but such an
+implementation could look like the following.
+
+**Example:**
+
+.. code-block:: python
+
+    import json
+    from typing import Sequence, cast
+    from arangoasync.collection import StandardCollection
+    from arangoasync.database import StandardDatabase
+    from arangoasync.exceptions import DeserializationError, SerializationError
+    from arangoasync.serialization import Serializer, Deserializer
+    from arangoasync.typings import Json, Jsons
+
+
+    class CustomJsonSerializer(Serializer[Json]):
+        def dumps(self, data: Json | Sequence[str | Json]) -> str:
+            try:
+                return json.dumps(data, separators=(",", ":"))
+            except Exception as e:
+                raise SerializationError("Failed to serialize data to JSON.") from e
+
+
+    class CustomJsonDeserializer(Deserializer[Json, Jsons]):
+        def loads(self, data: bytes) -> Json:
+            try:
+                return json.loads(data)  # type: ignore[no-any-return]
+            except Exception as e:
+                raise DeserializationError("Failed to deserialize data from JSON.") from e
+
+        def loads_many(self, data: bytes) -> Jsons:
+            return self.loads(data)  # type: ignore[return-value]
+
+You would then use the custom serializer/deserializer when creating a client:
+
+.. code-block:: python
+
+    from arangoasync import ArangoClient
+    from arangoasync.auth import Auth
+
+    # Initialize the client for ArangoDB.
+    async with ArangoClient(
+        hosts="http://localhost:8529",
+        serializer=CustomJsonSerializer(),
+        deserializer=CustomJsonDeserializer(),
+    ) as client:
+        auth = Auth(username="root", password="passwd")
+
+        # Connect to "test" database as root user.
+        test = await client.db("test", auth=auth)
+
+Documents
+=========
+
+By default, the JSON serializer/deserializer is used for documents too, but you can provide your own
+document serializer and deserializer for fine-grained control over the format of a collection. Say
+that you are modeling your student data using Pydantic_. You want to be able to insert documents
+of a certain type, and also be able to read them back. Moreover, you would like to get multiple documents
+back using one of the formats provided by pandas_.
+
+**Example:**
+
+.. code-block:: python
+
+    import json
+    import pandas as pd
+    import pydantic
+    import pydantic_core
+    from typing import Sequence, cast
+    from arangoasync import ArangoClient
+    from arangoasync.auth import Auth
+    from arangoasync.collection import StandardCollection
+    from arangoasync.database import StandardDatabase
+    from arangoasync.exceptions import DeserializationError, SerializationError
+    from arangoasync.serialization import Serializer, Deserializer
+    from arangoasync.typings import Json, Jsons
+
+
+    class Student(pydantic.BaseModel):
+        name: str
+        age: int
+
+
+    class StudentSerializer(Serializer[Student]):
+        def dumps(self, data: Student | Sequence[Student | str]) -> str:
+            try:
+                if isinstance(data, Student):
+                    return data.model_dump_json()
+                else:
+                    # You are required to support both str and Student types.
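+                    # (get_many(), for instance, passes document keys through dumps() as plain strings)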
+ serialized_data = [] + for student in data: + if isinstance(student, str): + serialized_data.append(student) + else: + serialized_data.append(student.model_dump()) + return json.dumps(serialized_data, separators=(",", ":")) + except Exception as e: + raise SerializationError("Failed to serialize data.") from e + + + class StudentDeserializer(Deserializer[Student, pd.DataFrame]): + def loads(self, data: bytes) -> Student: + # Load a single document. + try: + return Student.model_validate(pydantic_core.from_json(data)) + except Exception as e: + raise DeserializationError("Failed to deserialize data.") from e + + def loads_many(self, data: bytes) -> pd.DataFrame: + # Load multiple documents. + return pd.DataFrame(json.loads(data)) + +You would then use the custom serializer/deserializer when working with collections: + +**Example:** + +.. code-block:: python + + async def main(): + # Initialize the client for ArangoDB. + async with ArangoClient( + hosts="http://localhost:8529", + serializer=CustomJsonSerializer(), + deserializer=CustomJsonDeserializer(), + ) as client: + auth = Auth(username="root", password="passwd") + + # Connect to "test" database as root user. + db: StandardDatabase = await client.db("test", auth=auth, verify=True) + + # Populate the "students" collection. + col = cast( + StandardCollection[Student, Student, pd.DataFrame], + db.collection( + "students", + doc_serializer=StudentSerializer(), + doc_deserializer=StudentDeserializer()), + ) + + # Insert one document. + doc = cast(Json, await col.insert(Student(name="John Doe", age=20))) + + # Insert multiple documents. + docs = cast(Jsons, await col.insert_many([ + Student(name="Jane Doe", age=22), + Student(name="Alice Smith", age=19), + Student(name="Bob Johnson", age=21), + ])) + + # Get one document. + john = await col.get(doc) + assert type(john) == Student + + # Get multiple documents. + keys = [doc["_key"] for doc in docs] + students = await col.get_many(keys) + assert type(students) == pd.DataFrame + +.. _Pydantic: https://docs.pydantic.dev/latest/ +.. _pandas: https://pandas.pydata.org/ diff --git a/docs/specs.rst b/docs/specs.rst index 2de6ae9..dc92bd9 100644 --- a/docs/specs.rst +++ b/docs/specs.rst @@ -34,9 +34,6 @@ python-arango-async. .. automodule:: arangoasync.connection :members: -.. automodule:: arangoasync.exceptions - :members: - .. automodule:: arangoasync.http :members: @@ -49,8 +46,5 @@ python-arango-async. .. automodule:: arangoasync.response :members: -.. automodule:: arangoasync.typings - :members: - .. automodule:: arangoasync.result :members: diff --git a/docs/transaction.rst b/docs/transaction.rst index 225e226..e36738d 100644 --- a/docs/transaction.rst +++ b/docs/transaction.rst @@ -3,3 +3,79 @@ Transactions In **transactions**, requests to ArangoDB server are committed as a single, logical unit of work (ACID compliant). + +**Example:** + +.. code-block:: python + + from arangoasync import ArangoClient + from arangoasync.auth import Auth + + # Initialize the client for ArangoDB. + async with ArangoClient(hosts="http://localhost:8529") as client: + auth = Auth(username="root", password="passwd") + + # Connect to "test" database as root user. + db = await client.db("test", auth=auth) + + # Get the API wrapper for "students" collection. + students = db.collection("students") + + # Begin a transaction. Read and write collections must be declared ahead of + # time. This returns an instance of TransactionDatabase, database-level + # API wrapper tailored specifically for executing transactions. 
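+    # Write operations on collections that were not declared here will be rejected.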
+ txn_db = await db.begin_transaction(read=students.name, write=students.name) + + # The API wrapper is specific to a single transaction with a unique ID. + trx_id = txn_db.transaction_id + + # Child wrappers are also tailored only for the specific transaction. + txn_aql = txn_db.aql + txn_col = txn_db.collection("students") + + # API execution context is always set to "transaction". + assert txn_db.context == "transaction" + assert txn_aql.context == "transaction" + assert txn_col.context == "transaction" + + assert "_rev" in await txn_col.insert({"_key": "Abby"}) + assert "_rev" in await txn_col.insert({"_key": "John"}) + assert "_rev" in await txn_col.insert({"_key": "Mary"}) + + # Check the transaction status. + status = await txn_db.transaction_status() + + # Commit the transaction. + await txn_db.commit_transaction() + assert await students.has("Abby") + assert await students.has("John") + assert await students.has("Mary") + assert await students.count() == 3 + + # Begin another transaction. Note that the wrappers above are specific to + # the last transaction and cannot be reused. New ones must be created. + txn_db = await db.begin_transaction(read=students.name, write=students.name) + txn_col = txn_db.collection("students") + assert "_rev" in await txn_col.insert({"_key": "Kate"}) + assert "_rev" in await txn_col.insert({"_key": "Mike"}) + assert "_rev" in await txn_col.insert({"_key": "Lily"}) + assert await txn_col.count() == 6 + + # Abort the transaction + await txn_db.abort_transaction() + assert not await students.has("Kate") + assert not await students.has("Mike") + assert not await students.has("Lily") + assert await students.count() == 3 # transaction is aborted so txn_col cannot be used + + # Fetch an existing transaction. Useful if you have received a Transaction ID + # from an external system. + original_txn = await db.begin_transaction(write='students') + txn_col = original_txn.collection('students') + assert '_rev' in await txn_col.insert({'_key': 'Chip'}) + txn_db = db.fetch_transaction(original_txn.transaction_id) + txn_col = txn_db.collection('students') + assert '_rev' in await txn_col.insert({'_key': 'Alya'}) + await txn_db.abort_transaction() + +See :class:`arangoasync.database.TransactionDatabase` for API specification. diff --git a/docs/user.rst b/docs/user.rst index 015858c..c5184a5 100644 --- a/docs/user.rst +++ b/docs/user.rst @@ -1,5 +1,93 @@ Users and Permissions --------------------- -Python-arango provides operations for managing users and permissions. Most of +ArangoDB provides operations for managing users and permissions. Most of these operations can only be performed by admin users via ``_system`` database. + +.. code-block:: python + + from arangoasync import ArangoClient + from arangoasync.auth import Auth + from arangoasync.typings import UserInfo + + # Initialize the client for ArangoDB. + async with ArangoClient(hosts="http://localhost:8529") as client: + auth = Auth(username="root", password="passwd") + + # Connect to "_system" database as root user. + sys_db = await client.db("_system", auth=auth) + + # List all users. + users = await sys_db.users() + + johndoe = UserInfo( + user="johndoe@gmail.com", + password="first_password", + active=True, + extra={"team": "backend", "title": "engineer"} + ) + + # Create a new user. + await sys_db.create_user(johndoe) + + # Check if a user exists. + assert await sys_db.has_user(johndoe.user) is True + assert await sys_db.has_user("johndoe@gmail.com") is True + + # Retrieve details of a user. 
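+    # (sys_db.user() returns a UserInfo object, like the one used at creation)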
+ user_info = await sys_db.user(johndoe.user) + assert user_info.user == "johndoe@gmail.com" + + # Update an existing user. + johndoe["password"] = "second_password" + await sys_db.update_user(johndoe) + + # Replace an existing user. + johndoe["password"] = "third_password" + await sys_db.replace_user(johndoe) + + # Retrieve user permissions for all databases and collections. + await sys_db.permissions(johndoe.user) + + # Retrieve user permission for "test" database. + perm = await sys_db.permission( + username="johndoe@gmail.com", + database="test" + ) + + # Retrieve user permission for "students" collection in "test" database. + perm = await sys_db.permission( + username="johndoe@gmail.com", + database="test", + collection="students" + ) + + # Update user permission for "test" database. + await sys_db.update_permission( + username="johndoe@gmail.com", + permission="rw", + database="test" + ) + + # Update user permission for "students" collection in "test" database. + await sys_db.update_permission( + username="johndoe@gmail.com", + permission="ro", + database="test", + collection="students" + ) + + # Reset user permission for "test" database. + await sys_db.reset_permission( + username="johndoe@gmail.com", + database="test" + ) + + # Reset user permission for "students" collection in "test" database. + await sys_db.reset_permission( + username="johndoe@gmail.com", + database="test", + collection="students" + ) + +See :class:`arangoasync.database.StandardDatabase` for API specification. diff --git a/tests/test_async.py b/tests/test_async.py index c4f7988..1bd3bda 100644 --- a/tests/test_async.py +++ b/tests/test_async.py @@ -126,7 +126,7 @@ async def test_async_cursor(db, doc_col, docs): ) await job.wait() - # Get the cursor. Bear in mind that its underlying executor is async. + # Get the cursor. Bear in mind that its underlying executor is no longer async. 
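+    # It now fetches cursor batches directly instead of returning new async jobs.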
doc_cnt = 0 cursor = await job.result() async with cursor as ctx: diff --git a/tests/test_client.py b/tests/test_client.py index 718d307..6210412 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -102,15 +102,15 @@ async def test_client_jwt_auth(url, sys_db_name, basic_auth_root): async with ArangoClient(hosts=url) as client: await client.db(sys_db_name, auth_method="jwt", token=token, verify=True) - # successful authentication with both - async with ArangoClient(hosts=url) as client: - await client.db( - sys_db_name, - auth_method="jwt", - auth=basic_auth_root, - token=token, - verify=True, - ) + # successful authentication with both + async with ArangoClient(hosts=url) as client: + await client.db( + sys_db_name, + auth_method="jwt", + auth=basic_auth_root, + token=token, + verify=True, + ) # auth and token missing async with ArangoClient(hosts=url) as client: From 9957ba48ca7d52f2716073f6da2f0fce41b643db Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Sun, 11 May 2025 16:39:52 +0000 Subject: [PATCH 02/47] Fixing pypi (#46) * Checking out tags * Fixing setuptools deprecation warnings * Updating python versions --- .github/workflows/pypi.yaml | 3 +++ README.md | 2 +- pyproject.toml | 8 ++++---- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/.github/workflows/pypi.yaml b/.github/workflows/pypi.yaml index 5bfab90..8106d23 100644 --- a/.github/workflows/pypi.yaml +++ b/.github/workflows/pypi.yaml @@ -10,6 +10,9 @@ jobs: steps: - uses: actions/checkout@v3 + with: + fetch-depth: 0 + fetch-tags: true - uses: actions/setup-python@v4 with: diff --git a/README.md b/README.md index ea91bbf..4f6cd2b 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ [![Last commit](https://img.shields.io/github/last-commit/arangodb/python-arango-async)](https://github.com/arangodb/python-arango-async/commits/main) [![PyPI version badge](https://img.shields.io/pypi/v/python-arango-async?color=3775A9&style=for-the-badge&logo=pypi&logoColor=FFD43B)](https://pypi.org/project/python-arango-async/) -[![Python versions badge](https://img.shields.io/badge/3.9%2B-3776AB?style=for-the-badge&logo=python&logoColor=FFD43B&label=Python)](https://pypi.org/project/python-arango-async/) +[![Python versions badge](https://img.shields.io/badge/3.10%2B-3776AB?style=for-the-badge&logo=python&logoColor=FFD43B&label=Python)](https://pypi.org/project/python-arango-async/) [![License](https://img.shields.io/github/license/arangodb/python-arango?color=9E2165&style=for-the-badge)](https://github.com/arangodb/python-arango/blob/main/LICENSE) [![Code style: black](https://img.shields.io/static/v1?style=for-the-badge&label=code%20style&message=black&color=black)](https://github.com/psf/black) diff --git a/pyproject.toml b/pyproject.toml index d5003c4..c5c890f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,19 +19,19 @@ maintainers = [ keywords = ["arangodb", "python", "driver", "async"] readme = "README.md" dynamic = ["version"] -license = { file = "LICENSE" } -requires-python = ">=3.9" +license = "MIT" +license-files = ["LICENSE"] +requires-python = ">=3.10" classifiers = [ "Intended Audience :: Developers", - "License :: OSI Approved :: MIT License", "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", "Topic :: Documentation :: 
Sphinx", "Typing :: Typed", ] From bd6632a5c0b6045b21afefe363854d2cb88996d3 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Sun, 11 May 2025 16:53:49 +0000 Subject: [PATCH 03/47] Fetching all tags (#47) * Fetching all tags * Small change --- .github/workflows/pypi.yaml | 9 +++++---- docs/serialization.rst | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/.github/workflows/pypi.yaml b/.github/workflows/pypi.yaml index 8106d23..5f99d4a 100644 --- a/.github/workflows/pypi.yaml +++ b/.github/workflows/pypi.yaml @@ -9,10 +9,11 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - with: - fetch-depth: 0 - fetch-tags: true + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Fetch all tags and branches + run: git fetch --prune --unshallow - uses: actions/setup-python@v4 with: diff --git a/docs/serialization.rst b/docs/serialization.rst index 1866ee3..9fe520e 100644 --- a/docs/serialization.rst +++ b/docs/serialization.rst @@ -133,7 +133,7 @@ back using one of the formats provided by pandas_. # Load multiple documents. return pd.DataFrame(json.loads(data)) -You would then use the custom serializer/deserializer when working with collections: +You would then use the custom serializer/deserializer when working with collections. **Example:** From 0b8c7425e67c600997ca8a45753c4485e6e5da1f Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Sun, 11 May 2025 17:13:14 +0000 Subject: [PATCH 04/47] checking tags (#48) --- .github/workflows/pypi.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/pypi.yaml b/.github/workflows/pypi.yaml index 5f99d4a..9ccf59d 100644 --- a/.github/workflows/pypi.yaml +++ b/.github/workflows/pypi.yaml @@ -11,9 +11,9 @@ jobs: steps: - name: Checkout repository uses: actions/checkout@v3 - - - name: Fetch all tags and branches - run: git fetch --prune --unshallow + with: + fetch-depth: 0 + fetch-tags: true - uses: actions/setup-python@v4 with: From b4c346614d22f2598e53de59fe30fe256cd53f15 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Sun, 11 May 2025 17:21:01 +0000 Subject: [PATCH 05/47] checking tags (#49) --- .github/workflows/pypi.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/pypi.yaml b/.github/workflows/pypi.yaml index 9ccf59d..6ef8a07 100644 --- a/.github/workflows/pypi.yaml +++ b/.github/workflows/pypi.yaml @@ -14,6 +14,7 @@ jobs: with: fetch-depth: 0 fetch-tags: true + ref: ${{ github.event.release.tag_name }} - uses: actions/setup-python@v4 with: From ca57e8034c06ccc1d43a1ad1060316e27973e7de Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Sun, 11 May 2025 17:25:54 +0000 Subject: [PATCH 06/47] bumping the version (#50) --- .github/workflows/pypi.yaml | 1 - arangoasync/version.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/pypi.yaml b/.github/workflows/pypi.yaml index 6ef8a07..9ccf59d 100644 --- a/.github/workflows/pypi.yaml +++ b/.github/workflows/pypi.yaml @@ -14,7 +14,6 @@ jobs: with: fetch-depth: 0 fetch-tags: true - ref: ${{ github.event.release.tag_name }} - uses: actions/setup-python@v4 with: diff --git a/arangoasync/version.py b/arangoasync/version.py index 3b93d0b..27fdca4 100644 --- a/arangoasync/version.py +++ b/arangoasync/version.py @@ -1 +1 @@ -__version__ = "0.0.2" +__version__ = "0.0.3" From 741e3a3048fa628ee1737671de14c61ee374facd Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Sun, 11 May 2025 19:39:20 +0000 Subject: [PATCH 07/47] Basic graph support (#51) --- 
arangoasync/database.py | 175 ++++++++++++++++++++++++++++++++++++++ arangoasync/exceptions.py | 12 +++ arangoasync/graph.py | 21 +++++ arangoasync/typings.py | 128 ++++++++++++++++++++++++++-- tests/test_graph.py | 37 ++++++++ tests/test_typings.py | 38 +++++++++ 6 files changed, 406 insertions(+), 5 deletions(-) create mode 100644 arangoasync/graph.py create mode 100644 tests/test_graph.py diff --git a/arangoasync/database.py b/arangoasync/database.py index e1200df..60f6ee9 100644 --- a/arangoasync/database.py +++ b/arangoasync/database.py @@ -23,6 +23,9 @@ DatabaseDeleteError, DatabaseListError, DatabasePropertiesError, + GraphCreateError, + GraphDeleteError, + GraphListError, JWTSecretListError, JWTSecretReloadError, PermissionGetError, @@ -50,6 +53,7 @@ DefaultApiExecutor, TransactionApiExecutor, ) +from arangoasync.graph import Graph from arangoasync.request import Method, Request from arangoasync.response import Response from arangoasync.result import Result @@ -58,6 +62,8 @@ CollectionInfo, CollectionType, DatabaseProperties, + GraphOptions, + GraphProperties, Json, Jsons, KeyOptions, @@ -655,6 +661,175 @@ def response_handler(resp: Response) -> bool: return await self._executor.execute(request, response_handler) + def graph(self, name: str) -> Graph: + """Return the graph API wrapper. + + Args: + name (str): Graph name. + + Returns: + Graph: Graph API wrapper. + """ + return Graph(self._executor, name) + + async def has_graph(self, name: str) -> Result[bool]: + """Check if a graph exists in the database. + + Args: + name (str): Graph name. + + Returns: + bool: True if the graph exists, False otherwise. + + Raises: + GraphListError: If the operation fails. + """ + request = Request(method=Method.GET, endpoint=f"/_api/gharial/{name}") + + def response_handler(resp: Response) -> bool: + if resp.is_success: + return True + if resp.status_code == HTTP_NOT_FOUND: + return False + raise GraphListError(resp, request) + + return await self._executor.execute(request, response_handler) + + async def graphs(self) -> Result[List[GraphProperties]]: + """List all graphs stored in the database. + + Returns: + list: Graph properties. + + Raises: + GraphListError: If the operation fails. + + References: + - `list-all-graphs `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_api/gharial") + + def response_handler(resp: Response) -> List[GraphProperties]: + if not resp.is_success: + raise GraphListError(resp, request) + body = self.deserializer.loads(resp.raw_body) + return [GraphProperties(u) for u in body["graphs"]] + + return await self._executor.execute(request, response_handler) + + async def create_graph( + self, + name: str, + edge_definitions: Optional[Sequence[Json]] = None, + is_disjoint: Optional[bool] = None, + is_smart: Optional[bool] = None, + options: Optional[GraphOptions | Json] = None, + orphan_collections: Optional[Sequence[str]] = None, + wait_for_sync: Optional[bool] = None, + ) -> Result[Graph]: + """Create a new graph. + + Args: + name (str): Graph name. + edge_definitions (list | None): List of edge definitions, where each edge + definition entry is a dictionary with fields "collection" (name of the + edge collection), "from" (list of vertex collection names) and "to" + (list of vertex collection names). + is_disjoint (bool | None): Whether to create a Disjoint SmartGraph + instead of a regular SmartGraph (Enterprise Edition only). + is_smart (bool | None): Define if the created graph should be smart + (Enterprise Edition only). 
+ options (GraphOptions | dict | None): Options for creating collections + within this graph. + orphan_collections (list | None): An array of additional vertex + collections. Documents in these collections do not have edges + within this graph. + wait_for_sync (bool | None): If `True`, wait until everything is + synced to disk. + + Returns: + Graph: Graph API wrapper. + + Raises: + GraphCreateError: If the operation fails. + + References: + - `create-a-graph `__ + """ # noqa: E501 + params: Params = {} + if wait_for_sync is not None: + params["waitForSync"] = wait_for_sync + + data: Json = {"name": name} + if edge_definitions is not None: + data["edgeDefinitions"] = edge_definitions + if is_disjoint is not None: + data["isDisjoint"] = is_disjoint + if is_smart is not None: + data["isSmart"] = is_smart + if options is not None: + if isinstance(options, GraphOptions): + data["options"] = options.to_dict() + else: + data["options"] = options + if orphan_collections is not None: + data["orphanCollections"] = orphan_collections + + request = Request( + method=Method.POST, + endpoint="/_api/gharial", + data=self.serializer.dumps(data), + params=params, + ) + + def response_handler(resp: Response) -> Graph: + if resp.is_success: + return Graph(self._executor, name) + raise GraphCreateError(resp, request) + + return await self._executor.execute(request, response_handler) + + async def delete_graph( + self, + name: str, + drop_collections: Optional[bool] = None, + ignore_missing: bool = False, + ) -> Result[bool]: + """Drops an existing graph object by name. + + Args: + name (str): Graph name. + drop_collections (bool | None): Optionally all collections not used by + other graphs can be dropped as well. + ignore_missing (bool): Do not raise an exception on missing graph. + + Returns: + bool: True if the graph was deleted successfully, `False` if the + graph was not found but **ignore_missing** was set to `True`. + + Raises: + GraphDeleteError: If the operation fails. + + References: + - `drop-a-graph `__ + """ # noqa: E501 + params: Params = {} + if drop_collections is not None: + params["dropCollections"] = drop_collections + + request = Request( + method=Method.DELETE, endpoint=f"/_api/gharial/{name}", params=params + ) + + def response_handler(resp: Response) -> bool: + if not resp.is_success: + if resp.status_code == HTTP_NOT_FOUND and ignore_missing: + return False + raise GraphDeleteError(resp, request) + return True + + return await self._executor.execute(request, response_handler) + async def has_user(self, username: str) -> Result[bool]: """Check if a user exists. diff --git a/arangoasync/exceptions.py b/arangoasync/exceptions.py index 1274df2..a62e64e 100644 --- a/arangoasync/exceptions.py +++ b/arangoasync/exceptions.py @@ -263,6 +263,18 @@ class DocumentUpdateError(ArangoServerError): """Failed to update document.""" +class GraphCreateError(ArangoServerError): + """Failed to create the graph.""" + + +class GraphDeleteError(ArangoServerError): + """Failed to delete the graph.""" + + +class GraphListError(ArangoServerError): + """Failed to retrieve graphs.""" + + class IndexCreateError(ArangoServerError): """Failed to create collection index.""" diff --git a/arangoasync/graph.py b/arangoasync/graph.py new file mode 100644 index 0000000..2047d96 --- /dev/null +++ b/arangoasync/graph.py @@ -0,0 +1,21 @@ +from arangoasync.executor import ApiExecutor + + +class Graph: + """Graph API wrapper, representing a graph in ArangoDB. + + Args: + executor: API executor. 
Required to execute the API requests.
+    """
+
+    def __init__(self, executor: ApiExecutor, name: str) -> None:
+        self._executor = executor
+        self._name = name
+
+    def __repr__(self) -> str:
+        return f"<Graph {self._name}>"
+
+    @property
+    def name(self) -> str:
+        """Name of the graph."""
+        return self._name
diff --git a/arangoasync/typings.py b/arangoasync/typings.py
index 44631f8..86c32fd 100644
--- a/arangoasync/typings.py
+++ b/arangoasync/typings.py
@@ -167,6 +167,14 @@ def items(self) -> Iterator[Tuple[str, Any]]:
         """Return an iterator over the dictionary’s key-value pairs."""
         return iter(self._data.items())
 
+    def keys(self) -> Iterator[str]:
+        """Return an iterator over the dictionary’s keys."""
+        return iter(self._data.keys())
+
+    def values(self) -> Iterator[Any]:
+        """Return an iterator over the dictionary’s values."""
+        return iter(self._data.values())
+
     def to_dict(self) -> Json:
         """Return the dictionary."""
         return self._data
@@ -227,15 +235,15 @@ def __init__(
         data: Optional[Json] = None,
     ) -> None:
         if data is None:
-            data = {
+            data: Json = {  # type: ignore[no-redef]
                 "allowUserKeys": allow_user_keys,
                 "type": generator_type,
             }
             if increment is not None:
-                data["increment"] = increment
+                data["increment"] = increment  # type: ignore[index]
             if offset is not None:
-                data["offset"] = offset
-        super().__init__(data)
+                data["offset"] = offset  # type: ignore[index]
+        super().__init__(cast(Json, data))
 
     def validate(self) -> None:
         """Validate key options."""
@@ -386,7 +394,7 @@ def __init__(
         active: bool = True,
         extra: Optional[Json] = None,
     ) -> None:
-        data = {"user": user, "active": active}
+        data: Json = {"user": user, "active": active}
         if password is not None:
             data["password"] = password
         if extra is not None:
@@ -1644,3 +1652,113 @@ def max_entry_size(self) -> int:
     @property
     def include_system(self) -> bool:
         return cast(bool, self._data.get("includeSystem", False))
+
+
+class GraphProperties(JsonWrapper):
+    """Graph properties.
+
+    Example:
+        .. code-block:: json
+
+            {
+                "_key" : "myGraph",
+                "edgeDefinitions" : [
+                    {
+                        "collection" : "edges",
+                        "from" : [
+                            "startVertices"
+                        ],
+                        "to" : [
+                            "endVertices"
+                        ]
+                    }
+                ],
+                "orphanCollections" : [ ],
+                "_rev" : "_jJdpHEy--_",
+                "_id" : "_graphs/myGraph",
+                "name" : "myGraph"
+            }
+
+    References:
+        - `get-a-graph `__
+        - `list-all-graphs `__
+        - `create-a-graph `__
+    """  # noqa: E501
+
+    def __init__(self, data: Json) -> None:
+        super().__init__(data)
+
+    @property
+    def name(self) -> str:
+        return cast(str, self._data["name"])
+
+    @property
+    def edge_definitions(self) -> Jsons:
+        return cast(Jsons, self._data.get("edgeDefinitions", list()))
+
+    @property
+    def orphan_collections(self) -> List[str]:
+        return cast(List[str], self._data.get("orphanCollections", list()))
+
+
+class GraphOptions(JsonWrapper):
+    """Special options for graph creation.
+
+    Args:
+        number_of_shards (int): The number of shards that is used for every
+            collection within this graph. Cannot be modified later.
+        replication_factor (int | str): The replication factor used when initially
+            creating collections for this graph. Can be set to "satellite" to create
+            a SatelliteGraph, which then ignores `numberOfShards`,
+            `minReplicationFactor`, and `writeConcern` (Enterprise Edition only).
+        satellites (list[str] | None): An array of collection names that is used to
+            create SatelliteCollections for a (Disjoint) SmartGraph using
+            SatelliteCollections (Enterprise Edition only). Each array element must
+            be a string and a valid collection name.
+ smart_graph_attribute (str | None): The attribute name that is used to + smartly shard the vertices of a graph. Only available in + Enterprise Edition. + write_concern (int | None): The write concern for new collections in the + graph. + """ # noqa: E501 + + def __init__( + self, + number_of_shards: Optional[int], + replication_factor: Optional[int | str], + satellites: Optional[List[str]], + smart_graph_attribute: Optional[str], + write_concern: Optional[int], + ) -> None: + data: Json = dict() + if number_of_shards is not None: + data["numberOfShards"] = number_of_shards + if replication_factor is not None: + data["replicationFactor"] = replication_factor + if satellites is not None: + data["satellites"] = satellites + if smart_graph_attribute is not None: + data["smartGraphAttribute"] = smart_graph_attribute + if write_concern is not None: + data["writeConcern"] = write_concern + super().__init__(data) + + @property + def number_of_shards(self) -> Optional[int]: + return cast(int, self._data.get("numberOfShards")) + + @property + def replication_factor(self) -> Optional[int | str]: + return cast(int | str, self._data.get("replicationFactor")) + + @property + def satellites(self) -> Optional[List[str]]: + return cast(Optional[List[str]], self._data.get("satellites")) + + @property + def smart_graph_attribute(self) -> Optional[str]: + return cast(Optional[str], self._data.get("smartGraphAttribute")) + + @property + def write_concern(self) -> Optional[int]: + return cast(Optional[int], self._data.get("writeConcern")) diff --git a/tests/test_graph.py b/tests/test_graph.py new file mode 100644 index 0000000..0967ff9 --- /dev/null +++ b/tests/test_graph.py @@ -0,0 +1,37 @@ +import pytest + +from arangoasync.exceptions import GraphCreateError, GraphDeleteError, GraphListError + + +@pytest.mark.asyncio +async def test_graph_basic(db, bad_db): + # Test the graph representation + graph = db.graph("test_graph") + assert graph.name == "test_graph" + assert "test_graph" in repr(graph) + + # Cannot find any graph + assert await db.graphs() == [] + assert await db.has_graph("fake_graph") is False + with pytest.raises(GraphListError): + await bad_db.has_graph("fake_graph") + with pytest.raises(GraphListError): + await bad_db.graphs() + + # Create a graph + graph = await db.create_graph("test_graph", wait_for_sync=True) + assert graph.name == "test_graph" + with pytest.raises(GraphCreateError): + await bad_db.create_graph("test_graph") + + # Check if the graph exists + assert await db.has_graph("test_graph") is True + graphs = await db.graphs() + assert len(graphs) == 1 + assert graphs[0].name == "test_graph" + + # Delete the graph + await db.delete_graph("test_graph") + assert await db.has_graph("test_graph") is False + with pytest.raises(GraphDeleteError): + await bad_db.delete_graph("test_graph") diff --git a/tests/test_typings.py b/tests/test_typings.py index 9d8e2d5..7a40c33 100644 --- a/tests/test_typings.py +++ b/tests/test_typings.py @@ -4,6 +4,8 @@ CollectionInfo, CollectionStatus, CollectionType, + GraphOptions, + GraphProperties, JsonWrapper, KeyOptions, QueryCacheProperties, @@ -23,6 +25,9 @@ def test_basic_wrapper(): assert wrapper["a"] == 1 assert wrapper["b"] == 2 + assert list(wrapper.keys()) == ["a", "b"] + assert list(wrapper.values()) == [1, 2] + wrapper["c"] = 3 assert wrapper["c"] == 3 @@ -330,3 +335,36 @@ def test_QueryCacheProperties(): assert cache_properties._data["maxResults"] == 128 assert cache_properties._data["maxEntrySize"] == 1024 assert 
cache_properties._data["includeSystem"] is False + + +def test_GraphProperties(): + data = { + "name": "myGraph", + "edgeDefinitions": [ + {"collection": "edges", "from": ["vertices1"], "to": ["vertices2"]} + ], + "orphanCollections": ["orphan1", "orphan2"], + } + graph_properties = GraphProperties(data) + + assert graph_properties.name == "myGraph" + assert graph_properties.edge_definitions == [ + {"collection": "edges", "from": ["vertices1"], "to": ["vertices2"]} + ] + assert graph_properties.orphan_collections == ["orphan1", "orphan2"] + + +def test_GraphOptions(): + graph_options = GraphOptions( + number_of_shards=3, + replication_factor=2, + satellites=["satellite1", "satellite2"], + smart_graph_attribute="region", + write_concern=1, + ) + + assert graph_options.number_of_shards == 3 + assert graph_options.replication_factor == 2 + assert graph_options.satellites == ["satellite1", "satellite2"] + assert graph_options.smart_graph_attribute == "region" + assert graph_options.write_concern == 1 From db0a397e873fdf683da505199d9f61d771bed964 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Sun, 1 Jun 2025 12:26:48 +0300 Subject: [PATCH 08/47] Graph Collections (#52) * Highlighting boolean values * Adding vertex and edge collection skeleton * Refactoring serializers * Using randomized graph name * Improving helper types * Facilitating edge and vertex collection creation * Vertex collection management * Edge collection management * Adding cluster testcase * Adding note about dictionary-like indexing * Inserting and retrieving vertex documents * Moving methods from StandardCollection to base Collection so they are available to other subclasses * Adding CRUD for vertex collections * Adding "has" for vertex collections * Marking tests as asyncio * Inserting and retrieving edges * Event loop scope * Event loop scope again * Updating edge * Edges CRUD * Extra edge methods * Fixing lint * Added github gist example * Adding graph docs * Adding graphs example in the readme --- README.md | 60 + arangoasync/collection.py | 2507 +++++++++++++++++++++++++------------ arangoasync/database.py | 102 +- arangoasync/exceptions.py | 40 + arangoasync/graph.py | 1035 ++++++++++++++- arangoasync/typings.py | 134 +- docs/collection.rst | 6 +- docs/document.rst | 20 + docs/graph.rst | 415 ++++++ docs/index.rst | 1 + docs/overview.rst | 64 +- docs/serialization.rst | 6 + docs/specs.rst | 3 + tests/conftest.py | 16 +- tests/helpers.py | 9 + tests/test_graph.py | 395 +++++- tests/test_typings.py | 18 + 17 files changed, 3986 insertions(+), 845 deletions(-) create mode 100644 docs/graph.rst diff --git a/README.md b/README.md index 4f6cd2b..507c3e9 100644 --- a/README.md +++ b/README.md @@ -73,7 +73,67 @@ async def main(): student_names = [] async for doc in cursor: student_names.append(doc["name"]) +``` + +Another example with [graphs](https://docs.arangodb.com/stable/graphs/): +```python +async def main(): + from arangoasync import ArangoClient + from arangoasync.auth import Auth + + # Initialize the client for ArangoDB. + async with ArangoClient(hosts="http://localhost:8529") as client: + auth = Auth(username="root", password="passwd") + + # Connect to "test" database as root user. + db = await client.db("test", auth=auth) + + # Get the API wrapper for graph "school". + if await db.has_graph("school"): + graph = db.graph("school") + else: + graph = await db.create_graph("school") + + # Create vertex collections for the graph. 
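+    # These become the "from" and "to" vertex collections of the edge definition below.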
+    students = await graph.create_vertex_collection("students")
+    lectures = await graph.create_vertex_collection("lectures")
+
+    # Create an edge definition (relation) for the graph.
+    edges = await graph.create_edge_definition(
+        edge_collection="register",
+        from_vertex_collections=["students"],
+        to_vertex_collections=["lectures"]
+    )
+
+    # Insert vertex documents into "students" (from) vertex collection.
+    await students.insert({"_key": "01", "full_name": "Anna Smith"})
+    await students.insert({"_key": "02", "full_name": "Jake Clark"})
+    await students.insert({"_key": "03", "full_name": "Lisa Jones"})
+
+    # Insert vertex documents into "lectures" (to) vertex collection.
+    await lectures.insert({"_key": "MAT101", "title": "Calculus"})
+    await lectures.insert({"_key": "STA101", "title": "Statistics"})
+    await lectures.insert({"_key": "CSC101", "title": "Algorithms"})
+
+    # Insert edge documents into "register" edge collection.
+    await edges.insert({"_from": "students/01", "_to": "lectures/MAT101"})
+    await edges.insert({"_from": "students/01", "_to": "lectures/STA101"})
+    await edges.insert({"_from": "students/01", "_to": "lectures/CSC101"})
+    await edges.insert({"_from": "students/02", "_to": "lectures/MAT101"})
+    await edges.insert({"_from": "students/02", "_to": "lectures/STA101"})
+    await edges.insert({"_from": "students/03", "_to": "lectures/CSC101"})
+
+    # Traverse the graph in outbound direction, breadth-first.
+    query = """
+        FOR v, e, p IN 1..3 OUTBOUND 'students/01' GRAPH 'school'
+        OPTIONS { bfs: true, uniqueVertices: 'global' }
+        RETURN {vertex: v, edge: e, path: p}
+    """
+
+    async with await db.aql.execute(query) as cursor:
+        async for doc in cursor:
+            print(doc)
 ```
 
 Please see the [documentation](https://python-arango-async.readthedocs.io/en/latest/) for more details.
diff --git a/arangoasync/collection.py b/arangoasync/collection.py
index 3b4e5a9..c742714 100644
--- a/arangoasync/collection.py
+++ b/arangoasync/collection.py
@@ -1,7 +1,12 @@
-__all__ = ["Collection", "StandardCollection"]
+__all__ = [
+    "Collection",
+    "EdgeCollection",
+    "StandardCollection",
+    "VertexCollection",
+]
 
-from typing import Any, Generic, List, Optional, Sequence, Tuple, TypeVar, cast
+from typing import Any, Generic, List, Literal, Optional, Sequence, TypeVar, cast
 
 from arangoasync.cursor import Cursor
 from arangoasync.errno import (
@@ -21,6 +26,7 @@
     DocumentReplaceError,
     DocumentRevisionError,
     DocumentUpdateError,
+    EdgeListError,
     IndexCreateError,
     IndexDeleteError,
     IndexGetError,
@@ -70,6 +76,26 @@ def __init__(
         self._doc_deserializer = doc_deserializer
         self._id_prefix = f"{self._name}/"
 
+    @staticmethod
+    def get_col_name(doc: str | Json) -> str:
+        """Extract the collection name from the document.
+
+        Args:
+            doc (str | dict): Document ID or body with "_id" field.
+
+        Returns:
+            str: Collection name.
+
+        Raises:
+            DocumentParseError: If document ID is missing.
+        """
+        try:
+            doc_id: str = doc if isinstance(doc, str) else doc["_id"]
+        except KeyError:
+            raise DocumentParseError('field "_id" required')
+        else:
+            return doc_id.split("/", 1)[0]
+
     def _validate_id(self, doc_id: str) -> str:
         """Check the collection name in the document ID.
 
@@ -86,11 +112,13 @@ def _validate_id(self, doc_id: str) -> str:
             raise DocumentParseError(f'Bad collection name in document ID "{doc_id}"')
         return doc_id
 
-    def _extract_id(self, body: Json) -> str:
+    def _extract_id(self, body: Json, validate: bool = True) -> str:
         """Extract the document ID from document body.
 
         Args:
             body (dict): Document body.
+ validate (bool): Whether to validate the document ID, + checking if it belongs to the current collection. Returns: str: Document ID. @@ -100,7 +128,10 @@ def _extract_id(self, body: Json) -> str: """ try: if "_id" in body: - return self._validate_id(body["_id"]) + if validate: + return self._validate_id(body["_id"]) + else: + return cast(str, body["_id"]) else: key: str = body["_key"] return self._id_prefix + key @@ -115,6 +146,9 @@ def _ensure_key_from_id(self, body: Json) -> Json: Returns: dict: Document body with "_key" field if it has "_id" field. + + Raises: + DocumentParseError: If document is malformed. """ if "_id" in body and "_key" not in body: doc_id = self._validate_id(body["_id"]) @@ -122,41 +156,32 @@ def _ensure_key_from_id(self, body: Json) -> Json: body["_key"] = doc_id[len(self._id_prefix) :] return body - def _prep_from_doc( - self, - document: str | Json, - rev: Optional[str] = None, - check_rev: bool = False, - ) -> Tuple[str, Json]: - """Prepare document ID, body and request headers before a query. + def _get_doc_id(self, document: str | Json, validate: bool = True) -> str: + """Prepare document ID before a query. Args: document (str | dict): Document ID, key or body. - rev (str | None): Document revision. - check_rev (bool): Whether to check the revision. + validate (bool): Whether to validate the document ID, + checking if it belongs to the current collection. Returns: Document ID and request headers. Raises: DocumentParseError: On missing ID and key. - TypeError: On bad document type. """ - if isinstance(document, dict): - doc_id = self._extract_id(document) - rev = rev or document.get("_rev") - elif isinstance(document, str): + if isinstance(document, str): if "/" in document: - doc_id = self._validate_id(document) + if validate: + doc_id = self._validate_id(document) + else: + doc_id = document else: doc_id = self._id_prefix + document else: - raise TypeError("Document must be str or a dict") + doc_id = self._extract_id(document, validate) - if not check_rev or rev is None: - return doc_id, {} - else: - return doc_id, {"If-Match": rev} + return doc_id def _build_filter_conditions(self, filters: Optional[Json]) -> str: """Build filter conditions for an AQL query. @@ -456,29 +481,6 @@ def response_handler(resp: Response) -> bool: return await self._executor.execute(request, response_handler) - -class StandardCollection(Collection[T, U, V]): - """Standard collection API wrapper. - - Args: - executor (ApiExecutor): API executor. - name (str): Collection name - doc_serializer (Serializer): Document serializer. - doc_deserializer (Deserializer): Document deserializer. - """ - - def __init__( - self, - executor: ApiExecutor, - name: str, - doc_serializer: Serializer[T], - doc_deserializer: Deserializer[U, V], - ) -> None: - super().__init__(executor, name, doc_serializer, doc_deserializer) - - def __repr__(self) -> str: - return f"" - async def properties(self) -> Result[CollectionProperties]: """Return the full properties of the current collection. @@ -563,14 +565,14 @@ def response_handler(resp: Response) -> int: return await self._executor.execute(request, response_handler) - async def get( + async def has( self, document: str | Json, allow_dirty_read: bool = False, if_match: Optional[str] = None, if_none_match: Optional[str] = None, - ) -> Result[Optional[U]]: - """Return a document. + ) -> Result[bool]: + """Check if a document exists in the collection. Args: document (str | dict): Document ID, key or body. 
@@ -582,17 +584,16 @@ async def get( different revision than the given ETag. Returns: - Document or `None` if not found. + `True` if the document exists, `False` otherwise. Raises: DocumentRevisionError: If the revision is incorrect. DocumentGetError: If retrieval fails. - DocumentParseError: If the document is malformed. References: - - `get-a-document `__ + - `get-a-document-header `__ """ # noqa: E501 - handle, _ = self._prep_from_doc(document) + handle = self._get_doc_id(document) headers: RequestHeaders = {} if allow_dirty_read: @@ -603,19 +604,16 @@ async def get( headers["If-None-Match"] = if_none_match request = Request( - method=Method.GET, + method=Method.HEAD, endpoint=f"/_api/document/{handle}", headers=headers, ) - def response_handler(resp: Response) -> Optional[U]: + def response_handler(resp: Response) -> bool: if resp.is_success: - return self._doc_deserializer.loads(resp.raw_body) + return True elif resp.status_code == HTTP_NOT_FOUND: - if resp.error_code == DOCUMENT_NOT_FOUND: - return None - else: - raise DocumentGetError(resp, request) + return False elif resp.status_code == HTTP_PRECONDITION_FAILED: raise DocumentRevisionError(resp, request) else: @@ -623,255 +621,1103 @@ def response_handler(resp: Response) -> Optional[U]: return await self._executor.execute(request, response_handler) - async def has( + async def get_many( self, - document: str | Json, - allow_dirty_read: bool = False, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - ) -> Result[bool]: - """Check if a document exists in the collection. + documents: Sequence[str | T], + allow_dirty_read: Optional[bool] = None, + ignore_revs: Optional[bool] = None, + ) -> Result[V]: + """Return multiple documents ignoring any missing ones. Args: - document (str | dict): Document ID, key or body. - Document body must contain the "_id" or "_key" field. - allow_dirty_read (bool): Allow reads from followers in a cluster. - if_match (str | None): The document is returned, if it has the same - revision as the given ETag. - if_none_match (str | None): The document is returned, if it has a - different revision than the given ETag. + documents (list): List of document IDs, keys or bodies. A search document + must contain at least a value for the `_key` field. A value for `_rev` + may be specified to verify whether the document has the same revision + value, unless `ignoreRevs` is set to false. + allow_dirty_read (bool | None): Allow reads from followers in a cluster. + ignore_revs (bool | None): If set to `True`, the `_rev` attribute in the + document is ignored. If this is set to `False`, then the `_rev` + attribute given in the body document is taken as a precondition. + The document is only replaced if the current revision is the one + specified. Returns: - `True` if the document exists, `False` otherwise. + list: List of documents. Missing ones are not included. Raises: - DocumentRevisionError: If the revision is incorrect. DocumentGetError: If retrieval fails. 
References: - - `get-a-document-header `__ + - `get-multiple-documents `__ """ # noqa: E501 - handle, _ = self._prep_from_doc(document) + params: Params = {"onlyget": True} + if ignore_revs is not None: + params["ignoreRevs"] = ignore_revs headers: RequestHeaders = {} - if allow_dirty_read: - headers["x-arango-allow-dirty-read"] = "true" - if if_match is not None: - headers["If-Match"] = if_match - if if_none_match is not None: - headers["If-None-Match"] = if_none_match + if allow_dirty_read is not None: + if allow_dirty_read is True: + headers["x-arango-allow-dirty-read"] = "true" + else: + headers["x-arango-allow-dirty-read"] = "false" request = Request( - method=Method.HEAD, - endpoint=f"/_api/document/{handle}", + method=Method.PUT, + endpoint=f"/_api/document/{self.name}", + params=params, headers=headers, + data=self._doc_serializer.dumps(documents), ) - def response_handler(resp: Response) -> bool: - if resp.is_success: - return True - elif resp.status_code == HTTP_NOT_FOUND: - return False - elif resp.status_code == HTTP_PRECONDITION_FAILED: - raise DocumentRevisionError(resp, request) - else: + def response_handler(resp: Response) -> V: + if not resp.is_success: raise DocumentGetError(resp, request) + return self._doc_deserializer.loads_many(resp.raw_body) return await self._executor.execute(request, response_handler) - async def insert( + async def find( self, - document: T, - wait_for_sync: Optional[bool] = None, - return_new: Optional[bool] = None, - return_old: Optional[bool] = None, - silent: Optional[bool] = None, - overwrite: Optional[bool] = None, - overwrite_mode: Optional[str] = None, - keep_null: Optional[bool] = None, - merge_objects: Optional[bool] = None, - refill_index_caches: Optional[bool] = None, - version_attribute: Optional[str] = None, - ) -> Result[bool | Json]: - """Insert a new document. + filters: Optional[Json] = None, + skip: Optional[int] = None, + limit: Optional[int | str] = None, + allow_dirty_read: Optional[bool] = False, + sort: Optional[Jsons] = None, + ) -> Result[Cursor]: + """Return all documents that match the given filters. Args: - document (dict): Document to insert. If it contains the "_key" or "_id" - field, the value is used as the key of the new document (otherwise - it is auto-generated). Any "_rev" field is ignored. - wait_for_sync (bool | None): Wait until document has been synced to disk. - return_new (bool | None): Additionally return the complete new document - under the attribute `new` in the result. - return_old (bool | None): Additionally return the complete old document - under the attribute `old` in the result. Only available if the - `overwrite` option is used. - silent (bool | None): If set to `True`, no document metadata is returned. - This can be used to save resources. - overwrite (bool | None): If set to `True`, operation does not fail on - duplicate key and existing document is overwritten (replace-insert). - overwrite_mode (str | None): Overwrite mode. Supersedes **overwrite** - option. May be one of "ignore", "replace", "update" or "conflict". - keep_null (bool | None): If set to `True`, fields with value None are - retained in the document. Otherwise, they are removed completely. - Applies only when **overwrite_mode** is set to "update" - (update-insert). - merge_objects (bool | None): If set to `True`, sub-dictionaries are merged - instead of the new one overwriting the old one. Applies only when - **overwrite_mode** is set to "update" (update-insert). 
- refill_index_caches (bool | None): Whether to add new entries to - in-memory index caches if document insertions affect the edge index - or cache-enabled persistent indexes. - version_attribute (str | None): Support for simple external versioning to - document operations. Only applicable if **overwrite** is set to `True` - or **overwrite_mode** is set to "update" or "replace". + filters (dict | None): Query filters. + skip (int | None): Number of documents to skip. + limit (int | str | None): Maximum number of documents to return. + allow_dirty_read (bool): Allow reads from followers in a cluster. + sort (list | None): Document sort parameters. Returns: - bool | dict: Document metadata (e.g. document id, key, revision) or `True` - if **silent** is set to `True`. + Cursor: Document cursor. Raises: - DocumentInsertError: If insertion fails. - DocumentParseError: If the document is malformed. - - References: - - `create-a-document `__ - """ # noqa: E501 - if isinstance(document, dict): - # We assume that the document deserializer works with dictionaries. - document = cast(T, self._ensure_key_from_id(document)) + DocumentGetError: If retrieval fails. + SortValidationError: If sort parameters are invalid. + """ + if not self._is_none_or_dict(filters): + raise ValueError("filters parameter must be a dict") + self._validate_sort_parameters(sort) + if not self._is_none_or_int(skip): + raise ValueError("skip parameter must be a non-negative int") + if not (self._is_none_or_int(limit) or limit == "null"): + raise ValueError("limit parameter must be a non-negative int") - params: Params = {} - if wait_for_sync is not None: - params["waitForSync"] = wait_for_sync - if return_new is not None: - params["returnNew"] = return_new - if return_old is not None: - params["returnOld"] = return_old - if silent is not None: - params["silent"] = silent - if overwrite is not None: - params["overwrite"] = overwrite - if overwrite_mode is not None: - params["overwriteMode"] = overwrite_mode - if keep_null is not None: - params["keepNull"] = keep_null - if merge_objects is not None: - params["mergeObjects"] = merge_objects - if refill_index_caches is not None: - params["refillIndexCaches"] = refill_index_caches - if version_attribute is not None: - params["versionAttribute"] = version_attribute + skip = skip if skip is not None else 0 + limit = limit if limit is not None else "null" + query = f""" + FOR doc IN @@collection + {self._build_filter_conditions(filters)} + LIMIT {skip}, {limit} + {self._build_sort_expression(sort)} + RETURN doc + """ + bind_vars = {"@collection": self.name} + data: Json = {"query": query, "bindVars": bind_vars, "count": True} + headers: RequestHeaders = {} + if allow_dirty_read is not None: + if allow_dirty_read is True: + headers["x-arango-allow-dirty-read"] = "true" + else: + headers["x-arango-allow-dirty-read"] = "false" request = Request( method=Method.POST, - endpoint=f"/_api/document/{self._name}", - params=params, - data=self._doc_serializer.dumps(document), + endpoint="/_api/cursor", + data=self.serializer.dumps(data), + headers=headers, ) - def response_handler(resp: Response) -> bool | Json: - if resp.is_success: + def response_handler(resp: Response) -> Cursor: + if not resp.is_success: + raise DocumentGetError(resp, request) + if self._executor.context == "async": + # We cannot have a cursor giving back async jobs + executor: NonAsyncExecutor = DefaultApiExecutor( + self._executor.connection + ) + else: + executor = cast(NonAsyncExecutor, self._executor) + return 
Cursor(executor, self.deserializer.loads(resp.raw_body)) + + return await self._executor.execute(request, response_handler) + + async def update_match( + self, + filters: Json, + body: T, + limit: Optional[int | str] = None, + keep_none: Optional[bool] = None, + wait_for_sync: Optional[bool] = None, + merge_objects: Optional[bool] = None, + ) -> Result[int]: + """Update matching documents. + + Args: + filters (dict | None): Query filters. + body (dict): Full or partial document body with the updates. + limit (int | str | None): Maximum number of documents to update. + keep_none (bool | None): If set to `True`, fields with value `None` are + retained in the document. Otherwise, they are removed completely. + wait_for_sync (bool | None): Wait until operation has been synced to disk. + merge_objects (bool | None): If set to `True`, sub-dictionaries are merged + instead of the new one overwriting the old one. + + Returns: + int: Number of documents that got updated. + + Raises: + DocumentUpdateError: If update fails. + """ + if not self._is_none_or_dict(filters): + raise ValueError("filters parameter must be a dict") + if not (self._is_none_or_int(limit) or limit == "null"): + raise ValueError("limit parameter must be a non-negative int") + + sync = f", waitForSync: {wait_for_sync}" if wait_for_sync is not None else "" + query = f""" + FOR doc IN @@collection + {self._build_filter_conditions(filters)} + {f"LIMIT {limit}" if limit is not None else ""} + UPDATE doc WITH @body IN @@collection + OPTIONS {{ keepNull: @keep_none, mergeObjects: @merge {sync} }} + """ # noqa: E201 E202 + bind_vars = { + "@collection": self.name, + "body": body, + "keep_none": keep_none, + "merge": merge_objects, + } + data = {"query": query, "bindVars": bind_vars} + + request = Request( + method=Method.POST, + endpoint="/_api/cursor", + data=self.serializer.dumps(data), + ) + + def response_handler(resp: Response) -> int: + if resp.is_success: + result = self.deserializer.loads(resp.raw_body) + return cast(int, result["extra"]["stats"]["writesExecuted"]) + raise DocumentUpdateError(resp, request) + + return await self._executor.execute(request, response_handler) + + async def replace_match( + self, + filters: Json, + body: T, + limit: Optional[int | str] = None, + wait_for_sync: Optional[bool] = None, + ) -> Result[int]: + """Replace matching documents. + + Args: + filters (dict | None): Query filters. + body (dict): New document body. + limit (int | str | None): Maximum number of documents to replace. + wait_for_sync (bool | None): Wait until operation has been synced to disk. + + Returns: + int: Number of documents that got replaced. + + Raises: + DocumentReplaceError: If replace fails. 
+ """ + if not self._is_none_or_dict(filters): + raise ValueError("filters parameter must be a dict") + if not (self._is_none_or_int(limit) or limit == "null"): + raise ValueError("limit parameter must be a non-negative int") + + sync = f"waitForSync: {wait_for_sync}" if wait_for_sync is not None else "" + query = f""" + FOR doc IN @@collection + {self._build_filter_conditions(filters)} + {f"LIMIT {limit}" if limit is not None else ""} + REPLACE doc WITH @body IN @@collection + {f"OPTIONS {{ {sync} }}" if sync else ""} + """ # noqa: E201 E202 + bind_vars = { + "@collection": self.name, + "body": body, + } + data = {"query": query, "bindVars": bind_vars} + + request = Request( + method=Method.POST, + endpoint="/_api/cursor", + data=self.serializer.dumps(data), + ) + + def response_handler(resp: Response) -> int: + if resp.is_success: + result = self.deserializer.loads(resp.raw_body) + return cast(int, result["extra"]["stats"]["writesExecuted"]) + raise DocumentReplaceError(resp, request) + + return await self._executor.execute(request, response_handler) + + async def delete_match( + self, + filters: Json, + limit: Optional[int | str] = None, + wait_for_sync: Optional[bool] = None, + ) -> Result[int]: + """Delete matching documents. + + Args: + filters (dict | None): Query filters. + limit (int | str | None): Maximum number of documents to delete. + wait_for_sync (bool | None): Wait until operation has been synced to disk. + + Returns: + int: Number of documents that got deleted. + + Raises: + DocumentDeleteError: If delete fails. + """ + if not self._is_none_or_dict(filters): + raise ValueError("filters parameter must be a dict") + if not (self._is_none_or_int(limit) or limit == "null"): + raise ValueError("limit parameter must be a non-negative int") + + sync = f"waitForSync: {wait_for_sync}" if wait_for_sync is not None else "" + query = f""" + FOR doc IN @@collection + {self._build_filter_conditions(filters)} + {f"LIMIT {limit}" if limit is not None else ""} + REMOVE doc IN @@collection + {f"OPTIONS {{ {sync} }}" if sync else ""} + """ # noqa: E201 E202 + bind_vars = {"@collection": self.name} + data = {"query": query, "bindVars": bind_vars} + + request = Request( + method=Method.POST, + endpoint="/_api/cursor", + data=self.serializer.dumps(data), + ) + + def response_handler(resp: Response) -> int: + if resp.is_success: + result = self.deserializer.loads(resp.raw_body) + return cast(int, result["extra"]["stats"]["writesExecuted"]) + raise DocumentDeleteError(resp, request) + + return await self._executor.execute(request, response_handler) + + async def insert_many( + self, + documents: Sequence[T], + wait_for_sync: Optional[bool] = None, + return_new: Optional[bool] = None, + return_old: Optional[bool] = None, + silent: Optional[bool] = None, + overwrite: Optional[bool] = None, + overwrite_mode: Optional[str] = None, + keep_null: Optional[bool] = None, + merge_objects: Optional[bool] = None, + refill_index_caches: Optional[bool] = None, + version_attribute: Optional[str] = None, + ) -> Result[Jsons]: + """Insert multiple documents. + + Note: + If inserting a document fails, the exception is not raised but + returned as an object in the "errors" list. It is up to you to + inspect the list to determine which documents were inserted + successfully (returns document metadata) and which were not + (returns exception object). + + Args: + documents (list): Documents to insert. 
If an item contains the "_key" or + "_id" field, the value is used as the key of the new document + (otherwise it is auto-generated). Any "_rev" field is ignored. + wait_for_sync (bool | None): Wait until documents have been synced to disk. + return_new (bool | None): Additionally return the complete new document + under the attribute `new` in the result. + return_old (bool | None): Additionally return the complete old document + under the attribute `old` in the result. Only available if the + `overwrite` option is used. + silent (bool | None): If set to `True`, an empty object is returned as + response if all document operations succeed. No meta-data is returned + for the created documents. If any of the operations raises an error, + an array with the error object(s) is returned. + overwrite (bool | None): If set to `True`, operation does not fail on + duplicate key and existing document is overwritten (replace-insert). + overwrite_mode (str | None): Overwrite mode. Supersedes **overwrite** + option. May be one of "ignore", "replace", "update" or "conflict". + keep_null (bool | None): If set to `True`, fields with value None are + retained in the document. Otherwise, they are removed completely. + Applies only when **overwrite_mode** is set to "update" + (update-insert). + merge_objects (bool | None): If set to `True`, sub-dictionaries are merged + instead of the new one overwriting the old one. Applies only when + **overwrite_mode** is set to "update" (update-insert). + refill_index_caches (bool | None): Whether to add new entries to + in-memory index caches if document operations affect the edge index + or cache-enabled persistent indexes. + version_attribute (str | None): Support for simple external versioning to + document operations. Only applicable if **overwrite** is set to `True` + or **overwrite_mode** is set to "update" or "replace". + + Returns: + list: Documents metadata (e.g. document id, key, revision) and + errors or just errors if **silent** is set to `True`. + + Raises: + DocumentInsertError: If insertion fails. 
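+
+        Example (illustrative sketch; ``students`` is a placeholder handle and
+        the documents are made-up test data):
+
+            results = await students.insert_many(
+                [{"_key": "john", "age": 20}, {"_key": "jane", "age": 21}],
+                return_new=True,
+            )
+            # Each item is either document metadata or an error object.
+            inserted = [res for res in results if not res.get("error")]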
+
+        References:
+            - `create-multiple-documents `__
+        """  # noqa: E501
+        params: Params = {}
+        if wait_for_sync is not None:
+            params["waitForSync"] = wait_for_sync
+        if return_new is not None:
+            params["returnNew"] = return_new
+        if return_old is not None:
+            params["returnOld"] = return_old
+        if silent is not None:
+            params["silent"] = silent
+        if overwrite is not None:
+            params["overwrite"] = overwrite
+        if overwrite_mode is not None:
+            params["overwriteMode"] = overwrite_mode
+        if keep_null is not None:
+            params["keepNull"] = keep_null
+        if merge_objects is not None:
+            params["mergeObjects"] = merge_objects
+        if refill_index_caches is not None:
+            params["refillIndexCaches"] = refill_index_caches
+        if version_attribute is not None:
+            params["versionAttribute"] = version_attribute
+
+        request = Request(
+            method=Method.POST,
+            endpoint=f"/_api/document/{self.name}",
+            data=self._doc_serializer.dumps(documents),
+            params=params,
+        )
+
+        def response_handler(
+            resp: Response,
+        ) -> Jsons:
+            if not resp.is_success:
+                raise DocumentInsertError(resp, request)
+            return self.deserializer.loads_many(resp.raw_body)
+
+        return await self._executor.execute(request, response_handler)
+
+    async def replace_many(
+        self,
+        documents: Sequence[T],
+        wait_for_sync: Optional[bool] = None,
+        ignore_revs: Optional[bool] = None,
+        return_new: Optional[bool] = None,
+        return_old: Optional[bool] = None,
+        silent: Optional[bool] = None,
+        refill_index_caches: Optional[bool] = None,
+        version_attribute: Optional[str] = None,
+    ) -> Result[Jsons]:
+        """Replace multiple documents.
+
+        Note:
+            If replacing a document fails, the exception is not raised but
+            returned as an object in the "errors" list. It is up to you to
+            inspect the list to determine which documents were replaced
+            successfully (returns document metadata) and which were not
+            (returns exception object).
+
+        Args:
+            documents (list): New documents to replace the old ones. An item must
+                contain the "_key" or "_id" field.
+            wait_for_sync (bool | None): Wait until documents have been synced to disk.
+            ignore_revs (bool | None): If this is set to `False`, then any `_rev`
+                attribute given in a body document is taken as a precondition. The
+                document is only replaced if the current revision is the one
+                specified.
+            return_new (bool | None): Additionally return the complete new document
+                under the attribute `new` in the result.
+            return_old (bool | None): Additionally return the complete old document
+                under the attribute `old` in the result.
+            silent (bool | None): If set to `True`, an empty object is returned as
+                response if all document operations succeed. No meta-data is returned
+                for the replaced documents. If any of the operations raises an error,
+                an array with the error object(s) is returned.
+            refill_index_caches (bool | None): Whether to add new entries to
+                in-memory index caches if document operations affect the edge index
+                or cache-enabled persistent indexes.
+            version_attribute (str | None): Support for simple external versioning to
+                document operations.
+
+        Returns:
+            list: Documents metadata (e.g. document id, key, revision) and
+                errors or just errors if **silent** is set to `True`.
+
+        Raises:
+            DocumentReplaceError: If replacing fails.
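+
+        Example (illustrative sketch; ``students`` is a placeholder handle):
+
+            # Both documents must already exist; failures are reported per item.
+            results = await students.replace_many(
+                [{"_key": "john", "age": 30}, {"_key": "jane", "age": 31}]
+            )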
+
+        References:
+            - `replace-multiple-documents `__
+        """  # noqa: E501
+        params: Params = {}
+        if wait_for_sync is not None:
+            params["waitForSync"] = wait_for_sync
+        if ignore_revs is not None:
+            params["ignoreRevs"] = ignore_revs
+        if return_new is not None:
+            params["returnNew"] = return_new
+        if return_old is not None:
+            params["returnOld"] = return_old
+        if silent is not None:
+            params["silent"] = silent
+        if refill_index_caches is not None:
+            params["refillIndexCaches"] = refill_index_caches
+        if version_attribute is not None:
+            params["versionAttribute"] = version_attribute
+
+        request = Request(
+            method=Method.PUT,
+            endpoint=f"/_api/document/{self.name}",
+            data=self._doc_serializer.dumps(documents),
+            params=params,
+        )
+
+        def response_handler(
+            resp: Response,
+        ) -> Jsons:
+            if not resp.is_success:
+                raise DocumentReplaceError(resp, request)
+            return self.deserializer.loads_many(resp.raw_body)
+
+        return await self._executor.execute(request, response_handler)
+
+    async def update_many(
+        self,
+        documents: Sequence[T],
+        wait_for_sync: Optional[bool] = None,
+        ignore_revs: Optional[bool] = None,
+        return_new: Optional[bool] = None,
+        return_old: Optional[bool] = None,
+        silent: Optional[bool] = None,
+        keep_null: Optional[bool] = None,
+        merge_objects: Optional[bool] = None,
+        refill_index_caches: Optional[bool] = None,
+        version_attribute: Optional[str] = None,
+    ) -> Result[Jsons]:
+        """Update multiple documents.
+
+        Note:
+            If updating a document fails, the exception is not raised but
+            returned as an object in the "errors" list. It is up to you to
+            inspect the list to determine which documents were updated
+            successfully (returned as document metadata) and which were not
+            (returned as exception object).
+
+        Args:
+            documents (list): Documents to update. An item must contain the "_key" or
+                "_id" field.
+            wait_for_sync (bool | None): Wait until documents have been synced to disk.
+            ignore_revs (bool | None): If this is set to `False`, then any `_rev`
+                attribute given in a body document is taken as a precondition. The
+                document is only updated if the current revision is the one
+                specified.
+            return_new (bool | None): Additionally return the complete new document
+                under the attribute `new` in the result.
+            return_old (bool | None): Additionally return the complete old document
+                under the attribute `old` in the result.
+            silent (bool | None): If set to `True`, an empty object is returned as
+                response if all document operations succeed. No meta-data is returned
+                for the updated documents. If any of the operations raises an error,
+                an array with the error object(s) is returned.
+            keep_null (bool | None): If set to `True`, fields with value None are
+                retained in the document. Otherwise, they are removed completely.
+            merge_objects (bool | None): If set to `True`, sub-dictionaries are merged
+                instead of the new one overwriting the old one.
+            refill_index_caches (bool | None): Whether to add new entries to
+                in-memory index caches if document operations affect the edge index
+                or cache-enabled persistent indexes.
+            version_attribute (str | None): Support for simple external versioning to
+                document operations.
+
+        Returns:
+            list: Documents metadata (e.g. document id, key, revision) and
+                errors or just errors if **silent** is set to `True`.
+
+        Raises:
+            DocumentUpdateError: If update fails.
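+
+        Example (illustrative sketch; ``students`` is a placeholder handle):
+
+            # Patch both documents; fields set to None are dropped.
+            results = await students.update_many(
+                [{"_key": "john", "age": 30}, {"_key": "jane", "age": 31}],
+                keep_null=False,
+            )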
+
+        References:
+            - `update-multiple-documents `__
+        """  # noqa: E501
+        params: Params = {}
+        if wait_for_sync is not None:
+            params["waitForSync"] = wait_for_sync
+        if ignore_revs is not None:
+            params["ignoreRevs"] = ignore_revs
+        if return_new is not None:
+            params["returnNew"] = return_new
+        if return_old is not None:
+            params["returnOld"] = return_old
+        if silent is not None:
+            params["silent"] = silent
+        if keep_null is not None:
+            params["keepNull"] = keep_null
+        if merge_objects is not None:
+            params["mergeObjects"] = merge_objects
+        if refill_index_caches is not None:
+            params["refillIndexCaches"] = refill_index_caches
+        if version_attribute is not None:
+            params["versionAttribute"] = version_attribute
+
+        request = Request(
+            method=Method.PATCH,
+            endpoint=f"/_api/document/{self.name}",
+            data=self._doc_serializer.dumps(documents),
+            params=params,
+        )
+
+        def response_handler(
+            resp: Response,
+        ) -> Jsons:
+            if not resp.is_success:
+                raise DocumentUpdateError(resp, request)
+            return self.deserializer.loads_many(resp.raw_body)
+
+        return await self._executor.execute(request, response_handler)
+
+    async def delete_many(
+        self,
+        documents: Sequence[T],
+        wait_for_sync: Optional[bool] = None,
+        ignore_revs: Optional[bool] = None,
+        return_old: Optional[bool] = None,
+        silent: Optional[bool] = None,
+        refill_index_caches: Optional[bool] = None,
+    ) -> Result[Jsons]:
+        """Delete multiple documents.
+
+        Note:
+            If deleting a document fails, the exception is not raised but
+            returned as an object in the "errors" list. It is up to you to
+            inspect the list to determine which documents were deleted
+            successfully (returned as document metadata) and which were not
+            (returned as exception object).
+
+        Args:
+            documents (list): Documents to delete. An item must contain the "_key" or
+                "_id" field.
+            wait_for_sync (bool | None): Wait until documents have been synced to disk.
+            ignore_revs (bool | None): If this is set to `False`, then any `_rev`
+                attribute given in a body document is taken as a precondition. The
+                document is only deleted if the current revision is the one
+                specified.
+            return_old (bool | None): Additionally return the complete old document
+                under the attribute `old` in the result.
+            silent (bool | None): If set to `True`, an empty object is returned as
+                response if all document operations succeed. No meta-data is returned
+                for the deleted documents. If any of the operations raises an error,
+                an array with the error object(s) is returned.
+            refill_index_caches (bool | None): Whether to add new entries to
+                in-memory index caches if document operations affect the edge index
+                or cache-enabled persistent indexes.
+
+        Returns:
+            list: Documents metadata (e.g. document id, key, revision) and
+                errors or just errors if **silent** is set to `True`.
+
+        Raises:
+            DocumentDeleteError: If deletion fails.
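+
+        Example (illustrative sketch; ``students`` is a placeholder handle):
+
+            results = await students.delete_many(
+                [{"_key": "john"}, {"_key": "jane"}],
+                return_old=True,
+            )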
+
+        References:
+            - `remove-multiple-documents `__
+        """  # noqa: E501
+        params: Params = {}
+        if wait_for_sync is not None:
+            params["waitForSync"] = wait_for_sync
+        if ignore_revs is not None:
+            params["ignoreRevs"] = ignore_revs
+        if return_old is not None:
+            params["returnOld"] = return_old
+        if silent is not None:
+            params["silent"] = silent
+        if refill_index_caches is not None:
+            params["refillIndexCaches"] = refill_index_caches
+
+        request = Request(
+            method=Method.DELETE,
+            endpoint=f"/_api/document/{self.name}",
+            data=self._doc_serializer.dumps(documents),
+            params=params,
+        )
+
+        def response_handler(
+            resp: Response,
+        ) -> Jsons:
+            if not resp.is_success:
+                raise DocumentDeleteError(resp, request)
+            return self.deserializer.loads_many(resp.raw_body)
+
+        return await self._executor.execute(request, response_handler)
+
+
+class StandardCollection(Collection[T, U, V]):
+    """Standard collection API wrapper.
+
+    Args:
+        executor (ApiExecutor): API executor.
+        name (str): Collection name.
+        doc_serializer (Serializer): Document serializer.
+        doc_deserializer (Deserializer): Document deserializer.
+    """
+
+    def __init__(
+        self,
+        executor: ApiExecutor,
+        name: str,
+        doc_serializer: Serializer[T],
+        doc_deserializer: Deserializer[U, V],
+    ) -> None:
+        super().__init__(executor, name, doc_serializer, doc_deserializer)
+
+    def __repr__(self) -> str:
+        return f"<StandardCollection {self.name}>"
+
+    async def get(
+        self,
+        document: str | Json,
+        allow_dirty_read: bool = False,
+        if_match: Optional[str] = None,
+        if_none_match: Optional[str] = None,
+    ) -> Result[Optional[U]]:
+        """Return a document.
+
+        Args:
+            document (str | dict): Document ID, key or body.
+                Document body must contain the "_id" or "_key" field.
+            allow_dirty_read (bool): Allow reads from followers in a cluster.
+            if_match (str | None): The document is returned, if it has the same
+                revision as the given ETag.
+            if_none_match (str | None): The document is returned, if it has a
+                different revision than the given ETag.
+
+        Returns:
+            Document or `None` if not found.
+
+        Raises:
+            DocumentRevisionError: If the revision is incorrect.
+            DocumentGetError: If retrieval fails.
+            DocumentParseError: If the document is malformed.
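+
+        Example (illustrative sketch; ``students`` is a placeholder handle):
+
+            doc = await students.get("john")
+            if doc is None:
+                ...  # no document with key "john"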
+ + References: + - `get-a-document `__ + """ # noqa: E501 + handle = self._get_doc_id(document) + + headers: RequestHeaders = {} + if allow_dirty_read: + headers["x-arango-allow-dirty-read"] = "true" + if if_match is not None: + headers["If-Match"] = if_match + if if_none_match is not None: + headers["If-None-Match"] = if_none_match + + request = Request( + method=Method.GET, + endpoint=f"/_api/document/{handle}", + headers=headers, + ) + + def response_handler(resp: Response) -> Optional[U]: + if resp.is_success: + return self._doc_deserializer.loads(resp.raw_body) + elif resp.status_code == HTTP_NOT_FOUND: + if resp.error_code == DOCUMENT_NOT_FOUND: + return None + else: + raise DocumentGetError(resp, request) + elif resp.status_code == HTTP_PRECONDITION_FAILED: + raise DocumentRevisionError(resp, request) + else: + raise DocumentGetError(resp, request) + + return await self._executor.execute(request, response_handler) + + async def insert( + self, + document: T, + wait_for_sync: Optional[bool] = None, + return_new: Optional[bool] = None, + return_old: Optional[bool] = None, + silent: Optional[bool] = None, + overwrite: Optional[bool] = None, + overwrite_mode: Optional[str] = None, + keep_null: Optional[bool] = None, + merge_objects: Optional[bool] = None, + refill_index_caches: Optional[bool] = None, + version_attribute: Optional[str] = None, + ) -> Result[bool | Json]: + """Insert a new document. + + Args: + document (dict): Document to insert. If it contains the "_key" or "_id" + field, the value is used as the key of the new document (otherwise + it is auto-generated). Any "_rev" field is ignored. + wait_for_sync (bool | None): Wait until document has been synced to disk. + return_new (bool | None): Additionally return the complete new document + under the attribute `new` in the result. + return_old (bool | None): Additionally return the complete old document + under the attribute `old` in the result. Only available if the + `overwrite` option is used. + silent (bool | None): If set to `True`, no document metadata is returned. + This can be used to save resources. + overwrite (bool | None): If set to `True`, operation does not fail on + duplicate key and existing document is overwritten (replace-insert). + overwrite_mode (str | None): Overwrite mode. Supersedes **overwrite** + option. May be one of "ignore", "replace", "update" or "conflict". + keep_null (bool | None): If set to `True`, fields with value None are + retained in the document. Otherwise, they are removed completely. + Applies only when **overwrite_mode** is set to "update" + (update-insert). + merge_objects (bool | None): If set to `True`, sub-dictionaries are merged + instead of the new one overwriting the old one. Applies only when + **overwrite_mode** is set to "update" (update-insert). + refill_index_caches (bool | None): Whether to add new entries to + in-memory index caches if document insertions affect the edge index + or cache-enabled persistent indexes. + version_attribute (str | None): Support for simple external versioning to + document operations. Only applicable if **overwrite** is set to `True` + or **overwrite_mode** is set to "update" or "replace". + + Returns: + bool | dict: Document metadata (e.g. document id, key, revision) or `True` + if **silent** is set to `True`. + + Raises: + DocumentInsertError: If insertion fails. + DocumentParseError: If the document is malformed. 
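+
+        Example (illustrative sketch; ``students`` is a placeholder handle):
+
+            meta = await students.insert({"_key": "john", "age": 20})
+            assert meta["_key"] == "john"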
+ + References: + - `create-a-document `__ + """ # noqa: E501 + if isinstance(document, dict): + document = cast(T, self._ensure_key_from_id(document)) + + params: Params = {} + if wait_for_sync is not None: + params["waitForSync"] = wait_for_sync + if return_new is not None: + params["returnNew"] = return_new + if return_old is not None: + params["returnOld"] = return_old + if silent is not None: + params["silent"] = silent + if overwrite is not None: + params["overwrite"] = overwrite + if overwrite_mode is not None: + params["overwriteMode"] = overwrite_mode + if keep_null is not None: + params["keepNull"] = keep_null + if merge_objects is not None: + params["mergeObjects"] = merge_objects + if refill_index_caches is not None: + params["refillIndexCaches"] = refill_index_caches + if version_attribute is not None: + params["versionAttribute"] = version_attribute + + request = Request( + method=Method.POST, + endpoint=f"/_api/document/{self._name}", + params=params, + data=self._doc_serializer.dumps(document), + ) + + def response_handler(resp: Response) -> bool | Json: + if resp.is_success: + if silent is True: + return True + return self._executor.deserialize(resp.raw_body) + msg: Optional[str] = None + if resp.status_code == HTTP_BAD_PARAMETER: + msg = ( + "Body does not contain a valid JSON representation of " + "one document." + ) + elif resp.status_code == HTTP_NOT_FOUND: + msg = "Collection not found." + raise DocumentInsertError(resp, request, msg) + + return await self._executor.execute(request, response_handler) + + async def update( + self, + document: T, + ignore_revs: Optional[bool] = None, + wait_for_sync: Optional[bool] = None, + return_new: Optional[bool] = None, + return_old: Optional[bool] = None, + silent: Optional[bool] = None, + keep_null: Optional[bool] = None, + merge_objects: Optional[bool] = None, + refill_index_caches: Optional[bool] = None, + version_attribute: Optional[str] = None, + if_match: Optional[str] = None, + ) -> Result[bool | Json]: + """Update a document. + + Args: + document (dict): Partial or full document with the updated values. + It must contain the "_key" or "_id" field. + ignore_revs (bool | None): If set to `True`, the `_rev` attribute in the + document is ignored. If this is set to `False`, then the `_rev` + attribute given in the body document is taken as a precondition. + The document is only updated if the current revision is the one + specified. + wait_for_sync (bool | None): Wait until document has been synced to disk. + return_new (bool | None): Additionally return the complete new document + under the attribute `new` in the result. + return_old (bool | None): Additionally return the complete old document + under the attribute `old` in the result. + silent (bool | None): If set to `True`, no document metadata is returned. + This can be used to save resources. + keep_null (bool | None): If the intention is to delete existing attributes + with the patch command, set this parameter to `False`. + merge_objects (bool | None): Controls whether objects (not arrays) are + merged if present in both the existing and the patch document. + If set to `False`, the value in the patch document overwrites the + existing document’s value. If set to `True`, objects are merged. + refill_index_caches (bool | None): Whether to add new entries to + in-memory index caches if document updates affect the edge index + or cache-enabled persistent indexes. + version_attribute (str | None): Support for simple external versioning to + document operations. 
+ if_match (str | None): You can conditionally update a document based on a + target revision id by using the "if-match" HTTP header. + + Returns: + bool | dict: Document metadata (e.g. document id, key, revision) or `True` + if **silent** is set to `True`. + + Raises: + DocumentRevisionError: If precondition was violated. + DocumentUpdateError: If update fails. + + References: + - `update-a-document `__ + """ # noqa: E501 + params: Params = {} + if ignore_revs is not None: + params["ignoreRevs"] = ignore_revs + if wait_for_sync is not None: + params["waitForSync"] = wait_for_sync + if return_new is not None: + params["returnNew"] = return_new + if return_old is not None: + params["returnOld"] = return_old + if silent is not None: + params["silent"] = silent + if keep_null is not None: + params["keepNull"] = keep_null + if merge_objects is not None: + params["mergeObjects"] = merge_objects + if refill_index_caches is not None: + params["refillIndexCaches"] = refill_index_caches + if version_attribute is not None: + params["versionAttribute"] = version_attribute + + headers: RequestHeaders = {} + if if_match is not None: + headers["If-Match"] = if_match + + request = Request( + method=Method.PATCH, + endpoint=f"/_api/document/{self._extract_id(cast(Json, document))}", + params=params, + headers=headers, + data=self._doc_serializer.dumps(document), + ) + + def response_handler(resp: Response) -> bool | Json: + if resp.is_success: + if silent is True: + return True + return self._executor.deserialize(resp.raw_body) + msg: Optional[str] = None + if resp.status_code == HTTP_PRECONDITION_FAILED: + raise DocumentRevisionError(resp, request) + elif resp.status_code == HTTP_NOT_FOUND: + msg = "Document, collection or transaction not found." + raise DocumentUpdateError(resp, request, msg) + + return await self._executor.execute(request, response_handler) + + async def replace( + self, + document: T, + ignore_revs: Optional[bool] = None, + wait_for_sync: Optional[bool] = None, + return_new: Optional[bool] = None, + return_old: Optional[bool] = None, + silent: Optional[bool] = None, + refill_index_caches: Optional[bool] = None, + version_attribute: Optional[str] = None, + if_match: Optional[str] = None, + ) -> Result[bool | Json]: + """Replace a document. + + Args: + document (dict): New document. It must contain the "_key" or "_id" field. + Edge document must also have "_from" and "_to" fields. + ignore_revs (bool | None): If set to `True`, the `_rev` attribute in the + document is ignored. If this is set to `False`, then the `_rev` + attribute given in the body document is taken as a precondition. + The document is only replaced if the current revision is the one + specified. + wait_for_sync (bool | None): Wait until document has been synced to disk. + return_new (bool | None): Additionally return the complete new document + under the attribute `new` in the result. + return_old (bool | None): Additionally return the complete old document + under the attribute `old` in the result. + silent (bool | None): If set to `True`, no document metadata is returned. + This can be used to save resources. + refill_index_caches (bool | None): Whether to add new entries to + in-memory index caches if document updates affect the edge index + or cache-enabled persistent indexes. + version_attribute (str | None): Support for simple external versioning to + document operations. + if_match (str | None): You can conditionally replace a document based on a + target revision id by using the "if-match" HTTP header. 
+
+        Returns:
+            bool | dict: Document metadata (e.g. document id, key, revision) or `True`
+                if **silent** is set to `True`.
+
+        Raises:
+            DocumentRevisionError: If precondition was violated.
+            DocumentReplaceError: If replace fails.
+
+        References:
+            - `replace-a-document `__
+        """  # noqa: E501
+        params: Params = {}
+        if ignore_revs is not None:
+            params["ignoreRevs"] = ignore_revs
+        if wait_for_sync is not None:
+            params["waitForSync"] = wait_for_sync
+        if return_new is not None:
+            params["returnNew"] = return_new
+        if return_old is not None:
+            params["returnOld"] = return_old
+        if silent is not None:
+            params["silent"] = silent
+        if refill_index_caches is not None:
+            params["refillIndexCaches"] = refill_index_caches
+        if version_attribute is not None:
+            params["versionAttribute"] = version_attribute
+
+        headers: RequestHeaders = {}
+        if if_match is not None:
+            headers["If-Match"] = if_match
+
+        request = Request(
+            method=Method.PUT,
+            endpoint=f"/_api/document/{self._extract_id(cast(Json, document))}",
+            params=params,
+            headers=headers,
+            data=self._doc_serializer.dumps(document),
+        )
+
+        def response_handler(resp: Response) -> bool | Json:
+            if resp.is_success:
                 if silent is True:
                     return True
                 return self._executor.deserialize(resp.raw_body)
             msg: Optional[str] = None
-            if resp.status_code == HTTP_BAD_PARAMETER:
-                msg = (
-                    "Body does not contain a valid JSON representation of "
-                    "one document."
-                )
+            if resp.status_code == HTTP_PRECONDITION_FAILED:
+                raise DocumentRevisionError(resp, request)
             elif resp.status_code == HTTP_NOT_FOUND:
-                msg = "Collection not found."
-                raise DocumentInsertError(resp, request, msg)
+                msg = "Document, collection or transaction not found."
+                raise DocumentReplaceError(resp, request, msg)
 
             return await self._executor.execute(request, response_handler)
 
-    async def update(
+    async def delete(
         self,
         document: T,
         ignore_revs: Optional[bool] = None,
+        ignore_missing: bool = False,
         wait_for_sync: Optional[bool] = None,
-        return_new: Optional[bool] = None,
         return_old: Optional[bool] = None,
         silent: Optional[bool] = None,
-        keep_null: Optional[bool] = None,
-        merge_objects: Optional[bool] = None,
        refill_index_caches: Optional[bool] = None,
-        version_attribute: Optional[str] = None,
         if_match: Optional[str] = None,
     ) -> Result[bool | Json]:
-        """Insert a new document.
+        """Delete a document.
 
         Args:
-            document (dict): Partial or full document with the updated values.
-                It must contain the "_key" or "_id" field.
+            document (dict): Document ID, key or body. The body must contain the
+                "_key" or "_id" field.
             ignore_revs (bool | None): If set to `True`, the `_rev` attribute in the
                 document is ignored. If this is set to `False`, then the `_rev`
                 attribute given in the body document is taken as a precondition.
-                The document is only updated if the current revision is the one
+                The document is only removed if the current revision is the one
                 specified.
-            wait_for_sync (bool | None): Wait until document has been synced to disk.
-            return_new (bool | None): Additionally return the complete new document
-                under the attribute `new` in the result.
+            ignore_missing (bool): Do not raise an exception on missing document.
+                This parameter has no effect in transactions where an exception is
+                always raised on failures.
+            wait_for_sync (bool | None): Wait until operation has been synced to disk.
             return_old (bool | None): Additionally return the complete old document
                 under the attribute `old` in the result.
             silent (bool | None): If set to `True`, no document metadata is returned.
                This can be used to save resources.
-            keep_null (bool | None): If the intention is to delete existing attributes
-                with the patch command, set this parameter to `False`.
-            merge_objects (bool | None): Controls whether objects (not arrays) are
-                merged if present in both the existing and the patch document.
-                If set to `False`, the value in the patch document overwrites the
-                existing document’s value. If set to `True`, objects are merged.
             refill_index_caches (bool | None): Whether to add new entries to
                 in-memory index caches if document updates affect the edge index
                 or cache-enabled persistent indexes.
-            version_attribute (str | None): Support for simple external versioning to
-                document operations.
-            if_match (str | None): You can conditionally update a document based on a
-                target revision id by using the "if-match" HTTP header.
+            if_match (str | None): You can conditionally remove a document based
+                on a target revision id by using the "if-match" HTTP header.
 
         Returns:
             bool | dict: Document metadata (e.g. document id, key, revision) or `True`
-                if **silent** is set to `True`.
+                if **silent** is set to `True` and the document was found.
 
         Raises:
             DocumentRevisionError: If precondition was violated.
-            DocumentUpdateError: If update fails.
+            DocumentDeleteError: If deletion fails.
 
         References:
-            - `update-a-document `__
+            - `remove-a-document `__
         """  # noqa: E501
         params: Params = {}
         if ignore_revs is not None:
             params["ignoreRevs"] = ignore_revs
         if wait_for_sync is not None:
             params["waitForSync"] = wait_for_sync
-        if return_new is not None:
-            params["returnNew"] = return_new
         if return_old is not None:
             params["returnOld"] = return_old
         if silent is not None:
             params["silent"] = silent
-        if keep_null is not None:
-            params["keepNull"] = keep_null
-        if merge_objects is not None:
-            params["mergeObjects"] = merge_objects
         if refill_index_caches is not None:
             params["refillIndexCaches"] = refill_index_caches
-        if version_attribute is not None:
-            params["versionAttribute"] = version_attribute
 
         headers: RequestHeaders = {}
         if if_match is not None:
             headers["If-Match"] = if_match
 
         request = Request(
-            method=Method.PATCH,
+            method=Method.DELETE,
             endpoint=f"/_api/document/{self._extract_id(cast(Json, document))}",
             params=params,
             headers=headers,
-            data=self._doc_serializer.dumps(document),
         )
 
         def response_handler(resp: Response) -> bool | Json:
@@ -883,74 +1729,310 @@ def response_handler(resp: Response) -> bool | Json:
             if resp.status_code == HTTP_PRECONDITION_FAILED:
                 raise DocumentRevisionError(resp, request)
             elif resp.status_code == HTTP_NOT_FOUND:
+                if resp.error_code == DOCUMENT_NOT_FOUND and ignore_missing:
+                    return False
                 msg = "Document, collection or transaction not found."
-                raise DocumentUpdateError(resp, request, msg)
+                raise DocumentDeleteError(resp, request, msg)
 
             return await self._executor.execute(request, response_handler)
 
-    async def replace(
+
+class VertexCollection(Collection[T, U, V]):
+    """Vertex collection API wrapper.
+
+    Args:
+        executor (ApiExecutor): API executor.
+        name (str): Collection name.
+        graph (str): Graph name.
+        doc_serializer (Serializer): Document serializer.
+        doc_deserializer (Deserializer): Document deserializer.
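+
+    Example (illustrative sketch; assumes the graph API exposes the vertex
+    collection, e.g. via ``db.graph("school").vertex_collection("students")``):
+
+        students = db.graph("school").vertex_collection("students")
+        await students.insert({"_key": "john"})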
+    """
+
+    def __init__(
         self,
-        document: T,
-        ignore_revs: Optional[bool] = None,
+        executor: ApiExecutor,
+        graph: str,
+        name: str,
+        doc_serializer: Serializer[T],
+        doc_deserializer: Deserializer[U, V],
+    ) -> None:
+        super().__init__(executor, name, doc_serializer, doc_deserializer)
+        self._graph = graph
+
+    def __repr__(self) -> str:
+        return f"<VertexCollection {self.name}>"
+
+    @staticmethod
+    def _parse_result(data: Json) -> Json:
+        """Parse the result from the response.
+
+        Args:
+            data (dict): Response data.
+
+        Returns:
+            dict: Parsed result.
+        """
+        result: Json = {}
+        if "new" in data or "old" in data:
+            result["vertex"] = data["vertex"]
+            if "new" in data:
+                result["new"] = data["new"]
+            if "old" in data:
+                result["old"] = data["old"]
+        else:
+            result = data["vertex"]
+        return result
+
+    @property
+    def graph(self) -> str:
+        """Return the graph name.
+
+        Returns:
+            str: Graph name.
+        """
+        return self._graph
+
+    async def get(
+        self,
+        vertex: str | Json,
+        rev: Optional[str] = None,
+        if_match: Optional[str] = None,
+        if_none_match: Optional[str] = None,
+    ) -> Result[Optional[Json]]:
+        """Return a vertex from the graph.
+
+        Args:
+            vertex (str | dict): Document ID, key or body.
+                Document body must contain the "_id" or "_key" field.
+            rev (str | None): If this is set a document is only returned if it
+                has exactly this revision.
+            if_match (str | None): The document is returned, if it has the same
+                revision as the given ETag.
+            if_none_match (str | None): The document is returned, if it has a
+                different revision than the given ETag.
+
+        Returns:
+            dict | None: Document or `None` if not found.
+
+        Raises:
+            DocumentRevisionError: If the revision is incorrect.
+            DocumentGetError: If retrieval fails.
+            DocumentParseError: If the document is malformed.
+
+        References:
+            - `get-a-vertex `__
+        """  # noqa: E501
+        handle = self._get_doc_id(vertex)
+
+        headers: RequestHeaders = {}
+        if if_match is not None:
+            headers["If-Match"] = if_match
+        if if_none_match is not None:
+            headers["If-None-Match"] = if_none_match
+
+        params: Params = {}
+        if rev is not None:
+            params["rev"] = rev
+
+        request = Request(
+            method=Method.GET,
+            endpoint=f"/_api/gharial/{self._graph}/vertex/{handle}",
+            headers=headers,
+            params=params,
+        )
+
+        def response_handler(resp: Response) -> Optional[Json]:
+            if resp.is_success:
+                return self._parse_result(self.deserializer.loads(resp.raw_body))
+            elif resp.status_code == HTTP_NOT_FOUND:
+                if resp.error_code == DOCUMENT_NOT_FOUND:
+                    return None
+                else:
+                    raise DocumentGetError(resp, request)
+            elif resp.status_code == HTTP_PRECONDITION_FAILED:
+                raise DocumentRevisionError(resp, request)
+            else:
+                raise DocumentGetError(resp, request)
+
+        return await self._executor.execute(request, response_handler)
+
+    async def insert(
+        self,
+        vertex: T,
+        wait_for_sync: Optional[bool] = None,
+        return_new: Optional[bool] = None,
+    ) -> Result[Json]:
+        """Insert a new vertex document.
+
+        Args:
+            vertex (dict): Document to insert. If it contains the "_key" or "_id"
+                field, the value is used as the key of the new document (otherwise
+                it is auto-generated). Any "_rev" field is ignored.
+            wait_for_sync (bool | None): Wait until document has been synced to disk.
+            return_new (bool | None): Additionally return the complete new document
+                under the attribute `new` in the result.
+
+        Returns:
+            dict: Document metadata (e.g. document id, key, revision).
+                If `return_new` is specified, the result contains the document
+                metadata in the "vertex" field and the new document in the "new" field.
+ + Raises: + DocumentInsertError: If insertion fails. + DocumentParseError: If the document is malformed. + + References: + - `create-a-vertex `__ + """ # noqa: E501 + if isinstance(vertex, dict): + vertex = cast(T, self._ensure_key_from_id(vertex)) + + params: Params = {} + if wait_for_sync is not None: + params["waitForSync"] = wait_for_sync + if return_new is not None: + params["returnNew"] = return_new + + request = Request( + method=Method.POST, + endpoint=f"/_api/gharial/{self._graph}/vertex/{self.name}", + params=params, + data=self._doc_serializer.dumps(vertex), + ) + + def response_handler(resp: Response) -> Json: + if resp.is_success: + return self._parse_result(self.deserializer.loads(resp.raw_body)) + msg: Optional[str] = None + if resp.status_code == HTTP_NOT_FOUND: + msg = ( + "The graph cannot be found or the collection is not " + "part of the graph." + ) + raise DocumentInsertError(resp, request, msg) + + return await self._executor.execute(request, response_handler) + + async def update( + self, + vertex: T, wait_for_sync: Optional[bool] = None, + keep_null: Optional[bool] = None, + return_new: Optional[bool] = None, + return_old: Optional[bool] = None, + if_match: Optional[str] = None, + ) -> Result[Json]: + """Update a vertex in the graph. + + Args: + vertex (dict): Partial or full document with the updated values. + It must contain the "_key" or "_id" field. + wait_for_sync (bool | None): Wait until document has been synced to disk. + keep_null (bool | None): If the intention is to delete existing attributes + with the patch command, set this parameter to `False`. + return_new (bool | None): Additionally return the complete new document + under the attribute `new` in the result. + return_old (bool | None): Additionally return the complete old document + under the attribute `old` in the result. + if_match (str | None): You can conditionally update a document based on a + target revision id by using the "if-match" HTTP header. + + Returns: + dict: Document metadata (e.g. document id, key, revision). + If `return_new` or "return_old" are specified, the result contains + the document metadata in the "vertex" field and two additional fields + ("new" and "old"). + + Raises: + DocumentUpdateError: If update fails. + + References: + - `update-a-vertex `__ + """ # noqa: E501 + params: Params = {} + if wait_for_sync is not None: + params["waitForSync"] = wait_for_sync + if keep_null is not None: + params["keepNull"] = keep_null + if return_new is not None: + params["returnNew"] = return_new + if return_old is not None: + params["returnOld"] = return_old + + headers: RequestHeaders = {} + if if_match is not None: + headers["If-Match"] = if_match + + request = Request( + method=Method.PATCH, + endpoint=f"/_api/gharial/{self._graph}/vertex/" + f"{self._get_doc_id(cast(Json, vertex))}", + params=params, + headers=headers, + data=self._doc_serializer.dumps(vertex), + ) + + def response_handler(resp: Response) -> Json: + if resp.is_success: + return self._parse_result(self.deserializer.loads(resp.raw_body)) + msg: Optional[str] = None + if resp.status_code == HTTP_PRECONDITION_FAILED: + raise DocumentRevisionError(resp, request) + elif resp.status_code == HTTP_NOT_FOUND: + msg = ( + "Vertex or graph not found, or the collection is not part of " + "this graph. Error may also occur if the transaction ID is " + "unknown." 
+ ) + raise DocumentUpdateError(resp, request, msg) + + return await self._executor.execute(request, response_handler) + + async def replace( + self, + vertex: T, + wait_for_sync: Optional[bool] = None, + keep_null: Optional[bool] = None, return_new: Optional[bool] = None, return_old: Optional[bool] = None, - silent: Optional[bool] = None, - refill_index_caches: Optional[bool] = None, - version_attribute: Optional[str] = None, if_match: Optional[str] = None, - ) -> Result[bool | Json]: - """Replace a document. + ) -> Result[Json]: + """Replace a vertex in the graph. Args: - document (dict): New document. It must contain the "_key" or "_id" field. - Edge document must also have "_from" and "_to" fields. - ignore_revs (bool | None): If set to `True`, the `_rev` attribute in the - document is ignored. If this is set to `False`, then the `_rev` - attribute given in the body document is taken as a precondition. - The document is only replaced if the current revision is the one - specified. + vertex (dict): New document. It must contain the "_key" or "_id" field. wait_for_sync (bool | None): Wait until document has been synced to disk. + keep_null (bool | None): If the intention is to delete existing attributes + with the patch command, set this parameter to `False`. return_new (bool | None): Additionally return the complete new document under the attribute `new` in the result. return_old (bool | None): Additionally return the complete old document under the attribute `old` in the result. - silent (bool | None): If set to `True`, no document metadata is returned. - This can be used to save resources. - refill_index_caches (bool | None): Whether to add new entries to - in-memory index caches if document updates affect the edge index - or cache-enabled persistent indexes. - version_attribute (str | None): Support for simple external versioning to - document operations. if_match (str | None): You can conditionally replace a document based on a target revision id by using the "if-match" HTTP header. Returns: - bool | dict: Document metadata (e.g. document id, key, revision) or `True` - if **silent** is set to `True`. + dict: Document metadata (e.g. document id, key, revision). + If `return_new` or "return_old" are specified, the result contains + the document metadata in the "vertex" field and two additional fields + ("new" and "old"). Raises: DocumentRevisionError: If precondition was violated. DocumentReplaceError: If replace fails. 
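+
+        Example (illustrative sketch; ``students`` is a placeholder vertex
+        collection wrapper):
+
+            result = await students.replace(
+                {"_id": "students/john", "age": 30},
+                return_old=True,
+            )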
References: - - `replace-a-document `__ + - `replace-a-vertex `__ """ # noqa: E501 params: Params = {} - if ignore_revs is not None: - params["ignoreRevs"] = ignore_revs if wait_for_sync is not None: params["waitForSync"] = wait_for_sync + if keep_null is not None: + params["keepNull"] = keep_null if return_new is not None: params["returnNew"] = return_new if return_old is not None: params["returnOld"] = return_old - if silent is not None: - params["silent"] = silent - if refill_index_caches is not None: - params["refillIndexCaches"] = refill_index_caches - if version_attribute is not None: - params["versionAttribute"] = version_attribute headers: RequestHeaders = {} if if_match is not None: @@ -958,83 +2040,67 @@ async def replace( request = Request( method=Method.PUT, - endpoint=f"/_api/document/{self._extract_id(cast(Json, document))}", + endpoint=f"/_api/gharial/{self._graph}/vertex/" + f"{self._get_doc_id(cast(Json, vertex))}", params=params, headers=headers, - data=self._doc_serializer.dumps(document), + data=self._doc_serializer.dumps(vertex), ) - def response_handler(resp: Response) -> bool | Json: + def response_handler(resp: Response) -> Json: if resp.is_success: - if silent is True: - return True - return self._executor.deserialize(resp.raw_body) + return self._parse_result(self.deserializer.loads(resp.raw_body)) msg: Optional[str] = None if resp.status_code == HTTP_PRECONDITION_FAILED: raise DocumentRevisionError(resp, request) elif resp.status_code == HTTP_NOT_FOUND: - msg = "Document, collection or transaction not found." + msg = ( + "Vertex or graph not found, or the collection is not part of " + "this graph. Error may also occur if the transaction ID is " + "unknown." + ) raise DocumentReplaceError(resp, request, msg) return await self._executor.execute(request, response_handler) async def delete( self, - document: T, - ignore_revs: Optional[bool] = None, + vertex: T, ignore_missing: bool = False, wait_for_sync: Optional[bool] = None, return_old: Optional[bool] = None, - silent: Optional[bool] = None, - refill_index_caches: Optional[bool] = None, if_match: Optional[str] = None, ) -> Result[bool | Json]: - """Delete a document. + """Delete a vertex from the graph. Args: - document (dict): Document ID, key or body. The body must contain the + vertex (dict): Document ID, key or body. The body must contain the "_key" or "_id" field. - ignore_revs (bool | None): If set to `True`, the `_rev` attribute in the - document is ignored. If this is set to `False`, then the `_rev` - attribute given in the body document is taken as a precondition. - The document is only replaced if the current revision is the one - specified. ignore_missing (bool): Do not raise an exception on missing document. - This parameter has no effect in transactions where an exception is - always raised on failures. wait_for_sync (bool | None): Wait until operation has been synced to disk. return_old (bool | None): Additionally return the complete old document under the attribute `old` in the result. - silent (bool | None): If set to `True`, no document metadata is returned. - This can be used to save resources. - refill_index_caches (bool | None): Whether to add new entries to - in-memory index caches if document updates affect the edge index - or cache-enabled persistent indexes. - if_match (bool | None): You can conditionally remove a document based - on a target revision id by using the "if-match" HTTP header. 
+            if_match (str | None): You can conditionally remove a document based on a
+                target revision id by using the "if-match" HTTP header.
 
         Returns:
-            bool | dict: Document metadata (e.g. document id, key, revision) or `True`
-                if **silent** is set to `True` and the document was found.
+            bool | dict: `True` if vertex was deleted successfully, `False` if vertex
+                was not found and **ignore_missing** was set to `True` (does not apply
+                in transactions). Old document is returned if **return_old** is set
+                to `True`.
 
         Raises:
             DocumentRevisionError: If precondition was violated.
             DocumentDeleteError: If deletion fails.
 
         References:
-            - `remove-a-document `__
+            - `remove-a-vertex `__
         """  # noqa: E501
         params: Params = {}
-        if ignore_revs is not None:
-            params["ignoreRevs"] = ignore_revs
         if wait_for_sync is not None:
            params["waitForSync"] = wait_for_sync
         if return_old is not None:
             params["returnOld"] = return_old
-        if silent is not None:
-            params["silent"] = silent
-        if refill_index_caches is not None:
-            params["refillIndexCaches"] = refill_index_caches
 
         headers: RequestHeaders = {}
         if if_match is not None:
@@ -1042,672 +2108,531 @@ async def delete(
 
         request = Request(
             method=Method.DELETE,
-            endpoint=f"/_api/document/{self._extract_id(cast(Json, document))}",
+            endpoint=f"/_api/gharial/{self._graph}/vertex/"
+            f"{self._get_doc_id(cast(Json, vertex))}",
             params=params,
             headers=headers,
         )
 
         def response_handler(resp: Response) -> bool | Json:
             if resp.is_success:
-                if silent is True:
-                    return True
-                return self._executor.deserialize(resp.raw_body)
+                data: Json = self.deserializer.loads(resp.raw_body)
+                if "old" in data:
+                    return cast(Json, data["old"])
+                return True
             msg: Optional[str] = None
             if resp.status_code == HTTP_PRECONDITION_FAILED:
                 raise DocumentRevisionError(resp, request)
             elif resp.status_code == HTTP_NOT_FOUND:
                 if resp.error_code == DOCUMENT_NOT_FOUND and ignore_missing:
                     return False
-                msg = "Document, collection or transaction not found."
-                raise DocumentDeleteError(resp, request, msg)
-
-            return await self._executor.execute(request, response_handler)
-
-    async def get_many(
-        self,
-        documents: Sequence[str | T],
-        allow_dirty_read: Optional[bool] = None,
-        ignore_revs: Optional[bool] = None,
-    ) -> Result[V]:
-        """Return multiple documents ignoring any missing ones.
-
-        Args:
-            documents (list): List of document IDs, keys or bodies. A search document
-                must contain at least a value for the `_key` field. A value for `_rev`
-                may be specified to verify whether the document has the same revision
-                value, unless `ignoreRevs` is set to false.
-            allow_dirty_read (bool | None): Allow reads from followers in a cluster.
-            ignore_revs (bool | None): If set to `True`, the `_rev` attribute in the
-                document is ignored. If this is set to `False`, then the `_rev`
-                attribute given in the body document is taken as a precondition.
-                The document is only replaced if the current revision is the one
-                specified.
-
-        Returns:
-            list: List of documents. Missing ones are not included.
-
-        Raises:
-            DocumentGetError: If retrieval fails.
- - References: - - `get-multiple-documents `__ - """ # noqa: E501 - params: Params = {"onlyget": True} - if ignore_revs is not None: - params["ignoreRevs"] = ignore_revs - - headers: RequestHeaders = {} - if allow_dirty_read is not None: - if allow_dirty_read is True: - headers["x-arango-allow-dirty-read"] = "true" - else: - headers["x-arango-allow-dirty-read"] = "false" - - request = Request( - method=Method.PUT, - endpoint=f"/_api/document/{self.name}", - params=params, - headers=headers, - data=self._doc_serializer.dumps(documents), - ) - - def response_handler(resp: Response) -> V: - if not resp.is_success: - raise DocumentGetError(resp, request) - return self._doc_deserializer.loads_many(resp.raw_body) - - return await self._executor.execute(request, response_handler) - - async def find( - self, - filters: Optional[Json] = None, - skip: Optional[int] = None, - limit: Optional[int | str] = None, - allow_dirty_read: Optional[bool] = False, - sort: Optional[Jsons] = None, - ) -> Result[Cursor]: - """Return all documents that match the given filters. - - Args: - filters (dict | None): Query filters. - skip (int | None): Number of documents to skip. - limit (int | str | None): Maximum number of documents to return. - allow_dirty_read (bool): Allow reads from followers in a cluster. - sort (list | None): Document sort parameters. - - Returns: - Cursor: Document cursor. - - Raises: - DocumentGetError: If retrieval fails. - SortValidationError: If sort parameters are invalid. - """ - if not self._is_none_or_dict(filters): - raise ValueError("filters parameter must be a dict") - self._validate_sort_parameters(sort) - if not self._is_none_or_int(skip): - raise ValueError("skip parameter must be a non-negative int") - if not (self._is_none_or_int(limit) or limit == "null"): - raise ValueError("limit parameter must be a non-negative int") - - skip = skip if skip is not None else 0 - limit = limit if limit is not None else "null" - query = f""" - FOR doc IN @@collection - {self._build_filter_conditions(filters)} - LIMIT {skip}, {limit} - {self._build_sort_expression(sort)} - RETURN doc - """ - bind_vars = {"@collection": self.name} - data: Json = {"query": query, "bindVars": bind_vars, "count": True} - headers: RequestHeaders = {} - if allow_dirty_read is not None: - if allow_dirty_read is True: - headers["x-arango-allow-dirty-read"] = "true" - else: - headers["x-arango-allow-dirty-read"] = "false" - - request = Request( - method=Method.POST, - endpoint="/_api/cursor", - data=self.serializer.dumps(data), - headers=headers, - ) - - def response_handler(resp: Response) -> Cursor: - if not resp.is_success: - raise DocumentGetError(resp, request) - if self._executor.context == "async": - # We cannot have a cursor giving back async jobs - executor: NonAsyncExecutor = DefaultApiExecutor( - self._executor.connection + msg = ( + "Vertex or graph not found, or the collection is not part of " + "this graph. Error may also occur if the transaction ID is " + "unknown." ) - else: - executor = cast(NonAsyncExecutor, self._executor) - return Cursor(executor, self.deserializer.loads(resp.raw_body)) - - return await self._executor.execute(request, response_handler) - - async def update_match( - self, - filters: Json, - body: T, - limit: Optional[int | str] = None, - keep_none: Optional[bool] = None, - wait_for_sync: Optional[bool] = None, - merge_objects: Optional[bool] = None, - ) -> Result[int]: - """Update matching documents. - - Args: - filters (dict | None): Query filters. 
-            body (dict): Full or partial document body with the updates.
-            limit (int | str | None): Maximum number of documents to update.
-            keep_none (bool | None): If set to `True`, fields with value `None` are
-                retained in the document. Otherwise, they are removed completely.
-            wait_for_sync (bool | None): Wait until operation has been synced to disk.
-            merge_objects (bool | None): If set to `True`, sub-dictionaries are merged
-                instead of the new one overwriting the old one.
-
-        Returns:
-            int: Number of documents that got updated.
-
-        Raises:
-            DocumentUpdateError: If update fails.
-        """
-        if not self._is_none_or_dict(filters):
-            raise ValueError("filters parameter must be a dict")
-        if not (self._is_none_or_int(limit) or limit == "null"):
-            raise ValueError("limit parameter must be a non-negative int")
+            raise DocumentDeleteError(resp, request, msg)
 
-        sync = f", waitForSync: {wait_for_sync}" if wait_for_sync is not None else ""
-        query = f"""
-            FOR doc IN @@collection
-                {self._build_filter_conditions(filters)}
-                {f"LIMIT {limit}" if limit is not None else ""}
-                UPDATE doc WITH @body IN @@collection
-                OPTIONS {{ keepNull: @keep_none, mergeObjects: @merge {sync} }}
-        """  # noqa: E201 E202
-        bind_vars = {
-            "@collection": self.name,
-            "body": body,
-            "keep_none": keep_none,
-            "merge": merge_objects,
-        }
-        data = {"query": query, "bindVars": bind_vars}
+        return await self._executor.execute(request, response_handler)
 
-        request = Request(
-            method=Method.POST,
-            endpoint="/_api/cursor",
-            data=self.serializer.dumps(data),
-        )
 
-        def response_handler(resp: Response) -> int:
-            if resp.is_success:
-                result = self.deserializer.loads(resp.raw_body)
-                return cast(int, result["extra"]["stats"]["writesExecuted"])
-            raise DocumentUpdateError(resp, request)
+class EdgeCollection(Collection[T, U, V]):
+    """Edge collection API wrapper.
 
-        return await self._executor.execute(request, response_handler)
+    Args:
+        executor (ApiExecutor): API executor.
+        name (str): Collection name.
+        graph (str): Graph name.
+        doc_serializer (Serializer): Document serializer.
+        doc_deserializer (Deserializer): Document deserializer.
+    """
 
-    async def replace_match(
+    def __init__(
         self,
-        filters: Json,
-        body: T,
-        limit: Optional[int | str] = None,
-        wait_for_sync: Optional[bool] = None,
-    ) -> Result[int]:
-        """Replace matching documents.
+        executor: ApiExecutor,
+        graph: str,
+        name: str,
+        doc_serializer: Serializer[T],
+        doc_deserializer: Deserializer[U, V],
+    ) -> None:
+        super().__init__(executor, name, doc_serializer, doc_deserializer)
+        self._graph = graph
+
+    def __repr__(self) -> str:
+        return f"<EdgeCollection {self.name}>"
+
+    @staticmethod
+    def _parse_result(data: Json) -> Json:
+        """Parse the result from the response.
 
         Args:
-            filters (dict | None): Query filters.
-            body (dict): New document body.
-            limit (int | str | None): Maximum number of documents to replace.
-            wait_for_sync (bool | None): Wait until operation has been synced to disk.
+            data (dict): Response data.
 
         Returns:
-            int: Number of documents that got replaced.
-
-        Raises:
-            DocumentReplaceError: If replace fails.
+            dict: Parsed result.
""" - if not self._is_none_or_dict(filters): - raise ValueError("filters parameter must be a dict") - if not (self._is_none_or_int(limit) or limit == "null"): - raise ValueError("limit parameter must be a non-negative int") - - sync = f"waitForSync: {wait_for_sync}" if wait_for_sync is not None else "" - query = f""" - FOR doc IN @@collection - {self._build_filter_conditions(filters)} - {f"LIMIT {limit}" if limit is not None else ""} - REPLACE doc WITH @body IN @@collection - {f"OPTIONS {{ {sync} }}" if sync else ""} - """ # noqa: E201 E202 - bind_vars = { - "@collection": self.name, - "body": body, - } - data = {"query": query, "bindVars": bind_vars} - - request = Request( - method=Method.POST, - endpoint="/_api/cursor", - data=self.serializer.dumps(data), - ) + result: Json = {} + if "new" in data or "old" in data: + result["edge"] = data["edge"] + if "new" in data: + result["new"] = data["new"] + if "old" in data: + result["old"] = data["old"] + else: + result = data["edge"] + return result - def response_handler(resp: Response) -> int: - if resp.is_success: - result = self.deserializer.loads(resp.raw_body) - return cast(int, result["extra"]["stats"]["writesExecuted"]) - raise DocumentReplaceError(resp, request) + @property + def graph(self) -> str: + """Return the graph name. - return await self._executor.execute(request, response_handler) + Returns: + str: Graph name. + """ + return self._graph - async def delete_match( + async def get( self, - filters: Json, - limit: Optional[int | str] = None, - wait_for_sync: Optional[bool] = None, - ) -> Result[int]: - """Delete matching documents. + edge: str | Json, + rev: Optional[str] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + ) -> Result[Optional[Json]]: + """Return an edge from the graph. Args: - filters (dict | None): Query filters. - limit (int | str | None): Maximum number of documents to delete. - wait_for_sync (bool | None): Wait until operation has been synced to disk. + edge (str | dict): Document ID, key or body. + Document body must contain the "_id" or "_key" field. + rev (str | None): If this is set a document is only returned if it + has exactly this revision. + if_match (str | None): The document is returned, if it has the same + revision as the given ETag. + if_none_match (str | None): The document is returned, if it has a + different revision than the given ETag. Returns: - int: Number of documents that got deleted. + dict | None: Document or `None` if not found. Raises: - DocumentDeleteError: If delete fails. - """ - if not self._is_none_or_dict(filters): - raise ValueError("filters parameter must be a dict") - if not (self._is_none_or_int(limit) or limit == "null"): - raise ValueError("limit parameter must be a non-negative int") + DocumentRevisionError: If the revision is incorrect. + DocumentGetError: If retrieval fails. + DocumentParseError: If the document is malformed. 
- sync = f"waitForSync: {wait_for_sync}" if wait_for_sync is not None else "" - query = f""" - FOR doc IN @@collection - {self._build_filter_conditions(filters)} - {f"LIMIT {limit}" if limit is not None else ""} - REMOVE doc IN @@collection - {f"OPTIONS {{ {sync} }}" if sync else ""} - """ # noqa: E201 E202 - bind_vars = {"@collection": self.name} - data = {"query": query, "bindVars": bind_vars} + References: + - `get-an-edge `__ + """ # noqa: E501 + handle = self._get_doc_id(edge) + + headers: RequestHeaders = {} + if if_match is not None: + headers["If-Match"] = if_match + if if_none_match is not None: + headers["If-None-Match"] = if_none_match + + params: Params = {} + if rev is not None: + params["rev"] = rev request = Request( - method=Method.POST, - endpoint="/_api/cursor", - data=self.serializer.dumps(data), + method=Method.GET, + endpoint=f"/_api/gharial/{self._graph}/edge/{handle}", + headers=headers, + params=params, ) - def response_handler(resp: Response) -> int: + def response_handler(resp: Response) -> Optional[Json]: if resp.is_success: - result = self.deserializer.loads(resp.raw_body) - return cast(int, result["extra"]["stats"]["writesExecuted"]) - raise DocumentDeleteError(resp, request) + return self._parse_result(self.deserializer.loads(resp.raw_body)) + elif resp.status_code == HTTP_NOT_FOUND: + if resp.error_code == DOCUMENT_NOT_FOUND: + return None + else: + raise DocumentGetError(resp, request) + elif resp.status_code == HTTP_PRECONDITION_FAILED: + raise DocumentRevisionError(resp, request) + else: + raise DocumentGetError(resp, request) return await self._executor.execute(request, response_handler) - async def insert_many( + async def insert( self, - documents: Sequence[T], + edge: T, wait_for_sync: Optional[bool] = None, return_new: Optional[bool] = None, - return_old: Optional[bool] = None, - silent: Optional[bool] = None, - overwrite: Optional[bool] = None, - overwrite_mode: Optional[str] = None, - keep_null: Optional[bool] = None, - merge_objects: Optional[bool] = None, - refill_index_caches: Optional[bool] = None, - version_attribute: Optional[str] = None, - ) -> Result[Jsons]: - """Insert multiple documents. - - Note: - If inserting a document fails, the exception is not raised but - returned as an object in the "errors" list. It is up to you to - inspect the list to determine which documents were inserted - successfully (returns document metadata) and which were not - (returns exception object). + ) -> Result[Json]: + """Insert a new edge document. Args: - documents (list): Documents to insert. If an item contains the "_key" or - "_id" field, the value is used as the key of the new document - (otherwise it is auto-generated). Any "_rev" field is ignored. - wait_for_sync (bool | None): Wait until documents have been synced to disk. + edge (dict): Document to insert. It must contain "_from" and + "_to" fields. If it contains the "_key" or "_id" + field, the value is used as the key of the new document (otherwise + it is auto-generated). Any "_rev" field is ignored. + wait_for_sync (bool | None): Wait until document has been synced to disk. return_new (bool | None): Additionally return the complete new document under the attribute `new` in the result. - return_old (bool | None): Additionally return the complete old document - under the attribute `old` in the result. Only available if the - `overwrite` option is used. - silent (bool | None): If set to `True`, an empty object is returned as - response if all document operations succeed. 
No meta-data is returned
-                for the created documents. If any of the operations raises an error,
-                an array with the error object(s) is returned.
-            overwrite (bool | None): If set to `True`, operation does not fail on
-                duplicate key and existing document is overwritten (replace-insert).
-            overwrite_mode (str | None): Overwrite mode. Supersedes **overwrite**
-                option. May be one of "ignore", "replace", "update" or "conflict".
-            keep_null (bool | None): If set to `True`, fields with value None are
-                retained in the document. Otherwise, they are removed completely.
-                Applies only when **overwrite_mode** is set to "update"
-                (update-insert).
-            merge_objects (bool | None): If set to `True`, sub-dictionaries are merged
-                instead of the new one overwriting the old one. Applies only when
-                **overwrite_mode** is set to "update" (update-insert).
-            refill_index_caches (bool | None): Whether to add new entries to
-                in-memory index caches if document operations affect the edge index
-                or cache-enabled persistent indexes.
-            version_attribute (str | None): Support for simple external versioning to
-                document operations. Only applicable if **overwrite** is set to `True`
-                or **overwrite_mode** is set to "update" or "replace".

         Returns:
-            list: Documents metadata (e.g. document id, key, revision) and
-                errors or just errors if **silent** is set to `True`.
+            dict: Document metadata (e.g. document id, key, revision).
+                If `return_new` is specified, the result contains the document
+                metadata in the "edge" field and the new document in the "new" field.

         Raises:
             DocumentInsertError: If insertion fails.
+            DocumentParseError: If the document is malformed.

         References:
-            - `create-multiple-documents `__
+            - `create-an-edge `__
         """  # noqa: E501
+        if isinstance(edge, dict):
+            edge = cast(T, self._ensure_key_from_id(edge))
+
         params: Params = {}
         if wait_for_sync is not None:
             params["waitForSync"] = wait_for_sync
         if return_new is not None:
             params["returnNew"] = return_new
-        if return_old is not None:
-            params["returnOld"] = return_old
-        if silent is not None:
-            params["silent"] = silent
-        if overwrite is not None:
-            params["overwrite"] = overwrite
-        if overwrite_mode is not None:
-            params["overwriteMode"] = overwrite_mode
-        if keep_null is not None:
-            params["keepNull"] = keep_null
-        if merge_objects is not None:
-            params["mergeObjects"] = merge_objects
-        if refill_index_caches is not None:
-            params["refillIndexCaches"] = refill_index_caches
-        if version_attribute is not None:
-            params["versionAttribute"] = version_attribute

         request = Request(
             method=Method.POST,
-            endpoint=f"/_api/document/{self.name}",
-            data=self._doc_serializer.dumps(documents),
+            endpoint=f"/_api/gharial/{self._graph}/edge/{self.name}",
             params=params,
+            data=self._doc_serializer.dumps(edge),
         )

-        def response_handler(
-            resp: Response,
-        ) -> Jsons:
-            if not resp.is_success:
-                raise DocumentInsertError(resp, request)
-            return self.deserializer.loads_many(resp.raw_body)
+        def response_handler(resp: Response) -> Json:
+            if resp.is_success:
+                return self._parse_result(self.deserializer.loads(resp.raw_body))
+            msg: Optional[str] = None
+            if resp.status_code == HTTP_NOT_FOUND:
+                msg = (
+                    "The graph cannot be found or the edge collection is not "
+                    "part of the graph. It is also possible that the vertex "
+                    "collection referenced in the _from or _to attribute is not "
+                    "part of the graph, or that it is part of the graph but does "
+                    "not exist. Finally, check that the _from and _to vertices exist."
+ ) + raise DocumentInsertError(resp, request, msg) return await self._executor.execute(request, response_handler) - async def replace_many( + async def update( self, - documents: Sequence[T], + edge: T, wait_for_sync: Optional[bool] = None, - ignore_revs: Optional[bool] = None, + keep_null: Optional[bool] = None, return_new: Optional[bool] = None, return_old: Optional[bool] = None, - silent: Optional[bool] = None, - refill_index_caches: Optional[bool] = None, - version_attribute: Optional[str] = None, - ) -> Result[Jsons]: - """Insert multiple documents. - - Note: - If replacing a document fails, the exception is not raised but - returned as an object in the "errors" list. It is up to you to - inspect the list to determine which documents were replaced - successfully (returns document metadata) and which were not - (returns exception object). + if_match: Optional[str] = None, + ) -> Result[Json]: + """Update an edge in the graph. Args: - documents (list): New documents to replace the old ones. An item must - contain the "_key" or "_id" field. - wait_for_sync (bool | None): Wait until documents have been synced to disk. - ignore_revs (bool | None): If this is set to `False`, then any `_rev` - attribute given in a body document is taken as a precondition. The - document is only replaced if the current revision is the one - specified. + edge (dict): Partial or full document with the updated values. + It must contain the "_key" or "_id" field, along with "_from" and + "_to" fields. + wait_for_sync (bool | None): Wait until document has been synced to disk. + keep_null (bool | None): If the intention is to delete existing attributes + with the patch command, set this parameter to `False`. return_new (bool | None): Additionally return the complete new document under the attribute `new` in the result. return_old (bool | None): Additionally return the complete old document under the attribute `old` in the result. - silent (bool | None): If set to `True`, an empty object is returned as - response if all document operations succeed. No meta-data is returned - for the created documents. If any of the operations raises an error, - an array with the error object(s) is returned. - refill_index_caches (bool | None): Whether to add new entries to - in-memory index caches if document operations affect the edge index - or cache-enabled persistent indexes. - version_attribute (str | None): Support for simple external versioning to - document operations. + if_match (str | None): You can conditionally update a document based on a + target revision id by using the "if-match" HTTP header. Returns: - list: Documents metadata (e.g. document id, key, revision) and - errors or just errors if **silent** is set to `True`. + dict: Document metadata (e.g. document id, key, revision). + If `return_new` or "return_old" are specified, the result contains + the document metadata in the "edge" field and two additional fields + ("new" and "old"). Raises: - DocumentReplaceError: If replacing fails. + DocumentUpdateError: If update fails. 
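+
+        Example:
+            A minimal usage sketch; the graph, collection, and document names
+            below are illustrative only:
+
+            .. code-block:: python
+
+                teaches = db.graph("school").edge_collection("teaches")
+                await teaches.update(
+                    {
+                        "_key": "12345",
+                        "_from": "teachers/jon",
+                        "_to": "lectures/CSC101",
+                        "online": True,
+                    },
+                    return_new=True,
+                )
+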
         References:
-            - `replace-multiple-documents `__
+            - `update-an-edge `__
         """  # noqa: E501
         params: Params = {}
         if wait_for_sync is not None:
             params["waitForSync"] = wait_for_sync
-        if ignore_revs is not None:
-            params["ignoreRevs"] = ignore_revs
+        if keep_null is not None:
+            params["keepNull"] = keep_null
         if return_new is not None:
             params["returnNew"] = return_new
         if return_old is not None:
             params["returnOld"] = return_old
-        if silent is not None:
-            params["silent"] = silent
-        if refill_index_caches is not None:
-            params["refillIndexCaches"] = refill_index_caches
-        if version_attribute is not None:
-            params["versionAttribute"] = version_attribute
+
+        headers: RequestHeaders = {}
+        if if_match is not None:
+            headers["If-Match"] = if_match

         request = Request(
-            method=Method.PUT,
-            endpoint=f"/_api/document/{self.name}",
-            data=self._doc_serializer.dumps(documents),
+            method=Method.PATCH,
+            endpoint=f"/_api/gharial/{self._graph}/edge/"
+            f"{self._get_doc_id(cast(Json, edge))}",
             params=params,
+            headers=headers,
+            data=self._doc_serializer.dumps(edge),
         )

-        def response_handler(
-            resp: Response,
-        ) -> Jsons:
-            if not resp.is_success:
-                raise DocumentReplaceError(resp, request)
-            return self.deserializer.loads_many(resp.raw_body)
+        def response_handler(resp: Response) -> Json:
+            if resp.is_success:
+                return self._parse_result(self.deserializer.loads(resp.raw_body))
+            msg: Optional[str] = None
+            if resp.status_code == HTTP_PRECONDITION_FAILED:
+                raise DocumentRevisionError(resp, request)
+            elif resp.status_code == HTTP_NOT_FOUND:
+                msg = (
+                    "The graph cannot be found or the edge collection is not "
+                    "part of the graph. It is also possible that the vertex "
+                    "collection referenced in the _from or _to attribute is not "
+                    "part of the graph, or that it is part of the graph but does "
+                    "not exist. Finally, check that the _from and _to vertices exist."
+                )
+            raise DocumentUpdateError(resp, request, msg)

         return await self._executor.execute(request, response_handler)

-    async def update_many(
+    async def replace(
         self,
-        documents: Sequence[T],
+        edge: T,
         wait_for_sync: Optional[bool] = None,
-        ignore_revs: Optional[bool] = None,
+        keep_null: Optional[bool] = None,
         return_new: Optional[bool] = None,
         return_old: Optional[bool] = None,
-        silent: Optional[bool] = None,
-        keep_null: Optional[bool] = None,
-        merge_objects: Optional[bool] = None,
-        refill_index_caches: Optional[bool] = None,
-        version_attribute: Optional[str] = None,
-    ) -> Result[Jsons]:
-        """Insert multiple documents.
-
-        Note:
-            If updating a document fails, the exception is not raised but
-            returned as an object in the "errors" list. It is up to you to
-            inspect the list to determine which documents were updated
-            successfully (returned as document metadata) and which were not
-            (returned as exception object).
+        if_match: Optional[str] = None,
+    ) -> Result[Json]:
+        """Replace an edge in the graph.

         Args:
-            documents (list): Documents to update. An item must contain the "_key" or
-                "_id" field.
-            wait_for_sync (bool | None): Wait until documents have been synced to disk.
-            ignore_revs (bool | None): If this is set to `False`, then any `_rev`
-                attribute given in a body document is taken as a precondition. The
-                document is only updated if the current revision is the one
-                specified.
+            edge (dict): New document. It must contain the "_key" or "_id"
+                field, along with "_from" and "_to" fields.
+            wait_for_sync (bool | None): Wait until document has been synced to disk.
+ keep_null (bool | None): If the intention is to delete existing attributes + with the patch command, set this parameter to `False`. return_new (bool | None): Additionally return the complete new document under the attribute `new` in the result. return_old (bool | None): Additionally return the complete old document under the attribute `old` in the result. - silent (bool | None): If set to `True`, an empty object is returned as - response if all document operations succeed. No meta-data is returned - for the created documents. If any of the operations raises an error, - an array with the error object(s) is returned. - keep_null (bool | None): If set to `True`, fields with value None are - retained in the document. Otherwise, they are removed completely. - Applies only when **overwrite_mode** is set to "update" - (update-insert). - merge_objects (bool | None): If set to `True`, sub-dictionaries are merged - instead of the new one overwriting the old one. Applies only when - **overwrite_mode** is set to "update" (update-insert). - refill_index_caches (bool | None): Whether to add new entries to - in-memory index caches if document operations affect the edge index - or cache-enabled persistent indexes. - version_attribute (str | None): Support for simple external versioning to - document operations. + if_match (str | None): You can conditionally replace a document based on a + target revision id by using the "if-match" HTTP header. Returns: - list: Documents metadata (e.g. document id, key, revision) and - errors or just errors if **silent** is set to `True`. + dict: Document metadata (e.g. document id, key, revision). + If `return_new` or "return_old" are specified, the result contains + the document metadata in the "edge" field and two additional fields + ("new" and "old"). Raises: - DocumentUpdateError: If update fails. + DocumentRevisionError: If precondition was violated. + DocumentReplaceError: If replace fails. 
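+
+        Example:
+            A minimal usage sketch; names are illustrative only. The given
+            body fully replaces the stored edge:
+
+            .. code-block:: python
+
+                teaches = db.graph("school").edge_collection("teaches")
+                await teaches.replace(
+                    {
+                        "_key": "12345",
+                        "_from": "teachers/jon",
+                        "_to": "lectures/CSC101",
+                        "hours": 30,
+                    },
+                    return_old=True,
+                )
+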
         References:
-            - `update-multiple-documents `__
+            - `replace-an-edge `__
         """  # noqa: E501
         params: Params = {}
         if wait_for_sync is not None:
             params["waitForSync"] = wait_for_sync
-        if ignore_revs is not None:
-            params["ignoreRevs"] = ignore_revs
+        if keep_null is not None:
+            params["keepNull"] = keep_null
         if return_new is not None:
             params["returnNew"] = return_new
         if return_old is not None:
             params["returnOld"] = return_old
-        if silent is not None:
-            params["silent"] = silent
-        if keep_null is not None:
-            params["keepNull"] = keep_null
-        if merge_objects is not None:
-            params["mergeObjects"] = merge_objects
-        if refill_index_caches is not None:
-            params["refillIndexCaches"] = refill_index_caches
-        if version_attribute is not None:
-            params["versionAttribute"] = version_attribute
+
+        headers: RequestHeaders = {}
+        if if_match is not None:
+            headers["If-Match"] = if_match

         request = Request(
-            method=Method.PATCH,
-            endpoint=f"/_api/document/{self.name}",
-            data=self._doc_serializer.dumps(documents),
+            method=Method.PUT,
+            endpoint=f"/_api/gharial/{self._graph}/edge/"
+            f"{self._get_doc_id(cast(Json, edge))}",
             params=params,
+            headers=headers,
+            data=self._doc_serializer.dumps(edge),
         )

-        def response_handler(
-            resp: Response,
-        ) -> Jsons:
-            if not resp.is_success:
-                raise DocumentUpdateError(resp, request)
-            return self.deserializer.loads_many(resp.raw_body)
+        def response_handler(resp: Response) -> Json:
+            if resp.is_success:
+                return self._parse_result(self.deserializer.loads(resp.raw_body))
+            msg: Optional[str] = None
+            if resp.status_code == HTTP_PRECONDITION_FAILED:
+                raise DocumentRevisionError(resp, request)
+            elif resp.status_code == HTTP_NOT_FOUND:
+                msg = (
+                    "The graph cannot be found or the edge collection is not "
+                    "part of the graph. It is also possible that the vertex "
+                    "collection referenced in the _from or _to attribute is not "
+                    "part of the graph, or that it is part of the graph but does "
+                    "not exist. Finally, check that the _from and _to vertices exist."
+                )
+            raise DocumentReplaceError(resp, request, msg)

         return await self._executor.execute(request, response_handler)

-    async def delete_many(
+    async def delete(
         self,
-        documents: Sequence[T],
+        edge: T,
+        ignore_missing: bool = False,
         wait_for_sync: Optional[bool] = None,
-        ignore_revs: Optional[bool] = None,
         return_old: Optional[bool] = None,
-        silent: Optional[bool] = None,
-        refill_index_caches: Optional[bool] = None,
-    ) -> Result[Jsons]:
-        """Delete multiple documents.
-
-        Note:
-            If deleting a document fails, the exception is not raised but
-            returned as an object in the "errors" list. It is up to you to
-            inspect the list to determine which documents were deleted
-            successfully (returned as document metadata) and which were not
-            (returned as exception object).
+        if_match: Optional[str] = None,
+    ) -> Result[bool | Json]:
+        """Delete an edge from the graph.

         Args:
-            documents (list): Documents to delete. An item must contain the "_key" or
-                "_id" field.
-            wait_for_sync (bool | None): Wait until documents have been synced to disk.
-            ignore_revs (bool | None): If this is set to `False`, then any `_rev`
-                attribute given in a body document is taken as a precondition. The
-                document is only updated if the current revision is the one
-                specified.
+            edge (str | dict): Document ID, key or body.
+                Document body must contain the "_key" or "_id" field.
+            ignore_missing (bool): Do not raise an exception on missing document.
+            wait_for_sync (bool | None): Wait until operation has been synced to disk.
             return_old (bool | None): Additionally return the complete old document
                 under the attribute `old` in the result.
-            silent (bool | None): If set to `True`, an empty object is returned as
-                response if all document operations succeed. No meta-data is returned
-                for the created documents. If any of the operations raises an error,
-                an array with the error object(s) is returned.
-            refill_index_caches (bool | None): Whether to add new entries to
-                in-memory index caches if document operations affect the edge index
-                or cache-enabled persistent indexes.
+            if_match (str | None): You can conditionally delete a document based on a
+                target revision id by using the "if-match" HTTP header.

         Returns:
-            list: Documents metadata (e.g. document id, key, revision) and
-                errors or just errors if **silent** is set to `True`.
+            bool | dict: `True` if the edge was deleted successfully, `False` if the
+                edge was not found and **ignore_missing** was set to `True` (does not
+                apply in transactions). Old document is returned if **return_old** is
+                set to `True`.

         Raises:
-            DocumentRemoveError: If removal fails.
+            DocumentRevisionError: If precondition was violated.
+            DocumentDeleteError: If deletion fails.

         References:
-            - `remove-multiple-documents `__
+            - `remove-an-edge `__
         """  # noqa: E501
         params: Params = {}
         if wait_for_sync is not None:
             params["waitForSync"] = wait_for_sync
-        if ignore_revs is not None:
-            params["ignoreRevs"] = ignore_revs
         if return_old is not None:
             params["returnOld"] = return_old
-        if silent is not None:
-            params["silent"] = silent
-        if refill_index_caches is not None:
-            params["refillIndexCaches"] = refill_index_caches
+
+        headers: RequestHeaders = {}
+        if if_match is not None:
+            headers["If-Match"] = if_match

         request = Request(
             method=Method.DELETE,
-            endpoint=f"/_api/document/{self.name}",
-            data=self._doc_serializer.dumps(documents),
+            endpoint=f"/_api/gharial/{self._graph}/edge/"
+            f"{self._get_doc_id(cast(Json, edge))}",
             params=params,
+            headers=headers,
         )

-        def response_handler(
-            resp: Response,
-        ) -> Jsons:
+        def response_handler(resp: Response) -> bool | Json:
+            if resp.is_success:
+                data: Json = self.deserializer.loads(resp.raw_body)
+                if "old" in data:
+                    return cast(Json, data["old"])
+                return True
+            msg: Optional[str] = None
+            if resp.status_code == HTTP_PRECONDITION_FAILED:
+                raise DocumentRevisionError(resp, request)
+            elif resp.status_code == HTTP_NOT_FOUND:
+                if resp.error_code == DOCUMENT_NOT_FOUND and ignore_missing:
+                    return False
+                msg = (
+                    "Either the graph cannot be found, the edge collection is not "
+                    "part of the graph, or the edge does not exist."
+                )
+            raise DocumentDeleteError(resp, request, msg)
+
+        return await self._executor.execute(request, response_handler)
+
+    async def edges(
+        self,
+        vertex: str | Json,
+        direction: Optional[Literal["in", "out"]] = None,
+        allow_dirty_read: Optional[bool] = None,
+    ) -> Result[Json]:
+        """Return the edges starting or ending at the specified vertex.
+
+        Args:
+            vertex (str | dict): Document ID, key or body.
+            direction (str | None): Direction of the edges to return. Selects `in`
+                or `out` direction for edges. If not set, any edges are returned.
+            allow_dirty_read (bool | None): Allow reads from followers in a cluster.
+
+        Returns:
+            dict: List of edges and statistics.
+
+        Raises:
+            EdgeListError: If retrieval fails.
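+
+        Example:
+            A minimal usage sketch (vertex and collection names are
+            illustrative only):
+
+            .. code-block:: python
+
+                teaches = db.graph("school").edge_collection("teaches")
+                result = await teaches.edges("teachers/jon", direction="out")
+                for edge in result["edges"]:
+                    print(edge["_to"])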
+
+        References:
+            - `get-inbound-and-outbound-edges `__
+        """  # noqa: E501
+        params: Params = {
+            "vertex": self._get_doc_id(vertex, validate=False),
+        }
+        if direction is not None:
+            params["direction"] = direction
+
+        headers: RequestHeaders = {}
+        if allow_dirty_read is not None:
+            headers["x-arango-allow-dirty-read"] = (
+                "true" if allow_dirty_read else "false"
+            )
+
+        request = Request(
+            method=Method.GET,
+            endpoint=f"/_api/edges/{self._name}",
+            params=params,
+            headers=headers,
+        )
+
+        def response_handler(resp: Response) -> Json:
             if not resp.is_success:
-                raise DocumentDeleteError(resp, request)
-            return self.deserializer.loads_many(resp.raw_body)
+                raise EdgeListError(resp, request)
+            body = self.deserializer.loads(resp.raw_body)
+            for key in ("error", "code"):
+                body.pop(key)
+            return body

         return await self._executor.execute(request, response_handler)
+
+    async def link(
+        self,
+        from_vertex: str | Json,
+        to_vertex: str | Json,
+        data: Optional[Json] = None,
+        wait_for_sync: Optional[bool] = None,
+        return_new: bool = False,
+    ) -> Result[Json]:
+        """Insert a new edge document linking the given vertices.
+
+        Args:
+            from_vertex (str | dict): "_from" vertex document ID or body with "_id"
+                field.
+            to_vertex (str | dict): "_to" vertex document ID or body with "_id" field.
+            data (dict | None): Any extra data for the new edge document. If it has
+                a "_key" or "_id" field, its value is used as the key of the new edge
+                document (otherwise it is auto-generated).
+            wait_for_sync (bool | None): Wait until operation has been synced to disk.
+            return_new (bool): Additionally return the complete new document
+                under the attribute `new` in the result.
+
+        Returns:
+            dict: Document metadata (e.g. document id, key, revision).
+                If `return_new` is specified, the result contains the document
+                metadata in the "edge" field and the new document in the "new" field.
+
+        Raises:
+            DocumentInsertError: If insertion fails.
+            DocumentParseError: If the document is malformed.
+        """
+        edge: Json = {
+            "_from": self._get_doc_id(from_vertex, validate=False),
+            "_to": self._get_doc_id(to_vertex, validate=False),
+        }
+        if data is not None:
+            edge.update(self._ensure_key_from_id(data))
+        return await self.insert(
+            cast(T, edge), wait_for_sync=wait_for_sync, return_new=return_new
+        )
diff --git a/arangoasync/database.py b/arangoasync/database.py
index 60f6ee9..3cac02d 100644
--- a/arangoasync/database.py
+++ b/arangoasync/database.py
@@ -88,6 +88,40 @@ class Database:
     def __init__(self, executor: ApiExecutor) -> None:
         self._executor = executor

+    def _get_doc_serializer(
+        self,
+        doc_serializer: Optional[Serializer[T]] = None,
+    ) -> Serializer[T]:
+        """Figure out the document serializer, defaulting to `Json`.
+
+        Args:
+            doc_serializer (Serializer | None): Optional serializer.
+
+        Returns:
+            Serializer: Either the passed serializer or the default one.
+        """
+        if doc_serializer is None:
+            return cast(Serializer[T], self.serializer)
+        else:
+            return doc_serializer
+
+    def _get_doc_deserializer(
+        self,
+        doc_deserializer: Optional[Deserializer[U, V]] = None,
+    ) -> Deserializer[U, V]:
+        """Figure out the document deserializer, defaulting to `Json`.
+
+        Args:
+            doc_deserializer (Deserializer | None): Optional deserializer.
+
+        Returns:
+            Deserializer: Either the passed deserializer or the default one.
+ """ + if doc_deserializer is None: + return cast(Deserializer[U, V], self.deserializer) + else: + return doc_deserializer + @property def connection(self) -> Connection: """Return the HTTP connection.""" @@ -390,17 +424,11 @@ def collection( Returns: StandardCollection: Collection API wrapper. """ - if doc_serializer is None: - serializer = cast(Serializer[T], self.serializer) - else: - serializer = doc_serializer - if doc_deserializer is None: - deserializer = cast(Deserializer[U, V], self.deserializer) - else: - deserializer = doc_deserializer - return StandardCollection[T, U, V]( - self._executor, name, serializer, deserializer + self._executor, + name, + self._get_doc_serializer(doc_serializer), + self._get_doc_deserializer(doc_deserializer), ) async def collections( @@ -604,16 +632,11 @@ async def create_collection( def response_handler(resp: Response) -> StandardCollection[T, U, V]: if not resp.is_success: raise CollectionCreateError(resp, request) - if doc_serializer is None: - serializer = cast(Serializer[T], self.serializer) - else: - serializer = doc_serializer - if doc_deserializer is None: - deserializer = cast(Deserializer[U, V], self.deserializer) - else: - deserializer = doc_deserializer return StandardCollection[T, U, V]( - self._executor, name, serializer, deserializer + self._executor, + name, + self._get_doc_serializer(doc_serializer), + self._get_doc_deserializer(doc_deserializer), ) return await self._executor.execute(request, response_handler) @@ -661,16 +684,30 @@ def response_handler(resp: Response) -> bool: return await self._executor.execute(request, response_handler) - def graph(self, name: str) -> Graph: + def graph( + self, + name: str, + doc_serializer: Optional[Serializer[T]] = None, + doc_deserializer: Optional[Deserializer[U, V]] = None, + ) -> Graph[T, U, V]: """Return the graph API wrapper. Args: name (str): Graph name. + doc_serializer (Serializer): Custom document serializer. + This will be used only for document operations. + doc_deserializer (Deserializer): Custom document deserializer. + This will be used only for document operations. Returns: Graph: Graph API wrapper. """ - return Graph(self._executor, name) + return Graph[T, U, V]( + self._executor, + name, + self._get_doc_serializer(doc_serializer), + self._get_doc_deserializer(doc_deserializer), + ) async def has_graph(self, name: str) -> Result[bool]: """Check if a graph exists in the database. @@ -679,7 +716,7 @@ async def has_graph(self, name: str) -> Result[bool]: name (str): Graph name. Returns: - bool: True if the graph exists, False otherwise. + bool: `True` if the graph exists, `False` otherwise. Raises: GraphListError: If the operation fails. @@ -720,17 +757,23 @@ def response_handler(resp: Response) -> List[GraphProperties]: async def create_graph( self, name: str, + doc_serializer: Optional[Serializer[T]] = None, + doc_deserializer: Optional[Deserializer[U, V]] = None, edge_definitions: Optional[Sequence[Json]] = None, is_disjoint: Optional[bool] = None, is_smart: Optional[bool] = None, options: Optional[GraphOptions | Json] = None, orphan_collections: Optional[Sequence[str]] = None, wait_for_sync: Optional[bool] = None, - ) -> Result[Graph]: + ) -> Result[Graph[T, U, V]]: """Create a new graph. Args: name (str): Graph name. + doc_serializer (Serializer): Custom document serializer. + This will be used only for document operations. + doc_deserializer (Deserializer): Custom document deserializer. + This will be used only for document operations. 
edge_definitions (list | None): List of edge definitions, where each edge definition entry is a dictionary with fields "collection" (name of the edge collection), "from" (list of vertex collection names) and "to" @@ -782,10 +825,15 @@ async def create_graph( params=params, ) - def response_handler(resp: Response) -> Graph: - if resp.is_success: - return Graph(self._executor, name) - raise GraphCreateError(resp, request) + def response_handler(resp: Response) -> Graph[T, U, V]: + if not resp.is_success: + raise GraphCreateError(resp, request) + return Graph[T, U, V]( + self._executor, + name, + self._get_doc_serializer(doc_serializer), + self._get_doc_deserializer(doc_deserializer), + ) return await self._executor.execute(request, response_handler) diff --git a/arangoasync/exceptions.py b/arangoasync/exceptions.py index a62e64e..c4ee40a 100644 --- a/arangoasync/exceptions.py +++ b/arangoasync/exceptions.py @@ -263,6 +263,30 @@ class DocumentUpdateError(ArangoServerError): """Failed to update document.""" +class EdgeCollectionListError(ArangoServerError): + """Failed to retrieve edge collections.""" + + +class EdgeDefinitionListError(ArangoServerError): + """Failed to retrieve edge definitions.""" + + +class EdgeDefinitionCreateError(ArangoServerError): + """Failed to create edge definition.""" + + +class EdgeDefinitionReplaceError(ArangoServerError): + """Failed to replace edge definition.""" + + +class EdgeDefinitionDeleteError(ArangoServerError): + """Failed to delete edge definition.""" + + +class EdgeListError(ArangoServerError): + """Failed to retrieve edges coming in and out of a vertex.""" + + class GraphCreateError(ArangoServerError): """Failed to create the graph.""" @@ -275,6 +299,10 @@ class GraphListError(ArangoServerError): """Failed to retrieve graphs.""" +class GraphPropertiesError(ArangoServerError): + """Failed to retrieve graph properties.""" + + class IndexCreateError(ArangoServerError): """Failed to create collection index.""" @@ -389,3 +417,15 @@ class UserReplaceError(ArangoServerError): class UserUpdateError(ArangoServerError): """Failed to update user.""" + + +class VertexCollectionCreateError(ArangoServerError): + """Failed to create vertex collection.""" + + +class VertexCollectionDeleteError(ArangoServerError): + """Failed to delete vertex collection.""" + + +class VertexCollectionListError(ArangoServerError): + """Failed to retrieve vertex collections.""" diff --git a/arangoasync/graph.py b/arangoasync/graph.py index 2047d96..059a53e 100644 --- a/arangoasync/graph.py +++ b/arangoasync/graph.py @@ -1,16 +1,60 @@ +__all__ = ["Graph"] + + +from typing import Generic, List, Literal, Optional, Sequence, TypeVar, cast + +from arangoasync.collection import Collection, EdgeCollection, VertexCollection +from arangoasync.exceptions import ( + EdgeCollectionListError, + EdgeDefinitionCreateError, + EdgeDefinitionDeleteError, + EdgeDefinitionListError, + EdgeDefinitionReplaceError, + GraphPropertiesError, + VertexCollectionCreateError, + VertexCollectionDeleteError, + VertexCollectionListError, +) from arangoasync.executor import ApiExecutor +from arangoasync.request import Method, Request +from arangoasync.response import Response +from arangoasync.result import Result +from arangoasync.serialization import Deserializer, Serializer +from arangoasync.typings import ( + EdgeDefinitionOptions, + GraphProperties, + Json, + Jsons, + Params, + VertexCollectionOptions, +) +T = TypeVar("T") # Serializer type +U = TypeVar("U") # Deserializer loads +V = TypeVar("V") # Deserializer 
loads_many

-class Graph:
+
+class Graph(Generic[T, U, V]):
     """Graph API wrapper, representing a graph in ArangoDB.

     Args:
-        executor: API executor. Required to execute the API requests.
+        executor (ApiExecutor): Required to execute the API requests.
+        name (str): Graph name.
+        doc_serializer (Serializer): Document serializer.
+        doc_deserializer (Deserializer): Document deserializer.
     """

-    def __init__(self, executor: ApiExecutor, name: str) -> None:
+    def __init__(
+        self,
+        executor: ApiExecutor,
+        name: str,
+        doc_serializer: Serializer[T],
+        doc_deserializer: Deserializer[U, V],
+    ) -> None:
         self._executor = executor
         self._name = name
+        self._doc_serializer = doc_serializer
+        self._doc_deserializer = doc_deserializer

     def __repr__(self) -> str:
         return f"<Graph {self._name}>"

@@ -19,3 +63,988 @@ def __repr__(self) -> str:
     def name(self) -> str:
         """Name of the graph."""
         return self._name
+
+    @property
+    def db_name(self) -> str:
+        """Return the name of the current database.
+
+        Returns:
+            str: Database name.
+        """
+        return self._executor.db_name
+
+    @property
+    def serializer(self) -> Serializer[Json]:
+        """Return the serializer."""
+        return self._executor.serializer
+
+    @property
+    def deserializer(self) -> Deserializer[Json, Jsons]:
+        """Return the deserializer."""
+        return self._executor.deserializer
+
+    async def properties(self) -> Result[GraphProperties]:
+        """Get the properties of the graph.
+
+        Returns:
+            GraphProperties: Properties of the graph.
+
+        Raises:
+            GraphPropertiesError: If the operation fails.
+
+        References:
+            - `get-a-graph `__
+        """  # noqa: E501
+        request = Request(method=Method.GET, endpoint=f"/_api/gharial/{self._name}")
+
+        def response_handler(resp: Response) -> GraphProperties:
+            if not resp.is_success:
+                raise GraphPropertiesError(resp, request)
+            body = self.deserializer.loads(resp.raw_body)
+            return GraphProperties(body["graph"])
+
+        return await self._executor.execute(request, response_handler)
+
+    def vertex_collection(self, name: str) -> VertexCollection[T, U, V]:
+        """Return the vertex collection API wrapper.
+
+        Args:
+            name (str): Vertex collection name.
+
+        Returns:
+            VertexCollection: Vertex collection API wrapper.
+        """
+        return VertexCollection[T, U, V](
+            executor=self._executor,
+            graph=self._name,
+            name=name,
+            doc_serializer=self._doc_serializer,
+            doc_deserializer=self._doc_deserializer,
+        )
+
+    async def vertex_collections(self) -> Result[List[str]]:
+        """Get the names of all vertex collections in the graph.
+
+        Returns:
+            list: List of vertex collection names.
+
+        Raises:
+            VertexCollectionListError: If the operation fails.
+
+        References:
+            - `list-vertex-collections `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.GET,
+            endpoint=f"/_api/gharial/{self._name}/vertex",
+        )
+
+        def response_handler(resp: Response) -> List[str]:
+            if not resp.is_success:
+                raise VertexCollectionListError(resp, request)
+            body = self.deserializer.loads(resp.raw_body)
+            return list(sorted(body["collections"]))
+
+        return await self._executor.execute(request, response_handler)
+
+    async def has_vertex_collection(self, name: str) -> Result[bool]:
+        """Check if the graph has the given vertex collection.
+
+        Args:
+            name (str): Vertex collection name.
+
+        Returns:
+            bool: `True` if the graph has the vertex collection, `False` otherwise.
+
+        Raises:
+            VertexCollectionListError: If the operation fails.
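+
+        Example:
+            A minimal usage sketch (the graph and collection names are
+            illustrative only):
+
+            .. code-block:: python
+
+                school = db.graph("school")
+                if not await school.has_vertex_collection("students"):
+                    await school.create_vertex_collection("students")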
+ """ + request = Request( + method=Method.GET, + endpoint=f"/_api/gharial/{self._name}/vertex", + ) + + def response_handler(resp: Response) -> bool: + if not resp.is_success: + raise VertexCollectionListError(resp, request) + body = self.deserializer.loads(resp.raw_body) + return name in body["collections"] + + return await self._executor.execute(request, response_handler) + + async def create_vertex_collection( + self, + name: str, + options: Optional[VertexCollectionOptions | Json] = None, + ) -> Result[VertexCollection[T, U, V]]: + """Create a vertex collection in the graph. + + Args: + name (str): Vertex collection name. + options (dict | VertexCollectionOptions | None): Extra options for + creating vertex collections. + + Returns: + VertexCollection: Vertex collection API wrapper. + + Raises: + VertexCollectionCreateError: If the operation fails. + + References: + - `add-a-vertex-collection `__ + """ # noqa: E501 + data: Json = {"collection": name} + + if options is not None: + if isinstance(options, VertexCollectionOptions): + data["options"] = options.to_dict() + else: + data["options"] = options + + request = Request( + method=Method.POST, + endpoint=f"/_api/gharial/{self._name}/vertex", + data=self.serializer.dumps(data), + ) + + def response_handler(resp: Response) -> VertexCollection[T, U, V]: + if not resp.is_success: + raise VertexCollectionCreateError(resp, request) + return self.vertex_collection(name) + + return await self._executor.execute(request, response_handler) + + async def delete_vertex_collection(self, name: str, purge: bool = False) -> None: + """Remove a vertex collection from the graph. + + Args: + name (str): Vertex collection name. + purge (bool): If set to `True`, the vertex collection is not just deleted + from the graph but also from the database completely. Note that you + cannot remove vertex collections that are used in one of the edge + definitions of the graph. + + Raises: + VertexCollectionDeleteError: If the operation fails. + + References: + - `remove-a-vertex-collection `__ + """ # noqa: E501 + request = Request( + method=Method.DELETE, + endpoint=f"/_api/gharial/{self._name}/vertex/{name}", + params={"dropCollection": purge}, + ) + + def response_handler(resp: Response) -> None: + if not resp.is_success: + raise VertexCollectionDeleteError(resp, request) + + await self._executor.execute(request, response_handler) + + async def has_vertex( + self, + vertex: str | Json, + allow_dirty_read: bool = False, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + ) -> Result[bool]: + """Check if the vertex exists in the graph. + + Args: + vertex (str | dict): Document ID, key or body. + Document body must contain the "_id" or "_key" field. + allow_dirty_read (bool): Allow reads from followers in a cluster. + if_match (str | None): The document is returned, if it has the same + revision as the given ETag. + if_none_match (str | None): The document is returned, if it has a + different revision than the given ETag. + + Returns: + `True` if the document exists, `False` otherwise. + + Raises: + DocumentRevisionError: If the revision is incorrect. + DocumentGetError: If retrieval fails. 
+ """ # noqa: E501 + col = Collection.get_col_name(vertex) + return await self.vertex_collection(col).has( + vertex, + allow_dirty_read=allow_dirty_read, + if_match=if_match, + if_none_match=if_none_match, + ) + + async def vertex( + self, + vertex: str | Json, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + ) -> Result[Optional[Json]]: + """Return a vertex document. + + Args: + vertex (str | dict): Document ID, key or body. + Document body must contain the "_id" or "_key" field. + if_match (str | None): The document is returned, if it has the same + revision as the given ETag. + if_none_match (str | None): The document is returned, if it has a + different revision than the given ETag. + + Returns: + Document or `None` if not found. + + Raises: + DocumentRevisionError: If the revision is incorrect. + DocumentGetError: If retrieval fails. + DocumentParseError: If the document is malformed. + + References: + - `get-a-vertex `__ + """ # noqa: E501 + col = Collection.get_col_name(vertex) + return await self.vertex_collection(col).get( + vertex, + if_match=if_match, + if_none_match=if_none_match, + ) + + async def insert_vertex( + self, + collection: str, + vertex: T, + wait_for_sync: Optional[bool] = None, + return_new: Optional[bool] = None, + ) -> Result[Json]: + """Insert a new vertex document. + + Args: + collection (str): Name of the vertex collection to insert the document into. + vertex (dict): Document to insert. If it contains the "_key" or "_id" + field, the value is used as the key of the new document (otherwise + it is auto-generated). Any "_rev" field is ignored. + wait_for_sync (bool | None): Wait until document has been synced to disk. + return_new (bool | None): Additionally return the complete new document + under the attribute `new` in the result. + + Returns: + dict: Document metadata (e.g. document id, key, revision). + If `return_new` is specified, the result contains the document + metadata in the "vertex" field and the new document in the "new" field. + + Raises: + DocumentInsertError: If insertion fails. + DocumentParseError: If the document is malformed. + + References: + - `create-a-vertex `__ + """ # noqa: E501 + return await self.vertex_collection(collection).insert( + vertex, + wait_for_sync=wait_for_sync, + return_new=return_new, + ) + + async def update_vertex( + self, + vertex: T, + wait_for_sync: Optional[bool] = None, + keep_null: Optional[bool] = None, + return_new: Optional[bool] = None, + return_old: Optional[bool] = None, + if_match: Optional[str] = None, + ) -> Result[Json]: + """Update a vertex in the graph. + + Args: + vertex (dict): Partial or full document with the updated values. + It must contain the "_key" or "_id" field. + wait_for_sync (bool | None): Wait until document has been synced to disk. + keep_null (bool | None): If the intention is to delete existing attributes + with the patch command, set this parameter to `False`. + return_new (bool | None): Additionally return the complete new document + under the attribute `new` in the result. + return_old (bool | None): Additionally return the complete old document + under the attribute `old` in the result. + if_match (str | None): You can conditionally update a document based on a + target revision id by using the "if-match" HTTP header. + + Returns: + dict: Document metadata (e.g. document id, key, revision). + If `return_new` or "return_old" are specified, the result contains + the document metadata in the "vertex" field and two additional fields + ("new" and "old"). 
+ + Raises: + DocumentUpdateError: If update fails. + + References: + - `update-a-vertex `__ + """ # noqa: E501 + col = Collection.get_col_name(cast(Json | str, vertex)) + return await self.vertex_collection(col).update( + vertex, + wait_for_sync=wait_for_sync, + keep_null=keep_null, + return_new=return_new, + return_old=return_old, + if_match=if_match, + ) + + async def replace_vertex( + self, + vertex: T, + wait_for_sync: Optional[bool] = None, + keep_null: Optional[bool] = None, + return_new: Optional[bool] = None, + return_old: Optional[bool] = None, + if_match: Optional[str] = None, + ) -> Result[Json]: + """Replace a vertex in the graph. + + Args: + vertex (dict): New document. It must contain the "_key" or "_id" field. + wait_for_sync (bool | None): Wait until document has been synced to disk. + keep_null (bool | None): If the intention is to delete existing attributes + with the patch command, set this parameter to `False`. + return_new (bool | None): Additionally return the complete new document + under the attribute `new` in the result. + return_old (bool | None): Additionally return the complete old document + under the attribute `old` in the result. + if_match (str | None): You can conditionally replace a document based on a + target revision id by using the "if-match" HTTP header. + + Returns: + dict: Document metadata (e.g. document id, key, revision). + If `return_new` or "return_old" are specified, the result contains + the document metadata in the "vertex" field and two additional fields + ("new" and "old"). + + Raises: + DocumentRevisionError: If precondition was violated. + DocumentReplaceError: If replace fails. + + References: + - `replace-a-vertex `__ + """ # noqa: E501 + col = Collection.get_col_name(cast(Json | str, vertex)) + return await self.vertex_collection(col).replace( + vertex, + wait_for_sync=wait_for_sync, + keep_null=keep_null, + return_new=return_new, + return_old=return_old, + if_match=if_match, + ) + + async def delete_vertex( + self, + vertex: T, + ignore_missing: bool = False, + wait_for_sync: Optional[bool] = None, + return_old: Optional[bool] = None, + if_match: Optional[str] = None, + ) -> Result[bool | Json]: + """Delete a vertex in the graph. + + Args: + vertex (dict): Document ID, key or body. The body must contain the + "_key" or "_id" field. + ignore_missing (bool): Do not raise an exception on missing document. + wait_for_sync (bool | None): Wait until operation has been synced to disk. + return_old (bool | None): Additionally return the complete old document + under the attribute `old` in the result. + if_match (str | None): You can conditionally replace a document based on a + target revision id by using the "if-match" HTTP header. + + Returns: + bool | dict: `True` if vertex was deleted successfully, `False` if vertex + was not found and **ignore_missing** was set to `True` (does not apply + in transactions). Old document is returned if **return_old** is set + to `True`. + + Raises: + DocumentRevisionError: If precondition was violated. + DocumentDeleteError: If deletion fails. + + References: + - `remove-a-vertex `__ + """ # noqa: E501 + col = Collection.get_col_name(cast(Json | str, vertex)) + return await self.vertex_collection(col).delete( + vertex, + ignore_missing=ignore_missing, + wait_for_sync=wait_for_sync, + return_old=return_old, + if_match=if_match, + ) + + def edge_collection(self, name: str) -> EdgeCollection[T, U, V]: + """Returns the edge collection API wrapper. + + Args: + name (str): Edge collection name. 
+ + Returns: + EdgeCollection: Edge collection API wrapper. + """ + return EdgeCollection[T, U, V]( + executor=self._executor, + graph=self._name, + name=name, + doc_serializer=self._doc_serializer, + doc_deserializer=self._doc_deserializer, + ) + + async def edge_definitions(self) -> Result[Jsons]: + """Return the edge definitions from the graph. + + Returns: + list: List of edge definitions. + + Raises: + EdgeDefinitionListError: If the operation fails. + """ + request = Request(method=Method.GET, endpoint=f"/_api/gharial/{self._name}") + + def response_handler(resp: Response) -> Jsons: + if not resp.is_success: + raise EdgeDefinitionListError(resp, request) + body = self.deserializer.loads(resp.raw_body) + properties = GraphProperties(body["graph"]) + edge_definitions = properties.format( + GraphProperties.compatibility_formatter + )["edge_definitions"] + return cast(Jsons, edge_definitions) + + return await self._executor.execute(request, response_handler) + + async def has_edge_definition(self, name: str) -> Result[bool]: + """Check if the graph has the given edge definition. + + Returns: + bool: `True` if the graph has the edge definitions, `False` otherwise. + + Raises: + EdgeDefinitionListError: If the operation fails. + """ + request = Request(method=Method.GET, endpoint=f"/_api/gharial/{self._name}") + + def response_handler(resp: Response) -> bool: + if not resp.is_success: + raise EdgeDefinitionListError(resp, request) + body = self.deserializer.loads(resp.raw_body) + return any( + edge_definition["collection"] == name + for edge_definition in body["graph"]["edgeDefinitions"] + ) + + return await self._executor.execute(request, response_handler) + + async def edge_collections(self) -> Result[List[str]]: + """Get the names of all edge collections in the graph. + + Returns: + list: List of edge collection names. + + Raises: + EdgeCollectionListError: If the operation fails. + + References: + - `list-edge-collections `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint=f"/_api/gharial/{self._name}/edge", + ) + + def response_handler(resp: Response) -> List[str]: + if not resp.is_success: + raise EdgeCollectionListError(resp, request) + body = self.deserializer.loads(resp.raw_body) + return list(sorted(body["collections"])) + + return await self._executor.execute(request, response_handler) + + async def create_edge_definition( + self, + edge_collection: str, + from_vertex_collections: Sequence[str], + to_vertex_collections: Sequence[str], + options: Optional[EdgeDefinitionOptions | Json] = None, + ) -> Result[EdgeCollection[T, U, V]]: + """Create an edge definition in the graph. + + This edge definition has to contain a collection and an array of each from + and to vertex collections. + + .. code-block:: python + + { + "edge_collection": "edge_collection_name", + "from_vertex_collections": ["from_vertex_collection_name"], + "to_vertex_collections": ["to_vertex_collection_name"] + } + + Args: + edge_collection (str): Edge collection name. + from_vertex_collections (list): List of vertex collections + that can be used as the "from" vertex in edges. + to_vertex_collections (list): List of vertex collections + that can be used as the "to" vertex in edges. + options (dict | EdgeDefinitionOptions | None): Extra options for + creating edge definitions. + + Returns: + EdgeCollection: Edge collection API wrapper. + + Raises: + EdgeDefinitionCreateError: If the operation fails. 
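+
+        Example:
+            A minimal usage sketch (all names are illustrative only):
+
+            .. code-block:: python
+
+                school = db.graph("school")
+                teaches = await school.create_edge_definition(
+                    edge_collection="teaches",
+                    from_vertex_collections=["teachers"],
+                    to_vertex_collections=["lectures"],
+                )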
+
+        References:
+            - `add-an-edge-definition `__
+        """  # noqa: E501
+        data: Json = {
+            "collection": edge_collection,
+            "from": from_vertex_collections,
+            "to": to_vertex_collections,
+        }
+
+        if options is not None:
+            if isinstance(options, EdgeDefinitionOptions):
+                data["options"] = options.to_dict()
+            else:
+                data["options"] = options
+
+        request = Request(
+            method=Method.POST,
+            endpoint=f"/_api/gharial/{self._name}/edge",
+            data=self.serializer.dumps(data),
+        )
+
+        def response_handler(resp: Response) -> EdgeCollection[T, U, V]:
+            if not resp.is_success:
+                raise EdgeDefinitionCreateError(resp, request)
+            return self.edge_collection(edge_collection)
+
+        return await self._executor.execute(request, response_handler)
+
+    async def replace_edge_definition(
+        self,
+        edge_collection: str,
+        from_vertex_collections: Sequence[str],
+        to_vertex_collections: Sequence[str],
+        options: Optional[EdgeDefinitionOptions | Json] = None,
+        wait_for_sync: Optional[bool] = None,
+        drop_collections: Optional[bool] = None,
+    ) -> Result[EdgeCollection[T, U, V]]:
+        """Replace an edge definition.
+
+        Args:
+            edge_collection (str): Edge collection name.
+            from_vertex_collections (list): Names of "from" vertex collections.
+            to_vertex_collections (list): Names of "to" vertex collections.
+            options (dict | EdgeDefinitionOptions | None): Extra options for
+                modifying collections within this edge definition.
+            wait_for_sync (bool | None): If set to `True`, the operation waits for
+                data to be synced to disk before returning.
+            drop_collections (bool | None): Drop the edge collection in addition to
+                removing it from the graph. The collection is only dropped if it is
+                not used in other graphs.
+
+        Returns:
+            EdgeCollection: API wrapper.
+
+        Raises:
+            EdgeDefinitionReplaceError: If the operation fails.
+
+        References:
+            - `replace-an-edge-definition `__
+        """  # noqa: E501
+        data: Json = {
+            "collection": edge_collection,
+            "from": from_vertex_collections,
+            "to": to_vertex_collections,
+        }
+        if options is not None:
+            if isinstance(options, EdgeDefinitionOptions):
+                data["options"] = options.to_dict()
+            else:
+                data["options"] = options
+
+        params: Params = {}
+        if wait_for_sync is not None:
+            params["waitForSync"] = wait_for_sync
+        if drop_collections is not None:
+            params["dropCollections"] = drop_collections
+
+        request = Request(
+            method=Method.PUT,
+            endpoint=f"/_api/gharial/{self._name}/edge/{edge_collection}",
+            data=self.serializer.dumps(data),
+            params=params,
+        )
+
+        def response_handler(resp: Response) -> EdgeCollection[T, U, V]:
+            if resp.is_success:
+                return self.edge_collection(edge_collection)
+            raise EdgeDefinitionReplaceError(resp, request)
+
+        return await self._executor.execute(request, response_handler)
+
+    async def delete_edge_definition(
+        self,
+        name: str,
+        drop_collections: Optional[bool] = None,
+        wait_for_sync: Optional[bool] = None,
+    ) -> None:
+        """Delete an edge definition from the graph.
+
+        Args:
+            name (str): Edge collection name.
+            drop_collections (bool | None): If set to `True`, the edge definition is not
+                just removed from the graph but the edge collection is also deleted
+                completely from the database.
+            wait_for_sync (bool | None): If set to `True`, the operation waits for
+                changes to be synced to disk before returning.
+
+        Raises:
+            EdgeDefinitionDeleteError: If the operation fails.
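+
+        Example:
+            A minimal usage sketch (names are illustrative only); passing
+            `drop_collections=True` also drops the collection itself:
+
+            .. code-block:: python
+
+                school = db.graph("school")
+                await school.delete_edge_definition("teaches", drop_collections=True)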
+
+        References:
+            - `remove-an-edge-definition `__
+        """  # noqa: E501
+        params: Params = {}
+        if drop_collections is not None:
+            params["dropCollections"] = drop_collections
+        if wait_for_sync is not None:
+            params["waitForSync"] = wait_for_sync
+
+        request = Request(
+            method=Method.DELETE,
+            endpoint=f"/_api/gharial/{self._name}/edge/{name}",
+            params=params,
+        )
+
+        def response_handler(resp: Response) -> None:
+            if not resp.is_success:
+                raise EdgeDefinitionDeleteError(resp, request)
+
+        await self._executor.execute(request, response_handler)
+
+    async def has_edge(
+        self,
+        edge: str | Json,
+        allow_dirty_read: bool = False,
+        if_match: Optional[str] = None,
+        if_none_match: Optional[str] = None,
+    ) -> Result[bool]:
+        """Check if the edge exists in the graph.
+
+        Args:
+            edge (str | dict): Document ID, key or body.
+                Document body must contain the "_id" or "_key" field.
+            allow_dirty_read (bool): Allow reads from followers in a cluster.
+            if_match (str | None): The document is returned, if it has the same
+                revision as the given ETag.
+            if_none_match (str | None): The document is returned, if it has a
+                different revision than the given ETag.
+
+        Returns:
+            `True` if the document exists, `False` otherwise.
+
+        Raises:
+            DocumentRevisionError: If the revision is incorrect.
+            DocumentGetError: If retrieval fails.
+        """  # noqa: E501
+        col = Collection.get_col_name(edge)
+        return await self.edge_collection(col).has(
+            edge,
+            allow_dirty_read=allow_dirty_read,
+            if_match=if_match,
+            if_none_match=if_none_match,
+        )
+
+    async def edge(
+        self,
+        edge: str | Json,
+        rev: Optional[str] = None,
+        if_match: Optional[str] = None,
+        if_none_match: Optional[str] = None,
+    ) -> Result[Optional[Json]]:
+        """Return an edge from the graph.
+
+        Args:
+            edge (str | dict): Document ID, key or body.
+                Document body must contain the "_id" or "_key" field.
+            rev (str | None): If this is set a document is only returned if it
+                has exactly this revision.
+            if_match (str | None): The document is returned, if it has the same
+                revision as the given ETag.
+            if_none_match (str | None): The document is returned, if it has a
+                different revision than the given ETag.
+
+        Returns:
+            dict | None: Document or `None` if not found.
+
+        Raises:
+            DocumentRevisionError: If the revision is incorrect.
+            DocumentGetError: If retrieval fails.
+            DocumentParseError: If the document is malformed.
+
+        References:
+            - `get-an-edge `__
+        """  # noqa: E501
+        col = Collection.get_col_name(edge)
+        return await self.edge_collection(col).get(
+            edge,
+            rev=rev,
+            if_match=if_match,
+            if_none_match=if_none_match,
+        )
+
+    async def insert_edge(
+        self,
+        collection: str,
+        edge: T,
+        wait_for_sync: Optional[bool] = None,
+        return_new: Optional[bool] = None,
+    ) -> Result[Json]:
+        """Insert a new edge document.
+
+        Args:
+            collection (str): Name of the edge collection to insert the document into.
+            edge (dict): Document to insert. It must contain "_from" and
+                "_to" fields. If it contains the "_key" or "_id"
+                field, the value is used as the key of the new document (otherwise
+                it is auto-generated). Any "_rev" field is ignored.
+            wait_for_sync (bool | None): Wait until document has been synced to disk.
+            return_new (bool | None): Additionally return the complete new document
+                under the attribute `new` in the result.
+
+        Returns:
+            dict: Document metadata (e.g. document id, key, revision).
+                If `return_new` is specified, the result contains the document
+                metadata in the "edge" field and the new document in the "new" field.
+
+        Raises:
+            DocumentInsertError: If insertion fails.
+            DocumentParseError: If the document is malformed.
+
+        References:
+            - `create-an-edge `__
+        """  # noqa: E501
+        return await self.edge_collection(collection).insert(
+            edge,
+            wait_for_sync=wait_for_sync,
+            return_new=return_new,
+        )
+
+    async def update_edge(
+        self,
+        edge: T,
+        wait_for_sync: Optional[bool] = None,
+        keep_null: Optional[bool] = None,
+        return_new: Optional[bool] = None,
+        return_old: Optional[bool] = None,
+        if_match: Optional[str] = None,
+    ) -> Result[Json]:
+        """Update an edge in the graph.
+
+        Args:
+            edge (dict): Partial or full document with the updated values.
+                It must contain the "_key" or "_id" field, along with "_from" and
+                "_to" fields.
+            wait_for_sync (bool | None): Wait until document has been synced to disk.
+            keep_null (bool | None): If the intention is to delete existing attributes
+                with the patch command, set this parameter to `False`.
+            return_new (bool | None): Additionally return the complete new document
+                under the attribute `new` in the result.
+            return_old (bool | None): Additionally return the complete old document
+                under the attribute `old` in the result.
+            if_match (str | None): You can conditionally update a document based on a
+                target revision id by using the "if-match" HTTP header.
+
+        Returns:
+            dict: Document metadata (e.g. document id, key, revision).
+                If `return_new` or `return_old` are specified, the result contains
+                the document metadata in the "edge" field and two additional fields
+                ("new" and "old").
+
+        Raises:
+            DocumentUpdateError: If update fails.
+
+        References:
+            - `update-an-edge `__
+        """  # noqa: E501
+        col = Collection.get_col_name(cast(Json | str, edge))
+        return await self.edge_collection(col).update(
+            edge,
+            wait_for_sync=wait_for_sync,
+            keep_null=keep_null,
+            return_new=return_new,
+            return_old=return_old,
+            if_match=if_match,
+        )
+
+    async def replace_edge(
+        self,
+        edge: T,
+        wait_for_sync: Optional[bool] = None,
+        keep_null: Optional[bool] = None,
+        return_new: Optional[bool] = None,
+        return_old: Optional[bool] = None,
+        if_match: Optional[str] = None,
+    ) -> Result[Json]:
+        """Replace an edge in the graph.
+
+        Args:
+            edge (dict): New document with the replacement values.
+                It must contain the "_key" or "_id" field, along with "_from" and
+                "_to" fields.
+            wait_for_sync (bool | None): Wait until document has been synced to disk.
+            keep_null (bool | None): If the intention is to delete existing attributes
+                with the patch command, set this parameter to `False`.
+            return_new (bool | None): Additionally return the complete new document
+                under the attribute `new` in the result.
+            return_old (bool | None): Additionally return the complete old document
+                under the attribute `old` in the result.
+            if_match (str | None): You can conditionally replace a document based on a
+                target revision id by using the "if-match" HTTP header.
+
+        Returns:
+            dict: Document metadata (e.g. document id, key, revision).
+                If `return_new` or `return_old` are specified, the result contains
+                the document metadata in the "edge" field and two additional fields
+                ("new" and "old").
+
+        Raises:
+            DocumentRevisionError: If precondition was violated.
+            DocumentReplaceError: If replace fails.
+
+        References:
+            - `replace-an-edge `__
+        """  # noqa: E501
+        col = Collection.get_col_name(cast(Json | str, edge))
+        return await self.edge_collection(col).replace(
+            edge,
+            wait_for_sync=wait_for_sync,
+            keep_null=keep_null,
+            return_new=return_new,
+            return_old=return_old,
+            if_match=if_match,
+        )
+
+    async def delete_edge(
+        self,
+        edge: T,
+        ignore_missing: bool = False,
+        wait_for_sync: Optional[bool] = None,
+        return_old: Optional[bool] = None,
+        if_match: Optional[str] = None,
+    ) -> Result[bool | Json]:
+        """Delete an edge from the graph.
+
+        Args:
+            edge (dict): Document body. It must contain the "_key" or
+                "_id" field.
+            ignore_missing (bool): Do not raise an exception on missing document.
+            wait_for_sync (bool | None): Wait until operation has been synced to disk.
+            return_old (bool | None): Additionally return the complete old document
+                under the attribute `old` in the result.
+            if_match (str | None): You can conditionally delete a document based on a
+                target revision id by using the "if-match" HTTP header.
+
+        Returns:
+            bool | dict: `True` if the edge was deleted successfully, `False` if the
+                edge was not found and **ignore_missing** was set to `True` (does not
+                apply in transactions). Old document is returned if **return_old** is
+                set to `True`.
+
+        Raises:
+            DocumentRevisionError: If precondition was violated.
+            DocumentDeleteError: If deletion fails.
+
+        References:
+            - `remove-an-edge `__
+        """  # noqa: E501
+        col = Collection.get_col_name(cast(Json | str, edge))
+        return await self.edge_collection(col).delete(
+            edge,
+            ignore_missing=ignore_missing,
+            wait_for_sync=wait_for_sync,
+            return_old=return_old,
+            if_match=if_match,
+        )
+
+    async def edges(
+        self,
+        collection: str,
+        vertex: str | Json,
+        direction: Optional[Literal["in", "out"]] = None,
+        allow_dirty_read: Optional[bool] = None,
+    ) -> Result[Json]:
+        """Return the edges starting or ending at the specified vertex.
+
+        Args:
+            collection (str): Name of the edge collection to return edges from.
+            vertex (str | dict): Document ID, key or body.
+            direction (str | None): Direction of the edges to return. Selects `in`
+                or `out` direction for edges. If not set, any edges are returned.
+            allow_dirty_read (bool | None): Allow reads from followers in a cluster.
+
+        Returns:
+            dict: List of edges and statistics.
+
+        Raises:
+            EdgeListError: If retrieval fails.
+
+        References:
+            - `get-inbound-and-outbound-edges `__
+        """  # noqa: E501
+        return await self.edge_collection(collection).edges(
+            vertex,
+            direction=direction,
+            allow_dirty_read=allow_dirty_read,
+        )
+
+    async def link(
+        self,
+        collection: str,
+        from_vertex: str | Json,
+        to_vertex: str | Json,
+        data: Optional[Json] = None,
+        wait_for_sync: Optional[bool] = None,
+        return_new: bool = False,
+    ) -> Result[Json]:
+        """Insert a new edge document linking the given vertices.
+
+        Args:
+            collection (str): Name of the collection to insert the edge into.
+            from_vertex (str | dict): "_from" vertex document ID or body with "_id"
+                field.
+            to_vertex (str | dict): "_to" vertex document ID or body with "_id" field.
+            data (dict | None): Any extra data for the new edge document. If it has
+                a "_key" or "_id" field, its value is used as the key of the new edge
+                document (otherwise it is auto-generated).
+            wait_for_sync (bool | None): Wait until operation has been synced to disk.
+            return_new (bool): Additionally return the complete new document
+                under the attribute `new` in the result.
+
+        Returns:
+            dict: Document metadata (e.g. document id, key, revision).
+                If `return_new` is specified, the result contains the document
+                metadata in the "edge" field and the new document in the "new" field.
+
+        Raises:
+            DocumentInsertError: If insertion fails.
+            DocumentParseError: If the document is malformed.
+        """
+        return await self.edge_collection(collection).link(
+            from_vertex,
+            to_vertex,
+            data=data,
+            wait_for_sync=wait_for_sync,
+            return_new=return_new,
+        )
diff --git a/arangoasync/typings.py b/arangoasync/typings.py
index 86c32fd..280e27e 100644
--- a/arangoasync/typings.py
+++ b/arangoasync/typings.py
@@ -1692,6 +1692,32 @@ def __init__(self, data: Json) -> None:
     def name(self) -> str:
         return cast(str, self._data["name"])
 
+    @property
+    def is_smart(self) -> bool:
+        """Check if the graph is a smart graph."""
+        return cast(bool, self._data.get("isSmart", False))
+
+    @property
+    def is_satellite(self) -> bool:
+        """Check if the graph is a satellite graph."""
+        return cast(bool, self._data.get("isSatellite", False))
+
+    @property
+    def number_of_shards(self) -> Optional[int]:
+        return cast(Optional[int], self._data.get("numberOfShards"))
+
+    @property
+    def replication_factor(self) -> Optional[int | str]:
+        return cast(Optional[int | str], self._data.get("replicationFactor"))
+
+    @property
+    def min_replication_factor(self) -> Optional[int]:
+        return cast(Optional[int], self._data.get("minReplicationFactor"))
+
+    @property
+    def write_concern(self) -> Optional[int]:
+        return cast(Optional[int], self._data.get("writeConcern"))
+
     @property
     def edge_definitions(self) -> Jsons:
         return cast(Jsons, self._data.get("edgeDefinitions", list()))
@@ -1700,6 +1726,47 @@ def edge_definitions(self) -> Jsons:
     def orphan_collections(self) -> List[str]:
         return cast(List[str], self._data.get("orphanCollections", list()))
 
+    @staticmethod
+    def compatibility_formatter(data: Json) -> Json:
+        result: Json = {}
+
+        if "_id" in data:
+            result["id"] = data["_id"]
+        if "_key" in data:
+            result["key"] = data["_key"]
+        if "name" in data:
+            result["name"] = data["name"]
+        if "_rev" in data:
+            result["revision"] = data["_rev"]
+        if "orphanCollections" in data:
+            result["orphan_collections"] = data["orphanCollections"]
+        if "edgeDefinitions" in data:
+            result["edge_definitions"] = [
+                {
+                    "edge_collection": edge_definition["collection"],
+                    "from_vertex_collections": edge_definition["from"],
+                    "to_vertex_collections": edge_definition["to"],
+                }
+                for edge_definition in data["edgeDefinitions"]
+            ]
+        if "isSmart" in data:
+            result["smart"] = data["isSmart"]
+        if "isDisjoint" in data:
+            result["disjoint"] = data["isDisjoint"]
+        if "isSatellite" in data:
+            result["is_satellite"] = data["isSatellite"]
+        if "smartGraphAttribute" in data:
+            result["smart_field"] = data["smartGraphAttribute"]
+        if "numberOfShards" in data:
+            result["shard_count"] = data["numberOfShards"]
+        if "replicationFactor" in data:
+            result["replication_factor"] = data["replicationFactor"]
+        if "minReplicationFactor" in data:
+            result["min_replication_factor"] = data["minReplicationFactor"]
+        if "writeConcern" in data:
+            result["write_concern"] = data["writeConcern"]
+        return result
+
 
 class GraphOptions(JsonWrapper):
     """Special options for graph creation.
@@ -1720,15 +1787,18 @@ class GraphOptions(JsonWrapper):
             Enterprise Edition.
         write_concern (int | None): The write concern for new collections
             in the graph.
+ + References: + - `create-a-graph `__ """ # noqa: E501 def __init__( self, - number_of_shards: Optional[int], - replication_factor: Optional[int | str], - satellites: Optional[List[str]], - smart_graph_attribute: Optional[str], - write_concern: Optional[int], + number_of_shards: Optional[int] = None, + replication_factor: Optional[int | str] = None, + satellites: Optional[List[str]] = None, + smart_graph_attribute: Optional[str] = None, + write_concern: Optional[int] = None, ) -> None: data: Json = dict() if number_of_shards is not None: @@ -1762,3 +1832,57 @@ def smart_graph_attribute(self) -> Optional[str]: @property def write_concern(self) -> Optional[int]: return cast(Optional[int], self._data.get("writeConcern")) + + +class VertexCollectionOptions(JsonWrapper): + """Special options for vertex collection creation. + + Args: + satellites (list): An array of collection names that is used to create + SatelliteCollections for a (Disjoint) SmartGraph using + SatelliteCollections (Enterprise Edition only). Each array element must + be a string and a valid collection name. + + References: + - `add-a-vertex-collection `__ + """ # noqa: E501 + + def __init__( + self, + satellites: Optional[List[str]] = None, + ) -> None: + data: Json = dict() + if satellites is not None: + data["satellites"] = satellites + super().__init__(data) + + @property + def satellites(self) -> Optional[List[str]]: + return cast(Optional[List[str]], self._data.get("satellites")) + + +class EdgeDefinitionOptions(JsonWrapper): + """Special options for edge definition creation. + + Args: + satellites (list): An array of collection names that is used to create + SatelliteCollections for a (Disjoint) SmartGraph using + SatelliteCollections (Enterprise Edition only). Each array element must + be a string and a valid collection name. + + References: + - `add-an-edge-definition `__ + """ # noqa: E501 + + def __init__( + self, + satellites: Optional[List[str]] = None, + ) -> None: + data: Json = dict() + if satellites is not None: + data["satellites"] = satellites + super().__init__(data) + + @property + def satellites(self) -> Optional[List[str]]: + return cast(Optional[List[str]], self._data.get("satellites")) diff --git a/docs/collection.rst b/docs/collection.rst index e6a846f..8dd3928 100644 --- a/docs/collection.rst +++ b/docs/collection.rst @@ -6,8 +6,10 @@ by its name which must consist only of hyphen, underscore and alphanumeric characters. There are three types of collections in python-arango: * **Standard Collection:** contains regular documents. -* **Vertex Collection:** contains vertex documents for graphs (not supported yet). -* **Edge Collection:** contains edge documents for graphs (not supported yet). +* **Vertex Collection:** contains vertex documents for graphs. See + :ref:`here ` for more details. +* **Edge Collection:** contains edge documents for graphs. See + :ref:`here ` for more details. Here is an example showing how you can manage standard collections: diff --git a/docs/document.rst b/docs/document.rst index ff9121e..571507e 100644 --- a/docs/document.rst +++ b/docs/document.rst @@ -42,6 +42,26 @@ collection: "friends": ["robin", "gordon"] } +.. _edge-documents: + +**Edge documents (edges)** are similar to standard documents but with two +additional required fields ``_from`` and ``_to``. Values of these fields must +be the handles of "from" and "to" vertex documents linked by the edge document +in question (see :doc:`graph` for details). Edge documents are contained in +:ref:`edge collections `. 
Here is an example of a valid edge
+document in the "friends" edge collection:
+
+.. code-block:: python
+
+    {
+        "_id": "friends/001",
+        "_key": "001",
+        "_rev": "_Wm3d4le--_",
+        "_from": "students/john",
+        "_to": "students/jane",
+        "closeness": 9.5
+    }
+
 Standard documents are managed via collection API wrapper:
 
 .. code-block:: python
diff --git a/docs/graph.rst b/docs/graph.rst
new file mode 100644
index 0000000..0f0bbbf
--- /dev/null
+++ b/docs/graph.rst
@@ -0,0 +1,415 @@
+Graphs
+------
+
+A **graph** consists of vertices and edges. Vertices are stored as documents in
+:ref:`vertex collections ` and edges are stored as documents in
+:ref:`edge collections `. The collections used in a graph and
+their relations are specified with :ref:`edge definitions `.
+For more information, refer to the `ArangoDB Manual`_.
+
+.. _ArangoDB Manual: https://docs.arangodb.com
+
+**Example:**
+
+.. code-block:: python
+
+    from arangoasync import ArangoClient
+    from arangoasync.auth import Auth
+
+    # Initialize the client for ArangoDB.
+    async with ArangoClient(hosts="http://localhost:8529") as client:
+        auth = Auth(username="root", password="passwd")
+
+        # Connect to "test" database as root user.
+        db = await client.db("test", auth=auth)
+
+        # List existing graphs in the database.
+        await db.graphs()
+
+        # Create a new graph named "school" if it does not already exist.
+        # This returns an API wrapper for "school" graph.
+        if await db.has_graph("school"):
+            school = db.graph("school")
+        else:
+            school = await db.create_graph("school")
+
+        # Retrieve various graph properties.
+        graph_name = school.name
+        db_name = school.db_name
+        vcols = await school.vertex_collections()
+        ecols = await school.edge_definitions()
+
+        # Delete the graph.
+        await db.delete_graph("school")
+
+.. _edge-definitions:
+
+Edge Definitions
+================
+
+An **edge definition** specifies a directed relation in a graph. A graph can
+have an arbitrary number of edge definitions. Each edge definition consists of
+the following components:
+
+* **From Vertex Collections:** contain "_from" vertices referencing "_to" vertices.
+* **To Vertex Collections:** contain "_to" vertices referenced by "_from" vertices.
+* **Edge Collection:** contains edges that link "_from" and "_to" vertices.
+
+Here is an example body of an edge definition:
+
+.. code-block:: python
+
+    {
+        "edge_collection": "teach",
+        "from_vertex_collections": ["teachers"],
+        "to_vertex_collections": ["lectures"]
+    }
+
+Here is an example showing how edge definitions are managed:
+
+.. code-block:: python
+
+    from arangoasync import ArangoClient
+    from arangoasync.auth import Auth
+
+    # Initialize the client for ArangoDB.
+    async with ArangoClient(hosts="http://localhost:8529") as client:
+        auth = Auth(username="root", password="passwd")
+
+        # Connect to "test" database as root user.
+        db = await client.db("test", auth=auth)
+
+        # Get the API wrapper for graph "school".
+        if await db.has_graph("school"):
+            school = db.graph("school")
+        else:
+            school = await db.create_graph("school")
+
+        # Create an edge definition named "teach". This creates any missing
+        # collections and returns an API wrapper for "teach" edge collection.
+        # To start, intentionally create a wrong teachers->teachers mapping.
+        if not await school.has_edge_definition("teach"):
+            await school.create_edge_definition(
+                edge_collection="teach",
+                from_vertex_collections=["teachers"],
+                to_vertex_collections=["teachers"]
+            )
+
+        # List edge definitions.
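+        # Each definition is returned as a dict with "edge_collection",
+        # "from_vertex_collections" and "to_vertex_collections" keys.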
+
+        edge_defs = await school.edge_definitions()
+
+        # Replace with the correct edge definition.
+        await school.replace_edge_definition(
+            edge_collection="teach",
+            from_vertex_collections=["teachers"],
+            to_vertex_collections=["lectures"]
+        )
+
+        # Delete the edge definition (and its collections).
+        await school.delete_edge_definition("teach", drop_collections=True)
+
+.. _vertex-collections:
+
+Vertex Collections
+==================
+
+A **vertex collection** contains vertex documents, and shares its namespace
+with all other types of collections. Each graph can have an arbitrary number of
+vertex collections. Vertex collections that are not part of any edge definition
+are called **orphan collections**. You can manage vertex documents via standard
+collection API wrappers, but using vertex collection API wrappers provides
+additional safeguards:
+
+* All modifications are executed in transactions.
+* If a vertex is deleted, all connected edges are also automatically deleted.
+
+**Example:**
+
+.. code-block:: python
+
+    from arangoasync import ArangoClient
+    from arangoasync.auth import Auth
+
+    # Initialize the client for ArangoDB.
+    async with ArangoClient(hosts="http://localhost:8529") as client:
+        auth = Auth(username="root", password="passwd")
+
+        # Connect to "test" database as root user.
+        db = await client.db("test", auth=auth)
+
+        # Get the API wrapper for graph "school".
+        school = db.graph("school")
+
+        # Create a new vertex collection named "teachers" if it does not exist.
+        # This returns an API wrapper for "teachers" vertex collection.
+        if await school.has_vertex_collection("teachers"):
+            teachers = school.vertex_collection("teachers")
+        else:
+            teachers = await school.create_vertex_collection("teachers")
+
+        # List vertex collections in the graph.
+        cols = await school.vertex_collections()
+
+        # Vertex collections have a similar interface to standard collections.
+        props = await teachers.properties()
+        await teachers.insert({"_key": "jon", "name": "Jon"})
+        await teachers.update({"_key": "jon", "age": 35})
+        await teachers.replace({"_key": "jon", "name": "Jon", "age": 36})
+        await teachers.get("jon")
+        await teachers.has("jon")
+        await teachers.delete("jon")
+
+You can also manage vertices via graph API wrappers, but you must use document
+IDs instead of keys where applicable.
+
+**Example:**
+
+.. code-block:: python
+
+    from arangoasync import ArangoClient
+    from arangoasync.auth import Auth
+
+    # Initialize the client for ArangoDB.
+    async with ArangoClient(hosts="http://localhost:8529") as client:
+        auth = Auth(username="root", password="passwd")
+
+        # Connect to "test" database as root user.
+        db = await client.db("test", auth=auth)
+
+        # Get the API wrapper for graph "school".
+        school = db.graph("school")
+
+        # Create a new vertex collection named "lectures" if it does not exist.
+        # This returns an API wrapper for "lectures" vertex collection.
+        if await school.has_vertex_collection("lectures"):
+            school.vertex_collection("lectures")
+        else:
+            await school.create_vertex_collection("lectures")
+
+        # The "_id" field is required instead of "_key" field (except for insert).
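+        # insert_vertex() takes the collection name and may use "_key"; the
+        # operations below identify the vertex by its full "_id" handle.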
+        await school.insert_vertex("lectures", {"_key": "CSC101"})
+        await school.update_vertex({"_id": "lectures/CSC101", "difficulty": "easy"})
+        await school.replace_vertex({"_id": "lectures/CSC101", "difficulty": "hard"})
+        await school.has_vertex("lectures/CSC101")
+        await school.vertex("lectures/CSC101")
+        await school.delete_vertex("lectures/CSC101")
+
+See :class:`arangoasync.graph.Graph` and :class:`arangoasync.collection.VertexCollection` for API specification.
+
+.. _edge-collections:
+
+Edge Collections
+================
+
+An **edge collection** contains :ref:`edge documents `, and
+shares its namespace with all other types of collections. You can manage edge
+documents via standard collection API wrappers, but using edge collection API
+wrappers provides additional safeguards:
+
+* All modifications are executed in transactions.
+* Edge documents are checked against the edge definitions on insert.
+
+**Example:**
+
+.. code-block:: python
+
+    from arangoasync import ArangoClient
+    from arangoasync.auth import Auth
+
+    # Initialize the client for ArangoDB.
+    async with ArangoClient(hosts="http://localhost:8529") as client:
+        auth = Auth(username="root", password="passwd")
+
+        # Connect to "test" database as root user.
+        db = await client.db("test", auth=auth)
+
+        # Get the API wrapper for graph "school".
+        if await db.has_graph("school"):
+            school = db.graph("school")
+        else:
+            school = await db.create_graph("school")
+
+        if not await school.has_vertex_collection("lectures"):
+            await school.create_vertex_collection("lectures")
+            await school.insert_vertex("lectures", {"_key": "CSC101"})
+
+        if not await school.has_vertex_collection("teachers"):
+            await school.create_vertex_collection("teachers")
+            await school.insert_vertex("teachers", {"_key": "jon"})
+
+        # Get the API wrapper for edge collection "teach".
+        if await school.has_edge_definition("teach"):
+            teach = school.edge_collection("teach")
+        else:
+            teach = await school.create_edge_definition(
+                edge_collection="teach",
+                from_vertex_collections=["teachers"],
+                to_vertex_collections=["lectures"]
+            )
+
+        # Edge collections have a similar interface to standard collections.
+        await teach.insert({
+            "_key": "jon-CSC101",
+            "_from": "teachers/jon",
+            "_to": "lectures/CSC101"
+        })
+        await teach.replace({
+            "_key": "jon-CSC101",
+            "_from": "teachers/jon",
+            "_to": "lectures/CSC101",
+            "online": False
+        })
+        await teach.update({
+            "_key": "jon-CSC101",
+            "online": True
+        })
+        await teach.has("jon-CSC101")
+        await teach.get("jon-CSC101")
+        await teach.delete("jon-CSC101")
+
+        # Create an edge between two vertices (essentially the same as insert).
+        await teach.link("teachers/jon", "lectures/CSC101", data={"online": False})
+
+        # List edges going in/out of a vertex.
+        inbound = await teach.edges("teachers/jon", direction="in")
+        outbound = await teach.edges("teachers/jon", direction="out")
+
+You can also manage edges via graph API wrappers, but you must use document
+IDs instead of keys where applicable.
+
+**Example:**
+
+.. code-block:: python
+
+    from arangoasync import ArangoClient
+    from arangoasync.auth import Auth
+
+    # Initialize the client for ArangoDB.
+    async with ArangoClient(hosts="http://localhost:8529") as client:
+        auth = Auth(username="root", password="passwd")
+
+        # Connect to "test" database as root user.
+        db = await client.db("test", auth=auth)
+
+        # Get the API wrapper for graph "school".
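+        # Reuse the graph if it already exists, otherwise create it.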
+ if await db.has_graph("school"): + school = db.graph("school") + else: + school = await db.create_graph("school") + + if not await school.has_vertex_collection("lectures"): + await school.create_vertex_collection("lectures") + await school.insert_vertex("lectures", {"_key": "CSC101"}) + + if not await school.has_vertex_collection("teachers"): + await school.create_vertex_collection("teachers") + await school.insert_vertex("teachers", {"_key": "jon"}) + + # Create the edge collection "teach". + if not await school.has_edge_definition("teach"): + await school.create_edge_definition( + edge_collection="teach", + from_vertex_collections=["teachers"], + to_vertex_collections=["lectures"] + ) + + # The "_id" field is required instead of "_key" field. + await school.insert_edge( + collection="teach", + edge={ + "_id": "teach/jon-CSC101", + "_from": "teachers/jon", + "_to": "lectures/CSC101" + } + ) + await school.replace_edge({ + "_id": "teach/jon-CSC101", + "_from": "teachers/jon", + "_to": "lectures/CSC101", + "online": False, + }) + await school.update_edge({ + "_id": "teach/jon-CSC101", + "online": True + }) + await school.has_edge("teach/jon-CSC101") + await school.edge("teach/jon-CSC101") + await school.delete_edge("teach/jon-CSC101") + await school.link("teach", "teachers/jon", "lectures/CSC101") + await school.edges("teach", "teachers/jon", direction="out") + +See :class:`arangoasync.graph.Graph` and :class:`arangoasync.graph.EdgeCollection` for API specification. + +.. _graph-traversals: + +Graph Traversals +================ + +**Graph traversals** are executed via AQL. +Each traversal can span across multiple vertex collections, and walk +over edges and vertices using various algorithms. + +**Example:** + +.. code-block:: python + + from arangoasync import ArangoClient + from arangoasync.auth import Auth + + # Initialize the client for ArangoDB. + async with ArangoClient(hosts="http://localhost:8529") as client: + auth = Auth(username="root", password="passwd") + + # Connect to "test" database as root user. + db = await client.db("test", auth=auth) + + # Get the API wrapper for graph "school". + if await db.has_graph("school"): + school = db.graph("school") + else: + school = await db.create_graph("school") + + # Create vertex collections "lectures" and "teachers" if they do not exist. + if not await school.has_vertex_collection("lectures"): + await school.create_vertex_collection("lectures") + if not await school.has_vertex_collection("teachers"): + await school.create_vertex_collection("teachers") + + # Create the edge collection "teach". + if not await school.has_edge_definition("teach"): + await school.create_edge_definition( + edge_collection="teach", + from_vertex_collections=["teachers"], + to_vertex_collections=["lectures"] + ) + + # Get API wrappers for "from" and "to" vertex collections. + teachers = school.vertex_collection("teachers") + lectures = school.vertex_collection("lectures") + + # Get the API wrapper for the edge collection. + teach = school.edge_collection("teach") + + # Insert vertices into the graph. + await teachers.insert({"_key": "jon", "name": "Professor jon"}) + await lectures.insert({"_key": "CSC101", "name": "Introduction to CS"}) + await lectures.insert({"_key": "MAT223", "name": "Linear Algebra"}) + await lectures.insert({"_key": "STA201", "name": "Statistics"}) + + # Insert edges into the graph. 
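+        # Each edge document references existing vertices via "_from" and "_to".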
+        await teach.insert({"_from": "teachers/jon", "_to": "lectures/CSC101"})
+        await teach.insert({"_from": "teachers/jon", "_to": "lectures/STA201"})
+        await teach.insert({"_from": "teachers/jon", "_to": "lectures/MAT223"})
+
+        # AQL to perform a graph traversal.
+        # Traverse 1 to 3 hops from the vertex "teachers/jon".
+        query = """
+            FOR v, e, p IN 1..3 OUTBOUND 'teachers/jon' GRAPH 'school'
+            OPTIONS { bfs: true, uniqueVertices: 'global' }
+            RETURN {vertex: v, edge: e, path: p}
+        """
+
+        # Traverse the graph in outbound direction, breadth-first.
+        async with await db.aql.execute(query) as cursor:
+            async for lecture in cursor:
+                print(lecture)
diff --git a/docs/index.rst b/docs/index.rst
index 3252629..180c0ed 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -35,6 +35,7 @@ Contents
     collection
     indexes
     document
+    graph
     aql
 
 **Specialized Features**
diff --git a/docs/overview.rst b/docs/overview.rst
index 6f1f76a..f723234 100644
--- a/docs/overview.rst
+++ b/docs/overview.rst
@@ -39,7 +39,7 @@ Here is an example showing how **python-arango-async** client can be used:
         async for doc in cursor:
             student_names.append(doc["name"])
 
-You may also use the client without a context manager, but you must ensure to close the client when done:
+You may also use the client without a context manager, but you must make sure to close the client when done.
 
 .. code-block:: python
 
@@ -61,3 +61,65 @@ You may also use the client without a context manager, but you must ensure to cl
 
     # Close the client when done.
     await client.close()
+
+Another example with `graphs`_:
+
+.. _graphs: https://docs.arangodb.com/stable/graphs/
+
+.. code-block:: python
+
+    from arangoasync import ArangoClient
+    from arangoasync.auth import Auth
+
+    # Initialize the client for ArangoDB.
+    async with ArangoClient(hosts="http://localhost:8529") as client:
+        auth = Auth(username="root", password="passwd")
+
+        # Connect to "test" database as root user.
+        db = await client.db("test", auth=auth)
+
+        # Get the API wrapper for graph "school".
+        if await db.has_graph("school"):
+            graph = db.graph("school")
+        else:
+            graph = await db.create_graph("school")
+
+        # Create vertex collections for the graph.
+        students = await graph.create_vertex_collection("students")
+        lectures = await graph.create_vertex_collection("lectures")
+
+        # Create an edge definition (relation) for the graph.
+        edges = await graph.create_edge_definition(
+            edge_collection="register",
+            from_vertex_collections=["students"],
+            to_vertex_collections=["lectures"]
+        )
+
+        # Insert vertex documents into "students" (from) vertex collection.
+        await students.insert({"_key": "01", "full_name": "Anna Smith"})
+        await students.insert({"_key": "02", "full_name": "Jake Clark"})
+        await students.insert({"_key": "03", "full_name": "Lisa Jones"})
+
+        # Insert vertex documents into "lectures" (to) vertex collection.
+        await lectures.insert({"_key": "MAT101", "title": "Calculus"})
+        await lectures.insert({"_key": "STA101", "title": "Statistics"})
+        await lectures.insert({"_key": "CSC101", "title": "Algorithms"})
+
+        # Insert edge documents into "register" edge collection.
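+        # Each edge connects a student ("_from") to a lecture ("_to").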
+        await edges.insert({"_from": "students/01", "_to": "lectures/MAT101"})
+        await edges.insert({"_from": "students/01", "_to": "lectures/STA101"})
+        await edges.insert({"_from": "students/01", "_to": "lectures/CSC101"})
+        await edges.insert({"_from": "students/02", "_to": "lectures/MAT101"})
+        await edges.insert({"_from": "students/02", "_to": "lectures/STA101"})
+        await edges.insert({"_from": "students/03", "_to": "lectures/CSC101"})
+
+        # Traverse the graph in outbound direction, breadth-first.
+        query = """
+            FOR v, e, p IN 1..3 OUTBOUND 'students/01' GRAPH 'school'
+            OPTIONS { bfs: true, uniqueVertices: 'global' }
+            RETURN {vertex: v, edge: e, path: p}
+        """
+
+        async with await db.aql.execute(query) as cursor:
+            async for doc in cursor:
+                print(doc)
diff --git a/docs/serialization.rst b/docs/serialization.rst
index 9fe520e..ed00702 100644
--- a/docs/serialization.rst
+++ b/docs/serialization.rst
@@ -80,6 +80,10 @@ that you are modeling your students data using Pydantic_. You want to be able to
 of a certain type, and also be able to read them back. More so, you would like to
 get multiple documents back using one of the formats provided by pandas_.
 
+.. note::
+    The driver assumes that the types support dictionary-like indexing, i.e. `doc["_id"]`
+    returns the id of the document.
+
 **Example:**
 
 .. code-block:: python
@@ -179,5 +183,7 @@ You would then use the custom serializer/deserializer when working with collecti
     students = await col.get_many(keys)
     assert type(students) == pd.DataFrame
 
+See a full example in this `gist `__.
+
 .. _Pydantic: https://docs.pydantic.dev/latest/
 .. _pandas: https://pandas.pydata.org/
diff --git a/docs/specs.rst b/docs/specs.rst
index dc92bd9..9983716 100644
--- a/docs/specs.rst
+++ b/docs/specs.rst
@@ -19,6 +19,9 @@ python-arango-async.
 .. automodule:: arangoasync.aql
     :members:
 
+.. automodule:: arangoasync.graph
+    :members:
+
 .. 
automodule:: arangoasync.job :members: diff --git a/tests/conftest.py b/tests/conftest.py index e91a591..98d75de 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -8,7 +8,12 @@ from arangoasync.auth import Auth, JwtToken from arangoasync.client import ArangoClient from arangoasync.typings import UserInfo -from tests.helpers import generate_col_name, generate_db_name, generate_username +from tests.helpers import ( + generate_col_name, + generate_db_name, + generate_graph_name, + generate_username, +) @dataclass @@ -19,6 +24,7 @@ class GlobalData: secret: str = None token: JwtToken = None sys_db_name: str = "_system" + graph_name: str = "test_graph" username: str = generate_username() cluster: bool = False enterprise: bool = False @@ -64,6 +70,7 @@ def pytest_configure(config): global_data.token = JwtToken.generate_token(global_data.secret) global_data.cluster = config.getoption("cluster") global_data.enterprise = config.getoption("enterprise") + global_data.graph_name = generate_graph_name() async def get_db_version(): async with ArangoClient(hosts=global_data.url) as client: @@ -215,6 +222,11 @@ async def bad_db(arango_client): ) +@pytest_asyncio.fixture +def bad_graph(bad_db): + return bad_db.graph(global_data.graph_name) + + @pytest_asyncio.fixture async def doc_col(db): col_name = generate_col_name() @@ -233,7 +245,7 @@ def db_version(): return global_data.db_version -@pytest_asyncio.fixture(scope="session", autouse=True) +@pytest_asyncio.fixture(autouse=True) async def teardown(): yield async with ArangoClient(hosts=global_data.url) as client: diff --git a/tests/helpers.py b/tests/helpers.py index cf8b3cb..8e91c26 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -19,6 +19,15 @@ def generate_col_name(): return f"test_collection_{uuid4().hex}" +def generate_graph_name(): + """Generate and return a random graph name. + + Returns: + str: Random graph name. + """ + return f"test_graph_{uuid4().hex}" + + def generate_username(): """Generate and return a random username. 
diff --git a/tests/test_graph.py b/tests/test_graph.py index 0967ff9..6d5fcbe 100644 --- a/tests/test_graph.py +++ b/tests/test_graph.py @@ -1,37 +1,404 @@ import pytest -from arangoasync.exceptions import GraphCreateError, GraphDeleteError, GraphListError +from arangoasync.exceptions import ( + DocumentDeleteError, + EdgeCollectionListError, + EdgeDefinitionDeleteError, + EdgeDefinitionListError, + EdgeDefinitionReplaceError, + EdgeListError, + GraphCreateError, + GraphDeleteError, + GraphListError, + GraphPropertiesError, + VertexCollectionCreateError, + VertexCollectionDeleteError, + VertexCollectionListError, +) +from arangoasync.typings import GraphOptions +from tests.helpers import generate_col_name, generate_graph_name @pytest.mark.asyncio async def test_graph_basic(db, bad_db): + graph1_name = generate_graph_name() # Test the graph representation - graph = db.graph("test_graph") - assert graph.name == "test_graph" - assert "test_graph" in repr(graph) + graph = db.graph(graph1_name) + assert graph.name == graph1_name + assert graph1_name in repr(graph) # Cannot find any graph + graph2_name = generate_graph_name() assert await db.graphs() == [] - assert await db.has_graph("fake_graph") is False + assert await db.has_graph(graph2_name) is False with pytest.raises(GraphListError): - await bad_db.has_graph("fake_graph") + await bad_db.has_graph(graph2_name) with pytest.raises(GraphListError): await bad_db.graphs() # Create a graph - graph = await db.create_graph("test_graph", wait_for_sync=True) - assert graph.name == "test_graph" + graph = await db.create_graph(graph1_name, wait_for_sync=True) + assert graph.name == graph1_name with pytest.raises(GraphCreateError): - await bad_db.create_graph("test_graph") + await bad_db.create_graph(graph1_name) # Check if the graph exists - assert await db.has_graph("test_graph") is True + assert await db.has_graph(graph1_name) is True graphs = await db.graphs() assert len(graphs) == 1 - assert graphs[0].name == "test_graph" + assert graphs[0].name == graph1_name # Delete the graph - await db.delete_graph("test_graph") - assert await db.has_graph("test_graph") is False + await db.delete_graph(graph1_name) + assert await db.has_graph(graph1_name) is False with pytest.raises(GraphDeleteError): - await bad_db.delete_graph("test_graph") + await bad_db.delete_graph(graph1_name) + + +@pytest.mark.asyncio +async def test_graph_properties(db, bad_graph, cluster, enterprise): + # Create a graph + name = generate_graph_name() + is_smart = cluster and enterprise + options = GraphOptions(number_of_shards=3) + graph = await db.create_graph(name, is_smart=is_smart, options=options) + + with pytest.raises(GraphPropertiesError): + await bad_graph.properties() + + # Create first vertex collection + vcol_name = generate_col_name() + vcol = await graph.create_vertex_collection(vcol_name) + assert vcol.name == vcol_name + + # Get the properties of the graph + properties = await graph.properties() + assert properties.name == name + assert properties.is_smart == is_smart + if cluster: + assert properties.number_of_shards == options.number_of_shards + assert properties.orphan_collections == [vcol_name] + + # Create second vertex collection + vcol2_name = generate_col_name() + vcol2 = await graph.create_vertex_collection(vcol2_name) + assert vcol2.name == vcol2_name + properties = await graph.properties() + assert len(properties.orphan_collections) == 2 + + # Create an edge definition + edge_name = generate_col_name() + edge_col = await graph.create_edge_definition( + 
edge_name,
+        from_vertex_collections=[vcol_name],
+        to_vertex_collections=[vcol2_name],
+    )
+    assert edge_col.name == edge_name
+
+    # There should be no more orphan collections
+    properties = await graph.properties()
+    assert len(properties.orphan_collections) == 0
+    assert len(properties.edge_definitions) == 1
+    assert properties.edge_definitions[0]["collection"] == edge_name
+    assert len(properties.edge_definitions[0]["from"]) == 1
+    assert properties.edge_definitions[0]["from"][0] == vcol_name
+    assert len(properties.edge_definitions[0]["to"]) == 1
+    assert properties.edge_definitions[0]["to"][0] == vcol2_name
+
+
+@pytest.mark.asyncio
+async def test_vertex_collections(db, docs, bad_graph):
+    # Test errors
+    with pytest.raises(VertexCollectionCreateError):
+        await bad_graph.create_vertex_collection("bad_col")
+    with pytest.raises(VertexCollectionListError):
+        await bad_graph.vertex_collections()
+    with pytest.raises(VertexCollectionListError):
+        await bad_graph.has_vertex_collection("bad_col")
+    with pytest.raises(VertexCollectionDeleteError):
+        await bad_graph.delete_vertex_collection("bad_col")
+
+    # Create graph
+    graph = await db.create_graph(generate_graph_name())
+
+    # Create vertex collections
+    names = [generate_col_name() for _ in range(3)]
+    cols = [await graph.create_vertex_collection(name) for name in names]
+
+    # List vertex collections
+    col_list = await graph.vertex_collections()
+    assert len(col_list) == 3
+    for c in cols:
+        assert c.name in col_list
+        assert await graph.has_vertex_collection(c.name)
+
+    # Delete a vertex collection
+    await graph.delete_vertex_collection(names[0])
+    assert await graph.has_vertex_collection(names[0]) is False
+
+    # Insert in both remaining collections
+    v1_meta = await graph.insert_vertex(names[1], docs[0])
+    v2_meta = await graph.insert_vertex(names[2], docs[1], return_new=True)
+    assert "new" in v2_meta
+    v2_meta = v2_meta["vertex"]
+
+    # Get the vertices
+    v1 = await graph.vertex(v1_meta)
+    assert v1 is not None
+    assert v1["text"] == docs[0]["text"]
+    v2 = await graph.vertex(v2_meta["_id"])
+    assert v2 is not None
+    v3 = await graph.vertex(f"{names[2]}/bad_id")
+    assert v3 is None
+
+    # Update one vertex
+    v1["text"] = "updated_text"
+    v1_meta = await graph.update_vertex(v1, return_new=True)
+    assert "new" in v1_meta
+    assert "vertex" in v1_meta
+    v1 = await graph.vertex(v1_meta["vertex"])
+    assert v1["text"] == "updated_text"
+
+    # Replace the same vertex
+    v1["text"] = "replaced_text"
+    v1["additional"] = "data"
+    v1.pop("loc")
+    v1_meta = await graph.replace_vertex(v1, return_old=True, return_new=True)
+    assert "old" in v1_meta
+    assert "new" in v1_meta
+    assert "vertex" in v1_meta
+    v1 = await graph.vertex(v1_meta["vertex"])
+    assert v1["text"] == "replaced_text"
+    assert "additional" in v1
+    assert "loc" not in v1
+
+    # Delete a vertex
+    v1 = await graph.delete_vertex(v1["_id"], return_old=True)
+    assert "_id" in v1
+    assert await graph.delete_vertex(v1["_id"], ignore_missing=True) is False
+    with pytest.raises(DocumentDeleteError):
+        assert await graph.delete_vertex(v1["_id"])
+
+    # Check has method
+    assert await graph.has_vertex(v1) is False
+    assert await graph.has_vertex(v2["_id"]) is True
+
+
+@pytest.mark.asyncio
+async def test_edge_collections(db, bad_graph):
+    # Test errors
+    with pytest.raises(EdgeDefinitionListError):
+        await bad_graph.edge_definitions()
+    with pytest.raises(EdgeDefinitionListError):
+        await bad_graph.has_edge_definition("bad_col")
+    with pytest.raises(EdgeCollectionListError):
+        await bad_graph.edge_collections()
+    
with pytest.raises(EdgeDefinitionReplaceError):
+        await bad_graph.replace_edge_definition("foo", ["bar1"], ["bar2"])
+    with pytest.raises(EdgeDefinitionDeleteError):
+        await bad_graph.delete_edge_definition("foo")
+    with pytest.raises(EdgeListError):
+        await bad_graph.edges("col", "foo")
+
+    # Create full graph
+    name = generate_graph_name()
+    graph = await db.create_graph(name)
+    teachers_col_name = generate_col_name()
+    await db.create_collection(teachers_col_name)
+    await graph.create_vertex_collection(teachers_col_name)
+    students_col_name = generate_col_name()
+    await db.create_collection(students_col_name)
+    await graph.create_vertex_collection(students_col_name)
+    edge_col_name = generate_col_name()
+    edge_col = await graph.create_edge_definition(
+        edge_col_name,
+        from_vertex_collections=[teachers_col_name],
+        to_vertex_collections=[students_col_name],
+    )
+    assert edge_col.name == edge_col_name
+
+    # List edge definitions
+    edge_definitions = await graph.edge_definitions()
+    assert len(edge_definitions) == 1
+    assert "edge_collection" in edge_definitions[0]
+    assert "from_vertex_collections" in edge_definitions[0]
+    assert "to_vertex_collections" in edge_definitions[0]
+    assert await graph.has_edge_definition(edge_col_name) is True
+    assert await graph.has_edge_definition("bad_edge") is False
+
+    edge_cols = await graph.edge_collections()
+    assert len(edge_cols) == 1
+    assert edge_col_name in edge_cols
+
+    # Design the graph
+    teachers = [
+        {"_key": "101", "name": "Mr. Smith"},
+        {"_key": "102", "name": "Ms. Johnson"},
+        {"_key": "103", "name": "Dr. Brown"},
+    ]
+    students = [
+        {"_key": "123", "name": "Alice"},
+        {"_key": "456", "name": "Bob"},
+        {"_key": "789", "name": "Charlie"},
+    ]
+    edges = [
+        {
+            "_from": f"{teachers_col_name}/101",
+            "_to": f"{students_col_name}/123",
+            "subject": "Math",
+        },
+        {
+            "_from": f"{teachers_col_name}/102",
+            "_to": f"{students_col_name}/456",
+            "subject": "Science",
+        },
+        {
+            "_from": f"{teachers_col_name}/103",
+            "_to": f"{students_col_name}/789",
+            "subject": "History",
+        },
+    ]
+
+    # Create the edges
+    edge_metas = []
+    for idx in range(len(edges)):
+        await graph.insert_vertex(teachers_col_name, teachers[idx])
+        await graph.insert_vertex(students_col_name, students[idx])
+        edge_meta = await graph.insert_edge(
+            edge_col_name,
+            edges[idx],
+            return_new=True,
+        )
+        assert "new" in edge_meta
+        edge_metas.append(edge_meta)
+
+    # Check for edge existence
+    edge_meta = edge_metas[0]
+    edge_id = edge_meta["new"]["_id"]
+    assert await graph.has_edge(edge_id) is True
+    assert await graph.has_edge(f"{edge_col_name}/bad_id") is False
+    edge = await graph.edge(edge_id)
+    assert edge is not None
+
+    # Update an edge
+    edge["subject"] = "Advanced Math"
+    updated_edge_meta = await graph.update_edge(edge, return_new=True, return_old=True)
+    assert "new" in updated_edge_meta
+    assert "old" in updated_edge_meta
+    assert "edge" in updated_edge_meta
+    edge = await graph.edge(edge_id)
+    assert edge["subject"] == "Advanced Math"
+
+    # Replace an edge
+    edge["subject"] = "Replaced Subject"
+    edge["extra_info"] = "Some additional data"
+    replaced_edge_meta = await graph.replace_edge(
+        edge, return_old=True, return_new=True
+    )
+    assert "old" in replaced_edge_meta
+    assert "new" in replaced_edge_meta
+    assert "edge" in replaced_edge_meta
+    edge = await graph.edge(edge_id)
+    assert edge["subject"] == "Replaced Subject"
+
+    # Delete the edge
+    deleted_edge = await graph.delete_edge(edge_id, return_old=True)
+    assert "_id" in deleted_edge
+    assert await
graph.has_edge(edge_id) is False + + # Replace the edge definition + new_from_collections = [students_col_name] + new_to_collections = [teachers_col_name] + replaced_edge_col = await graph.replace_edge_definition( + edge_col_name, + from_vertex_collections=new_from_collections, + to_vertex_collections=new_to_collections, + ) + assert replaced_edge_col.name == edge_col_name + + # Verify the updated edge definition + edge_definitions = await graph.edge_definitions() + assert len(edge_definitions) == 1 + assert edge_definitions[0]["edge_collection"] == edge_col_name + assert edge_definitions[0]["from_vertex_collections"] == new_from_collections + assert edge_definitions[0]["to_vertex_collections"] == new_to_collections + + # Delete the edge definition + await graph.delete_edge_definition(edge_col_name) + assert await graph.has_edge_definition(edge_col_name) is False + + +@pytest.mark.asyncio +async def test_edge_links(db): + # Create full graph + name = generate_graph_name() + graph = await db.create_graph(name) + + # Teachers collection + teachers_col_name = generate_col_name() + await db.create_collection(teachers_col_name) + await graph.create_vertex_collection(teachers_col_name) + + # Students collection + students_col_name = generate_col_name() + await db.create_collection(students_col_name) + await graph.create_vertex_collection(students_col_name) + + # Edges + teachers_to_students = generate_col_name() + await graph.create_edge_definition( + teachers_to_students, + from_vertex_collections=[teachers_col_name], + to_vertex_collections=[students_col_name], + ) + students_to_students = generate_col_name() + await graph.create_edge_definition( + students_to_students, + from_vertex_collections=[teachers_col_name, students_col_name], + to_vertex_collections=[students_col_name], + ) + + # Populate the graph + teachers = [ + {"_key": "101", "name": "Mr. Smith"}, + {"_key": "102", "name": "Ms. Johnson"}, + {"_key": "103", "name": "Dr. 
Brown"}, + ] + students = [ + {"_key": "123", "name": "Alice"}, + {"_key": "456", "name": "Bob"}, + {"_key": "789", "name": "Charlie"}, + ] + + docs = [] + t = await graph.insert_vertex(teachers_col_name, teachers[0]) + s = await graph.insert_vertex(students_col_name, students[0]) + await graph.link(teachers_to_students, t, s, {"subject": "Math"}) + docs.append(s) + + t = await graph.insert_vertex(teachers_col_name, teachers[1]) + s = await graph.insert_vertex(students_col_name, students[1]) + await graph.link(teachers_to_students, t["_id"], s["_id"], {"subject": "Science"}) + docs.append(s) + + t = await graph.insert_vertex(teachers_col_name, teachers[2]) + s = await graph.insert_vertex(students_col_name, students[2]) + await graph.link(teachers_to_students, t, s, {"subject": "History"}) + docs.append(s) + + await graph.link(students_to_students, docs[0], docs[1], {"friendship": "close"}) + await graph.link(students_to_students, docs[1], docs[0], {"friendship": "close"}) + + edges = await graph.edges(students_to_students, docs[0]) + assert len(edges["edges"]) == 2 + assert "stats" in edges + + await graph.link(students_to_students, docs[2], docs[0], {"friendship": "close"}) + edges = await graph.edges(students_to_students, docs[0], direction="in") + assert len(edges["edges"]) == 2 + + edges = await graph.edges(students_to_students, docs[0], direction="out") + assert len(edges["edges"]) == 1 + + edges = await graph.edges(students_to_students, docs[0]) + assert len(edges["edges"]) == 3 diff --git a/tests/test_typings.py b/tests/test_typings.py index 7a40c33..fd04fa1 100644 --- a/tests/test_typings.py +++ b/tests/test_typings.py @@ -4,6 +4,7 @@ CollectionInfo, CollectionStatus, CollectionType, + EdgeDefinitionOptions, GraphOptions, GraphProperties, JsonWrapper, @@ -17,6 +18,7 @@ QueryProperties, QueryTrackingConfiguration, UserInfo, + VertexCollectionOptions, ) @@ -368,3 +370,19 @@ def test_GraphOptions(): assert graph_options.satellites == ["satellite1", "satellite2"] assert graph_options.smart_graph_attribute == "region" assert graph_options.write_concern == 1 + + +def test_VertexCollectionOptions(): + options = VertexCollectionOptions( + satellites=["col1", "col2"], + ) + + assert options.satellites == ["col1", "col2"] + + +def test_EdgeDefinitionOptions(): + options = EdgeDefinitionOptions( + satellites=["col1", "col2"], + ) + + assert options.satellites == ["col1", "col2"] From ce278946e6d66af492dd7e739152c9782be9ac20 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Sun, 1 Jun 2025 12:54:54 +0300 Subject: [PATCH 09/47] Bumping version number (#53) --- arangoasync/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arangoasync/version.py b/arangoasync/version.py index 27fdca4..81f0fde 100644 --- a/arangoasync/version.py +++ b/arangoasync/version.py @@ -1 +1 @@ -__version__ = "0.0.3" +__version__ = "0.0.4" From 7a5d1985e5b8daa15dd39fc1b0bf3dfc6bb58251 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Thu, 5 Jun 2025 09:04:28 +0000 Subject: [PATCH 10/47] Updating logo --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 507c3e9..ab24eae 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -![Logo](docs/static/logo.png) +![Logo](https://raw.githubusercontent.com/arangodb/python-arango-async/refs/heads/main/docs/static/logo.png) 
[![CircleCI](https://dl.circleci.com/status-badge/img/gh/arangodb/python-arango-async/tree/main.svg?style=svg)](https://dl.circleci.com/status-badge/redirect/gh/arangodb/python-arango-async/tree/main) [![CodeQL](https://github.com/arangodb/python-arango-async/actions/workflows/codeql.yaml/badge.svg)](https://github.com/arangodb/python-arango-async/actions/workflows/codeql.yaml) From 83054a31a48c6b328ee82b2ef2cec81777e0e9b6 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Sun, 8 Jun 2025 14:08:06 +0300 Subject: [PATCH 11/47] More on document manipulation (#54) * Document manipulation directly from DB class * Removing collection parameter * Updating docs --- arangoasync/collection.py | 8 +- arangoasync/database.py | 347 +++++++++++++++++++++++++++++++++++++- docs/document.rst | 50 ++++++ tests/test_document.py | 48 ++++++ 4 files changed, 449 insertions(+), 4 deletions(-) diff --git a/arangoasync/collection.py b/arangoasync/collection.py index c742714..c34c1aa 100644 --- a/arangoasync/collection.py +++ b/arangoasync/collection.py @@ -1653,7 +1653,7 @@ def response_handler(resp: Response) -> bool | Json: async def delete( self, - document: T, + document: str | T, ignore_revs: Optional[bool] = None, ignore_missing: bool = False, wait_for_sync: Optional[bool] = None, @@ -1665,7 +1665,7 @@ async def delete( """Delete a document. Args: - document (dict): Document ID, key or body. The body must contain the + document (str | dict): Document ID, key or body. The body must contain the "_key" or "_id" field. ignore_revs (bool | None): If set to `True`, the `_rev` attribute in the document is ignored. If this is set to `False`, then the `_rev` @@ -1697,6 +1697,8 @@ async def delete( References: - `remove-a-document `__ """ # noqa: E501 + handle = self._get_doc_id(cast(str | Json, document)) + params: Params = {} if ignore_revs is not None: params["ignoreRevs"] = ignore_revs @@ -1715,7 +1717,7 @@ async def delete( request = Request( method=Method.DELETE, - endpoint=f"/_api/document/{self._extract_id(cast(Json, document))}", + endpoint=f"/_api/document/{handle}", params=params, headers=headers, ) diff --git a/arangoasync/database.py b/arangoasync/database.py index 3cac02d..dcca837 100644 --- a/arangoasync/database.py +++ b/arangoasync/database.py @@ -10,7 +10,7 @@ from warnings import warn from arangoasync.aql import AQL -from arangoasync.collection import StandardCollection +from arangoasync.collection import Collection, StandardCollection from arangoasync.connection import Connection from arangoasync.errno import HTTP_FORBIDDEN, HTTP_NOT_FOUND from arangoasync.exceptions import ( @@ -684,6 +684,351 @@ def response_handler(resp: Response) -> bool: return await self._executor.execute(request, response_handler) + async def has_document( + self, + document: str | Json, + allow_dirty_read: bool = False, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + ) -> Result[bool]: + """Check if a document exists. + + Args: + document (str | dict): Document ID, key or body. + Document body must contain the "_id" field. + allow_dirty_read (bool): Allow reads from followers in a cluster. + if_match (str | None): The document is returned, if it has the same + revision as the given ETag. + if_none_match (str | None): The document is returned, if it has a + different revision than the given ETag. + + Returns: + `True` if the document exists, `False` otherwise. + + Raises: + DocumentRevisionError: If the revision is incorrect. + DocumentGetError: If retrieval fails. 
+ + References: + - `get-a-document-header `__ + """ # noqa: E501 + col = Collection.get_col_name(document) + return await self.collection(col).has( + document, + allow_dirty_read=allow_dirty_read, + if_match=if_match, + if_none_match=if_none_match, + ) + + async def document( + self, + document: str | Json, + allow_dirty_read: bool = False, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + ) -> Result[Optional[Json]]: + """Return a document. + + Args: + document (str | dict): Document ID, key or body. + Document body must contain the "_id" field. + allow_dirty_read (bool): Allow reads from followers in a cluster. + if_match (str | None): The document is returned, if it has the same + revision as the given ETag. + if_none_match (str | None): The document is returned, if it has a + different revision than the given ETag. + + Returns: + Document or `None` if not found. + + Raises: + DocumentRevisionError: If the revision is incorrect. + DocumentGetError: If retrieval fails. + DocumentParseError: If the document is malformed. + + References: + - `get-a-document `__ + """ # noqa: E501 + col: StandardCollection[Json, Json, Jsons] = self.collection( + Collection.get_col_name(document) + ) + return await col.get( + document, + allow_dirty_read=allow_dirty_read, + if_match=if_match, + if_none_match=if_none_match, + ) + + async def insert_document( + self, + collection: str, + document: Json, + wait_for_sync: Optional[bool] = None, + return_new: Optional[bool] = None, + return_old: Optional[bool] = None, + silent: Optional[bool] = None, + overwrite: Optional[bool] = None, + overwrite_mode: Optional[str] = None, + keep_null: Optional[bool] = None, + merge_objects: Optional[bool] = None, + refill_index_caches: Optional[bool] = None, + version_attribute: Optional[str] = None, + ) -> Result[bool | Json]: + """Insert a new document. + + Args: + collection (str): Collection name. + document (dict): Document to insert. If it contains the "_key" or "_id" + field, the value is used as the key of the new document (otherwise + it is auto-generated). Any "_rev" field is ignored. + wait_for_sync (bool | None): Wait until document has been synced to disk. + return_new (bool | None): Additionally return the complete new document + under the attribute `new` in the result. + return_old (bool | None): Additionally return the complete old document + under the attribute `old` in the result. Only available if the + `overwrite` option is used. + silent (bool | None): If set to `True`, no document metadata is returned. + This can be used to save resources. + overwrite (bool | None): If set to `True`, operation does not fail on + duplicate key and existing document is overwritten (replace-insert). + overwrite_mode (str | None): Overwrite mode. Supersedes **overwrite** + option. May be one of "ignore", "replace", "update" or "conflict". + keep_null (bool | None): If set to `True`, fields with value None are + retained in the document. Otherwise, they are removed completely. + Applies only when **overwrite_mode** is set to "update" + (update-insert). + merge_objects (bool | None): If set to `True`, sub-dictionaries are merged + instead of the new one overwriting the old one. Applies only when + **overwrite_mode** is set to "update" (update-insert). + refill_index_caches (bool | None): Whether to add new entries to + in-memory index caches if document insertions affect the edge index + or cache-enabled persistent indexes. 
+ version_attribute (str | None): Support for simple external versioning to + document operations. Only applicable if **overwrite** is set to `True` + or **overwrite_mode** is set to "update" or "replace". + + Returns: + bool | dict: Document metadata (e.g. document id, key, revision) or `True` + if **silent** is set to `True`. + + Raises: + DocumentInsertError: If insertion fails. + DocumentParseError: If the document is malformed. + + References: + - `create-a-document `__ + """ # noqa: E501 + col: StandardCollection[Json, Json, Jsons] = self.collection(collection) + return await col.insert( + document, + wait_for_sync=wait_for_sync, + return_new=return_new, + return_old=return_old, + silent=silent, + overwrite=overwrite, + overwrite_mode=overwrite_mode, + keep_null=keep_null, + merge_objects=merge_objects, + refill_index_caches=refill_index_caches, + version_attribute=version_attribute, + ) + + async def update_document( + self, + document: Json, + ignore_revs: Optional[bool] = None, + wait_for_sync: Optional[bool] = None, + return_new: Optional[bool] = None, + return_old: Optional[bool] = None, + silent: Optional[bool] = None, + keep_null: Optional[bool] = None, + merge_objects: Optional[bool] = None, + refill_index_caches: Optional[bool] = None, + version_attribute: Optional[str] = None, + if_match: Optional[str] = None, + ) -> Result[bool | Json]: + """Update a document. + + Args: + document (dict): Partial or full document with the updated values. + It must contain the "_key" or "_id" field. + ignore_revs (bool | None): If set to `True`, the `_rev` attribute in the + document is ignored. If this is set to `False`, then the `_rev` + attribute given in the body document is taken as a precondition. + The document is only updated if the current revision is the one + specified. + wait_for_sync (bool | None): Wait until document has been synced to disk. + return_new (bool | None): Additionally return the complete new document + under the attribute `new` in the result. + return_old (bool | None): Additionally return the complete old document + under the attribute `old` in the result. + silent (bool | None): If set to `True`, no document metadata is returned. + This can be used to save resources. + keep_null (bool | None): If the intention is to delete existing attributes + with the patch command, set this parameter to `False`. + merge_objects (bool | None): Controls whether objects (not arrays) are + merged if present in both the existing and the patch document. + If set to `False`, the value in the patch document overwrites the + existing document’s value. If set to `True`, objects are merged. + refill_index_caches (bool | None): Whether to add new entries to + in-memory index caches if document updates affect the edge index + or cache-enabled persistent indexes. + version_attribute (str | None): Support for simple external versioning to + document operations. + if_match (str | None): You can conditionally update a document based on a + target revision id by using the "if-match" HTTP header. + + Returns: + bool | dict: Document metadata (e.g. document id, key, revision) or `True` + if **silent** is set to `True`. + + Raises: + DocumentRevisionError: If precondition was violated. + DocumentUpdateError: If update fails. 
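+
+ Example:
+ .. code-block:: python
+
+ # Hedged sketch; assumes "students/lola" was inserted earlier.
+ # Only the attributes present in the body are patched.
+ await db.update_document({"_id": "students/lola", "GPA": 3.6})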
+ + References: + - `update-a-document `__ + """ # noqa: E501 + col: StandardCollection[Json, Json, Jsons] = self.collection( + Collection.get_col_name(document) + ) + return await col.update( + document, + ignore_revs=ignore_revs, + wait_for_sync=wait_for_sync, + return_new=return_new, + return_old=return_old, + silent=silent, + keep_null=keep_null, + merge_objects=merge_objects, + refill_index_caches=refill_index_caches, + version_attribute=version_attribute, + if_match=if_match, + ) + + async def replace_document( + self, + document: Json, + ignore_revs: Optional[bool] = None, + wait_for_sync: Optional[bool] = None, + return_new: Optional[bool] = None, + return_old: Optional[bool] = None, + silent: Optional[bool] = None, + refill_index_caches: Optional[bool] = None, + version_attribute: Optional[str] = None, + if_match: Optional[str] = None, + ) -> Result[bool | Json]: + """Replace a document. + + Args: + document (dict): New document. It must contain the "_key" or "_id" field. + Edge document must also have "_from" and "_to" fields. + ignore_revs (bool | None): If set to `True`, the `_rev` attribute in the + document is ignored. If this is set to `False`, then the `_rev` + attribute given in the body document is taken as a precondition. + The document is only replaced if the current revision is the one + specified. + wait_for_sync (bool | None): Wait until document has been synced to disk. + return_new (bool | None): Additionally return the complete new document + under the attribute `new` in the result. + return_old (bool | None): Additionally return the complete old document + under the attribute `old` in the result. + silent (bool | None): If set to `True`, no document metadata is returned. + This can be used to save resources. + refill_index_caches (bool | None): Whether to add new entries to + in-memory index caches if document updates affect the edge index + or cache-enabled persistent indexes. + version_attribute (str | None): Support for simple external versioning to + document operations. + if_match (str | None): You can conditionally replace a document based on a + target revision id by using the "if-match" HTTP header. + + Returns: + bool | dict: Document metadata (e.g. document id, key, revision) or `True` + if **silent** is set to `True`. + + Raises: + DocumentRevisionError: If precondition was violated. + DocumentReplaceError: If replace fails. + + References: + - `replace-a-document `__ + """ # noqa: E501 + col: StandardCollection[Json, Json, Jsons] = self.collection( + Collection.get_col_name(document) + ) + return await col.replace( + document, + ignore_revs=ignore_revs, + wait_for_sync=wait_for_sync, + return_new=return_new, + return_old=return_old, + silent=silent, + refill_index_caches=refill_index_caches, + version_attribute=version_attribute, + if_match=if_match, + ) + + async def delete_document( + self, + document: str | Json, + ignore_revs: Optional[bool] = None, + ignore_missing: bool = False, + wait_for_sync: Optional[bool] = None, + return_old: Optional[bool] = None, + silent: Optional[bool] = None, + refill_index_caches: Optional[bool] = None, + if_match: Optional[str] = None, + ) -> Result[bool | Json]: + """Delete a document. + + Args: + document (str | dict): Document ID, key or body. The body must contain the + "_key" or "_id" field. + ignore_revs (bool | None): If set to `True`, the `_rev` attribute in the + document is ignored. If this is set to `False`, then the `_rev` + attribute given in the body document is taken as a precondition. 
The document is only removed if the current revision is the one
+ specified.
+ ignore_missing (bool): Do not raise an exception on missing document.
+ This parameter has no effect in transactions where an exception is
+ always raised on failures.
+ wait_for_sync (bool | None): Wait until operation has been synced to disk.
+ return_old (bool | None): Additionally return the complete old document
+ under the attribute `old` in the result.
+ silent (bool | None): If set to `True`, no document metadata is returned.
+ This can be used to save resources.
+ refill_index_caches (bool | None): Whether to add new entries to
+ in-memory index caches if document updates affect the edge index
+ or cache-enabled persistent indexes.
+ if_match (str | None): You can conditionally remove a document based
+ on a target revision id by using the "if-match" HTTP header.
+
+ Returns:
+ bool | dict: Document metadata (e.g. document id, key, revision) or `True`
+ if **silent** is set to `True` and the document was found.
+
+ Raises:
+ DocumentRevisionError: If precondition was violated.
+ DocumentDeleteError: If deletion fails.
+
+ References:
+ - `remove-a-document `__
+ """ # noqa: E501
+ col: StandardCollection[Json, Json, Jsons] = self.collection(
+ Collection.get_col_name(document)
+ )
+ return await col.delete(
+ document,
+ ignore_revs=ignore_revs,
+ ignore_missing=ignore_missing,
+ wait_for_sync=wait_for_sync,
+ return_old=return_old,
+ silent=silent,
+ refill_index_caches=refill_index_caches,
+ if_match=if_match,
+ )
+
 def graph(
 self,
 name: str,
diff --git a/docs/document.rst b/docs/document.rst
index 571507e..c0764e8 100644
--- a/docs/document.rst
+++ b/docs/document.rst
@@ -150,4 +150,54 @@ Standard documents are managed via collection API wrapper:
 # Delete one or more matching documents.
 await students.delete_match({"first": "Emma"})
 
+You can also manage documents via the database API wrappers, but only simple
+operations (i.e. get, insert, update, replace, delete) are supported, and you
+must provide document IDs instead of keys:
+
+.. code-block:: python
+
+ from arangoasync import ArangoClient
+ from arangoasync.auth import Auth
+
+ # Initialize the client for ArangoDB.
+ async with ArangoClient(hosts="http://localhost:8529") as client:
+ auth = Auth(username="root", password="passwd")
+
+ # Connect to "test" database as root user.
+ db = await client.db("test", auth=auth)
+
+ # Create a new collection named "students" if it does not exist.
+ if not await db.has_collection("students"):
+ await db.create_collection("students")
+
+ # Create some test documents to play around with.
+ # The documents must have the "_id" field, instead of just "_key".
+ lola = {"_id": "students/lola", "GPA": 3.5}
+ abby = {"_id": "students/abby", "GPA": 3.2}
+ john = {"_id": "students/john", "GPA": 3.6}
+ emma = {"_id": "students/emma", "GPA": 4.0}
+
+ # Insert a new document.
+ metadata = await db.insert_document("students", lola)
+ assert metadata["_id"] == "students/lola"
+ assert metadata["_key"] == "lola"
+
+ # Check if a document exists.
+ assert await db.has_document(lola) is True
+
+ # Get a document (by ID or body with "_id" field).
+ await db.document("students/lola")
+ await db.document(abby)
+
+ # Update a document.
+ lola["GPA"] = 3.6
+ await db.update_document(lola)
+
+ # Replace a document.
+ lola["GPA"] = 3.4
+ await db.replace_document(lola)
+
+ # Delete a document (by ID or body with "_id" field).
+ await db.delete_document("students/lola") + See :class:`arangoasync.database.StandardDatabase` and :class:`arangoasync.collection.StandardCollection` for API specification. diff --git a/tests/test_document.py b/tests/test_document.py index fbfd2b3..741ec34 100644 --- a/tests/test_document.py +++ b/tests/test_document.py @@ -566,3 +566,51 @@ async def test_document_delete_match(doc_col, bad_col, docs): await doc_col.insert_many(docs) count = await doc_col.delete_match({"text": "no_matching"}) assert count == 0 + + +@pytest.mark.asyncio +async def test_document_db_operations(db, bad_db, doc_col, docs): + # Insert a document through the collection API + doc = await doc_col.insert(docs[0]) + + # Check if the document exists in the database + assert await db.has_document(doc) is True + assert await db.has_document({"_id": "missing_col/missing_doc"}) is False + assert await db.has_document("missing_doc") is False + with pytest.raises(DocumentGetError): + await bad_db.has_document(doc) + + # Get the document + doc2 = await db.document(doc["_id"]) + assert doc2["_id"] == doc["_id"] + with pytest.raises(DocumentGetError): + await bad_db.document(doc["_id"]) + + # Insert a new document + doc = await db.insert_document(doc_col.name, docs[1]) + assert doc["_id"] == f"{doc_col.name}/{doc['_key']}" + with pytest.raises(DocumentInsertError): + await bad_db.insert_document(doc_col.name, docs[2]) + + # Update the document + doc["val"] = 100 + updated_doc = await db.update_document(doc, return_new=True) + assert updated_doc["_id"] == doc["_id"] + assert updated_doc["new"]["val"] == 100 + with pytest.raises(DocumentUpdateError): + await bad_db.update_document(doc) + + # Replace the document + doc["val"] = 200 + replaced_doc = await db.replace_document(doc, return_new=True) + assert replaced_doc["_id"] == doc["_id"] + assert replaced_doc["new"]["val"] == 200 + with pytest.raises(DocumentReplaceError): + await bad_db.replace_document(doc) + + # Delete the document + deleted_doc = await db.delete_document(doc["_id"], return_old=True) + assert deleted_doc["_id"] == doc["_id"] + assert deleted_doc["old"]["val"] == 200 + with pytest.raises(DocumentDeleteError): + await bad_db.delete_document(doc) From 7f0ed63307058d5d13324b18c0379d8099b19944 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Sun, 8 Jun 2025 12:59:44 +0000 Subject: [PATCH 12/47] Updating to last version --- starter.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) mode change 100644 => 100755 starter.sh diff --git a/starter.sh b/starter.sh old mode 100644 new mode 100755 index be1778a..3eef281 --- a/starter.sh +++ b/starter.sh @@ -6,7 +6,7 @@ # Usage: # ./starter.sh [single|cluster] [community|enterprise] [version] # Example: -# ./starter.sh cluster enterprise 3.11.4 +# ./starter.sh cluster enterprise 3.12.4 setup="${1:-single}" license="${2:-community}" From 1d08ba46965a3313482b2df948648e1ae595788e Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Sun, 8 Jun 2025 13:06:02 +0000 Subject: [PATCH 13/47] Adding view management --- arangoasync/database.py | 238 ++++++++++++++++++++++++++++++++++++++ arangoasync/exceptions.py | 28 +++++ tests/helpers.py | 9 ++ tests/test_view.py | 137 ++++++++++++++++++++++ 4 files changed, 412 insertions(+) create mode 100644 tests/test_view.py diff --git a/arangoasync/database.py b/arangoasync/database.py index dcca837..998c6dd 100644 --- a/arangoasync/database.py +++ b/arangoasync/database.py @@ -46,6 +46,13 @@ UserListError, UserReplaceError, UserUpdateError, + ViewCreateError, + ViewDeleteError, + 
ViewGetError,
+ ViewListError,
+ ViewRenameError,
+ ViewReplaceError,
+ ViewUpdateError,
 )
 from arangoasync.executor import (
 ApiExecutor,
@@ -1223,6 +1230,237 @@ def response_handler(resp: Response) -> bool:
 
 return await self._executor.execute(request, response_handler)
 
+ async def view(self, name: str) -> Result[Json]:
+ """Return the properties of a view.
+
+ Args:
+ name (str): View name.
+
+ Returns:
+ dict: View properties.
+
+ Raises:
+ ViewGetError: If the operation fails.
+
+ References:
+ - `read-properties-of-a-view `__
+ - `get-the-properties-of-a-view `__
+ """ # noqa: E501
+ request = Request(method=Method.GET, endpoint=f"/_api/view/{name}/properties")
+
+ def response_handler(resp: Response) -> Json:
+ if not resp.is_success:
+ raise ViewGetError(resp, request)
+ return self.deserializer.loads(resp.raw_body)
+
+ return await self._executor.execute(request, response_handler)
+
+ async def view_info(self, name: str) -> Result[Json]:
+ """Return basic information about a specific view.
+
+ Args:
+ name (str): View name.
+
+ Returns:
+ dict: View information.
+
+ Raises:
+ ViewGetError: If the operation fails.
+
+ References:
+ - `get-information-about-a-view `__
+ - `get-information-about-a-view `__
+ """ # noqa: E501
+ request = Request(method=Method.GET, endpoint=f"/_api/view/{name}")
+
+ def response_handler(resp: Response) -> Json:
+ if not resp.is_success:
+ raise ViewGetError(resp, request)
+ return self.deserializer.loads(resp.raw_body)
+
+ return await self._executor.execute(request, response_handler)
+
+ async def views(self) -> Result[Jsons]:
+ """List all views in the database along with their summary information.
+
+ Returns:
+ list: List of views with their properties.
+
+ Raises:
+ ViewListError: If the operation fails.
+
+ References:
+ - `list-all-views `__
+ - `list-all-views `__
+ """ # noqa: E501
+ request = Request(method=Method.GET, endpoint="/_api/view")
+
+ def response_handler(resp: Response) -> Jsons:
+ if not resp.is_success:
+ raise ViewListError(resp, request)
+ body = self.deserializer.loads(resp.raw_body)
+ return cast(Jsons, body["result"])
+
+ return await self._executor.execute(request, response_handler)
+
+ async def create_view(
+ self,
+ name: str,
+ view_type: str,
+ properties: Optional[Json] = None,
+ ) -> Result[Json]:
+ """Create a view.
+
+ Args:
+ name (str): View name.
+ view_type (str): Type of the view (e.g., "arangosearch", "search-alias").
+ properties (dict | None): Properties of the view.
+
+ Returns:
+ dict: View properties.
+
+ Raises:
+ ViewCreateError: If the operation fails.
+
+ References:
+ - `create-a-search-alias-view `__
+ - `create-an-arangosearch-view `__
+ """ # noqa: E501
+ data: Json = {"name": name, "type": view_type}
+ if properties is not None:
+ data.update(properties)
+
+ request = Request(
+ method=Method.POST,
+ endpoint="/_api/view",
+ data=self.serializer.dumps(data),
+ )
+
+ def response_handler(resp: Response) -> Json:
+ if not resp.is_success:
+ raise ViewCreateError(resp, request)
+ return self.deserializer.loads(resp.raw_body)
+
+ return await self._executor.execute(request, response_handler)
+
+ async def replace_view(self, name: str, properties: Json) -> Result[Json]:
+ """Replace the properties of an existing view.
+
+ Args:
+ name (str): View name.
+ properties (dict): New properties for the view.
+
+ Returns:
+ dict: Updated view properties.
+
+ Raises:
+ ViewReplaceError: If the operation fails.
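+
+ Example:
+ .. code-block:: python
+
+ # Hedged sketch; assumes a view named "bar" exists. Properties
+ # left unspecified are reset to their defaults.
+ await db.replace_view("bar", {"cleanupIntervalStep": 2000})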
+ + References: + - `replace-the-properties-of-a-search-alias-view `__ + - `replace-the-properties-of-an-arangosearch-view `__ + """ # noqa: E501 + request = Request( + method=Method.PUT, + endpoint=f"/_api/view/{name}/properties", + data=self.serializer.dumps(properties), + ) + + def response_handler(resp: Response) -> Json: + if resp.is_success: + return self.deserializer.loads(resp.raw_body) + raise ViewReplaceError(resp, request) + + return await self._executor.execute(request, response_handler) + + async def update_view(self, name: str, properties: Json) -> Result[Json]: + """Update the properties of an existing view. + + Args: + name (str): View name. + properties (dict): New properties for the view. + + Returns: + dict: Updated view properties. + + Raises: + ViewUpdateError: If the operation fails. + + References: + - `update-the-properties-of-a-search-alias-view `__ + - `update-the-properties-of-an-arangosearch-view `__ + """ # noqa: E501 + request = Request( + method=Method.PATCH, + endpoint=f"/_api/view/{name}/properties", + data=self.serializer.dumps(properties), + ) + + def response_handler(resp: Response) -> Json: + if resp.is_success: + return self.deserializer.loads(resp.raw_body) + raise ViewUpdateError(resp, request) + + return await self._executor.execute(request, response_handler) + + async def rename_view(self, name: str, new_name: str) -> None: + """Rename an existing view (not supported in cluster deployments). + + Args: + name (str): Current view name. + new_name (str): New view name. + + Raises: + ViewRenameError: If the operation fails. + + References: + - `rename-a-view `__ + - `rename-a-view `__ + """ # noqa: E501 + request = Request( + method=Method.PUT, + endpoint=f"/_api/view/{name}/rename", + data=self.serializer.dumps({"name": new_name}), + ) + + def response_handler(resp: Response) -> None: + if not resp.is_success: + raise ViewRenameError(resp, request) + + await self._executor.execute(request, response_handler) + + async def delete_view( + self, name: str, ignore_missing: bool = False + ) -> Result[bool]: + """Delete a view. + + Args: + name (str): View name. + ignore_missing (bool): If `True`, do not raise an exception if the + view does not exist. + + Returns: + bool: `True` if the view was deleted successfully, `False` if the + view was not found and **ignore_missing** was set to `True`. + + Raises: + ViewDeleteError: If the operation fails. + + References: + - `drop-a-view `__ + - `drop-a-view `__ + """ # noqa: E501 + request = Request(method=Method.DELETE, endpoint=f"/_api/view/{name}") + + def response_handler(resp: Response) -> bool: + if resp.is_success: + return True + if resp.status_code == HTTP_NOT_FOUND and ignore_missing: + return False + raise ViewDeleteError(resp, request) + + return await self._executor.execute(request, response_handler) + async def has_user(self, username: str) -> Result[bool]: """Check if a user exists. 
diff --git a/arangoasync/exceptions.py b/arangoasync/exceptions.py index c4ee40a..4e46d06 100644 --- a/arangoasync/exceptions.py +++ b/arangoasync/exceptions.py @@ -429,3 +429,31 @@ class VertexCollectionDeleteError(ArangoServerError): class VertexCollectionListError(ArangoServerError): """Failed to retrieve vertex collections.""" + + +class ViewCreateError(ArangoServerError): + """Failed to create view.""" + + +class ViewDeleteError(ArangoServerError): + """Failed to delete view.""" + + +class ViewGetError(ArangoServerError): + """Failed to retrieve view details.""" + + +class ViewListError(ArangoServerError): + """Failed to retrieve views.""" + + +class ViewRenameError(ArangoServerError): + """Failed to rename view.""" + + +class ViewReplaceError(ArangoServerError): + """Failed to replace view.""" + + +class ViewUpdateError(ArangoServerError): + """Failed to update view.""" diff --git a/tests/helpers.py b/tests/helpers.py index 8e91c26..b961064 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -44,3 +44,12 @@ def generate_string(): str: Random unique string. """ return uuid4().hex + + +def generate_view_name(): + """Generate and return a random view name. + + Returns: + str: Random view name. + """ + return f"test_view_{uuid4().hex}" diff --git a/tests/test_view.py b/tests/test_view.py new file mode 100644 index 0000000..80b2388 --- /dev/null +++ b/tests/test_view.py @@ -0,0 +1,137 @@ +import pytest + +from arangoasync import errno +from arangoasync.exceptions import ( + ViewCreateError, + ViewDeleteError, + ViewGetError, + ViewListError, + ViewRenameError, + ViewReplaceError, + ViewUpdateError, +) +from tests.helpers import generate_view_name + + +@pytest.mark.asyncio +async def test_view_management(db, bad_db, doc_col, cluster): + # Create a view + view_name = generate_view_name() + bad_view_name = generate_view_name() + view_type = "arangosearch" + + result = await db.create_view( + view_name, + view_type, + {"consolidationIntervalMsec": 50000, "links": {doc_col.name: {}}}, + ) + assert "id" in result + assert result["name"] == view_name + assert result["type"] == view_type + assert result["consolidationIntervalMsec"] == 50000 + assert doc_col.name in result["links"] + + # Create view with bad database + with pytest.raises(ViewCreateError): + await bad_db.create_view( + view_name, + view_type, + {"consolidationIntervalMsec": 50000, "links": {doc_col.name: {}}}, + ) + + view_id = result["id"] + + # Test create duplicate view + with pytest.raises(ViewCreateError) as err: + await db.create_view(view_name, view_type, {"consolidationIntervalMsec": 50000}) + assert err.value.error_code == errno.DUPLICATE_NAME + + # Test get view (properties) + view = await db.view(view_name) + assert view["id"] == view_id + assert view["name"] == view_name + assert view["type"] == view_type + assert view["consolidationIntervalMsec"] == 50000 + + # Test get missing view + with pytest.raises(ViewGetError) as err: + await db.view(bad_view_name) + assert err.value.error_code == errno.DATA_SOURCE_NOT_FOUND + + # Test get view info + view_info = await db.view_info(view_name) + assert view_info["id"] == view_id + assert view_info["name"] == view_name + assert view_info["type"] == view_type + assert "consolidationIntervalMsec" not in view_info + with pytest.raises(ViewGetError) as err: + await db.view_info(bad_view_name) + assert err.value.error_code == errno.DATA_SOURCE_NOT_FOUND + + # Test list views + result = await db.views() + assert len(result) == 1 + view = result[0] + assert view["id"] == view_id + 
assert view["name"] == view_name + assert view["type"] == view_type + + # Test list views with bad database + with pytest.raises(ViewListError) as err: + await bad_db.views() + assert err.value.error_code == errno.FORBIDDEN + + # Test replace view + view = await db.replace_view(view_name, {"consolidationIntervalMsec": 40000}) + assert view["id"] == view_id + assert view["name"] == view_name + assert view["type"] == view_type + assert view["consolidationIntervalMsec"] == 40000 + + # Test replace view with bad database + with pytest.raises(ViewReplaceError) as err: + await bad_db.replace_view(view_name, {"consolidationIntervalMsec": 7000}) + assert err.value.error_code == errno.FORBIDDEN + + # Test update view + view = await db.update_view(view_name, {"consolidationIntervalMsec": 70000}) + assert view["id"] == view_id + assert view["name"] == view_name + assert view["type"] == view_type + assert view["consolidationIntervalMsec"] == 70000 + + # Test update view with bad database + with pytest.raises(ViewUpdateError) as err: + await bad_db.update_view(view_name, {"consolidationIntervalMsec": 80000}) + assert err.value.error_code == errno.FORBIDDEN + + # Test rename view + new_view_name = generate_view_name() + if cluster: + with pytest.raises(ViewRenameError): + await db.rename_view(view_name, new_view_name) + new_view_name = view_name + else: + await db.rename_view(view_name, new_view_name) + result = await db.views() + assert len(result) == 1 + view = result[0] + assert view["id"] == view_id + assert view["name"] == new_view_name + + # Test rename missing view + with pytest.raises(ViewRenameError) as err: + await db.rename_view(bad_view_name, view_name) + assert err.value.error_code == errno.DATA_SOURCE_NOT_FOUND + + # Test delete view + assert await db.delete_view(new_view_name) is True + assert len(await db.views()) == 0 + + # Test delete missing view + with pytest.raises(ViewDeleteError) as err: + await db.delete_view(new_view_name) + assert err.value.error_code == errno.DATA_SOURCE_NOT_FOUND + + # Test delete missing view with ignore_missing set to True + assert await db.delete_view(view_name, ignore_missing=True) is False From 1d3b3f59917f74e882057cdb63451588a548d79f Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Sun, 8 Jun 2025 13:06:29 +0000 Subject: [PATCH 14/47] Adding view docs --- docs/index.rst | 1 + docs/view.rst | 69 ++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 70 insertions(+) create mode 100644 docs/view.rst diff --git a/docs/index.rst b/docs/index.rst index 180c0ed..f30ed6e 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -44,6 +44,7 @@ Contents :maxdepth: 1 transaction + view **API Executions** diff --git a/docs/view.rst b/docs/view.rst new file mode 100644 index 0000000..f680b54 --- /dev/null +++ b/docs/view.rst @@ -0,0 +1,69 @@ +Views +----- + +All types of views are supported. . For more information on **view** +management, refer to `ArangoDB Manual`_. + +.. _ArangoDB Manual: https://docs.arangodb.com + +**Example:** + +.. code-block:: python + + from arangoasync import ArangoClient + from arangoasync.auth import Auth + + # Initialize the client for ArangoDB. + async with ArangoClient(hosts="http://localhost:8529") as client: + auth = Auth(username="root", password="passwd") + + # Connect to "test" database as root user. + db = await client.db("test", auth=auth) + + # Retrieve list of views. + await db.views() + + # Create a view. 
+ await db.create_view( + name="foo", + view_type="arangosearch", + properties={ + "cleanupIntervalStep": 0, + "consolidationIntervalMsec": 0 + } + ) + + # Rename a view (not supported in cluster deployments). + await db.rename_view("foo", "bar") + + # Retrieve view properties. + await db.view("bar") + + # Retrieve view summary. + await db.view_info("bar") + + # Partially update view properties. + await db.update_view( + name="bar", + properties={ + "cleanupIntervalStep": 1000, + "consolidationIntervalMsec": 200 + } + ) + + # Replace view properties. Unspecified ones are reset to default. + await db.replace_view( + name="bar", + properties={"cleanupIntervalStep": 2000} + ) + + # Delete a view. + await db.delete_view("bar") + +For more information on the content of view **properties**, +see `Search Alias Views`_ and `Arangosearch Views`_. + +.. _Search Alias Views: https://docs.arangodb.com/stable/develop/http-api/views/search-alias-views/ +.. _Arangosearch Views: https://docs.arangodb.com/stable/develop/http-api/views/arangosearch-views/ + +Refer to :class:`arangoasync.database.StandardDatabase` class for API specification. From b6ec0ae902e78475221e4d7299fd575870ad18e5 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Sun, 15 Jun 2025 15:24:01 +0300 Subject: [PATCH 15/47] Analyzers (#55) * Listing analyzers * Adding the rest of analyzers API, plus tests * Adding analyzer documentation --- arangoasync/database.py | 131 ++++++++++++++++++++++++++++++++++++++ arangoasync/exceptions.py | 16 +++++ arangoasync/response.py | 16 ++++- docs/analyzer.rst | 39 ++++++++++++ docs/index.rst | 1 + tests/helpers.py | 9 +++ tests/test_analyzer.py | 91 ++++++++++++++++++++++++++ 7 files changed, 302 insertions(+), 1 deletion(-) create mode 100644 docs/analyzer.rst create mode 100644 tests/test_analyzer.py diff --git a/arangoasync/database.py b/arangoasync/database.py index 998c6dd..345df74 100644 --- a/arangoasync/database.py +++ b/arangoasync/database.py @@ -14,6 +14,10 @@ from arangoasync.connection import Connection from arangoasync.errno import HTTP_FORBIDDEN, HTTP_NOT_FOUND from arangoasync.exceptions import ( + AnalyzerCreateError, + AnalyzerDeleteError, + AnalyzerGetError, + AnalyzerListError, AsyncJobClearError, AsyncJobListError, CollectionCreateError, @@ -1461,6 +1465,133 @@ def response_handler(resp: Response) -> bool: return await self._executor.execute(request, response_handler) + async def analyzers(self) -> Result[Jsons]: + """List all analyzers in the database. + + Returns: + list: List of analyzers with their properties. + + Raises: + AnalyzerListError: If the operation fails. + + References: + - `list-all-analyzers `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_api/analyzer") + + def response_handler(resp: Response) -> Jsons: + if resp.is_success: + result: Jsons = self.deserializer.loads(resp.raw_body)["result"] + return result + raise AnalyzerListError(resp, request) + + return await self._executor.execute(request, response_handler) + + async def analyzer(self, name: str) -> Result[Json]: + """Return analyzer details. + + Args: + name (str): Analyzer name. + + Returns: + dict: Analyzer properties. 
+
+ Raises:
+ AnalyzerGetError: If the operation fails.
+ + References: + - `get-an-analyzer-definition `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint=f"/_api/analyzer/{name}") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise AnalyzerGetError(resp, request) + return Response.format_body(self.deserializer.loads(resp.raw_body)) + + return await self._executor.execute(request, response_handler) + + async def create_analyzer( + self, + name: str, + analyzer_type: str, + properties: Optional[Json] = None, + features: Optional[Sequence[str]] = None, + ) -> Result[Json]: + """Create an analyzer. + + Args: + name (str): Analyzer name. + analyzer_type (str): Type of the analyzer (e.g., "text", "identity"). + properties (dict | None): Properties of the analyzer. + features (list | None): The set of features to set on the Analyzer + generated fields. The default value is an empty array. Possible values: + "frequency", "norm", "position", "offset". + + Returns: + dict: Analyzer properties. + + Raises: + AnalyzerCreateError: If the operation fails. + + References: + - `create-an-analyzer `__ + """ # noqa: E501 + data: Json = {"name": name, "type": analyzer_type} + if properties is not None: + data["properties"] = properties + if features is not None: + data["features"] = features + + request = Request( + method=Method.POST, + endpoint="/_api/analyzer", + data=self.serializer.dumps(data), + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise AnalyzerCreateError(resp, request) + return self.deserializer.loads(resp.raw_body) + + return await self._executor.execute(request, response_handler) + + async def delete_analyzer( + self, name: str, force: Optional[bool] = None, ignore_missing: bool = False + ) -> Result[bool]: + """Delete an analyzer. + + Args: + name (str): Analyzer name. + force (bool | None): Remove the analyzer configuration even if in use. + ignore_missing (bool): Do not raise an exception on missing analyzer. + + Returns: + bool: `True` if the analyzer was deleted successfully, `False` if the + analyzer was not found and **ignore_missing** was set to `True`. + + Raises: + AnalyzerDeleteError: If the operation fails. + + References: + - `remove-an-analyzer `__ + """ # noqa: E501 + params: Params = {} + if force is not None: + params["force"] = force + + request = Request( + method=Method.DELETE, + endpoint=f"/_api/analyzer/{name}", + params=params, + ) + + def response_handler(resp: Response) -> bool: + if resp.is_success: + return True + if resp.status_code == HTTP_NOT_FOUND and ignore_missing: + return False + raise AnalyzerDeleteError(resp, request) + + return await self._executor.execute(request, response_handler) + async def has_user(self, username: str) -> Result[bool]: """Check if a user exists. 
diff --git a/arangoasync/exceptions.py b/arangoasync/exceptions.py index 4e46d06..e052fd4 100644 --- a/arangoasync/exceptions.py +++ b/arangoasync/exceptions.py @@ -135,6 +135,22 @@ class AQLQueryValidateError(ArangoServerError): """Failed to parse and validate query.""" +class AnalyzerCreateError(ArangoServerError): + """Failed to create analyzer.""" + + +class AnalyzerGetError(ArangoServerError): + """Failed to retrieve analyzer details.""" + + +class AnalyzerDeleteError(ArangoServerError): + """Failed to delete analyzer.""" + + +class AnalyzerListError(ArangoServerError): + """Failed to retrieve analyzers.""" + + class AsyncExecuteError(ArangoServerError): """Failed to execute async API request.""" diff --git a/arangoasync/response.py b/arangoasync/response.py index 63b10fb..000def9 100644 --- a/arangoasync/response.py +++ b/arangoasync/response.py @@ -5,7 +5,7 @@ from typing import Optional from arangoasync.request import Method -from arangoasync.typings import ResponseHeaders +from arangoasync.typings import Json, ResponseHeaders class Response: @@ -63,3 +63,17 @@ def __init__( self.error_code: Optional[int] = None self.error_message: Optional[str] = None self.is_success: Optional[bool] = None + + @staticmethod + def format_body(body: Json) -> Json: + """Format the generic response body, stripping the error code and message. + + Args: + body (Json): The response body. + + Returns: + dict: The formatted response body. + """ + body.pop("error", None) + body.pop("code", None) + return body diff --git a/docs/analyzer.rst b/docs/analyzer.rst new file mode 100644 index 0000000..cd92018 --- /dev/null +++ b/docs/analyzer.rst @@ -0,0 +1,39 @@ +Analyzers +--------- + +For more information on analyzers, refer to `ArangoDB Manual`_. + +.. _ArangoDB Manual: https://docs.arangodb.com + +**Example:** + +.. code-block:: python + + from arangoasync import ArangoClient + from arangoasync.auth import Auth + + # Initialize the client for ArangoDB. + async with ArangoClient(hosts="http://localhost:8529") as client: + auth = Auth(username="root", password="passwd") + + # Connect to "test" database as root user. + db = await client.db("test", auth=auth) + + # Create an analyzer. + await db.create_analyzer( + name='test_analyzer', + analyzer_type='identity', + properties={}, + features=[] + ) + + # Retrieve the created analyzer. + analyzer = await db.analyzer('test_analyzer') + + # Retrieve list of analyzers. + await db.analyzers() + + # Delete an analyzer. + await db.delete_analyzer('test_analyzer', ignore_missing=True) + +Refer to :class:`arangoasync.database.StandardDatabase` class for API specification. diff --git a/docs/index.rst b/docs/index.rst index f30ed6e..375303c 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -45,6 +45,7 @@ Contents transaction view + analyzer **API Executions** diff --git a/tests/helpers.py b/tests/helpers.py index b961064..f2f63f7 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -53,3 +53,12 @@ def generate_view_name(): str: Random view name. """ return f"test_view_{uuid4().hex}" + + +def generate_analyzer_name(): + """Generate and return a random analyzer name. + + Returns: + str: Random analyzer name. 
+ """ + return f"test_analyzer_{uuid4().hex}" diff --git a/tests/test_analyzer.py b/tests/test_analyzer.py new file mode 100644 index 0000000..856b6d7 --- /dev/null +++ b/tests/test_analyzer.py @@ -0,0 +1,91 @@ +import pytest +from packaging import version + +from arangoasync.exceptions import ( + AnalyzerCreateError, + AnalyzerDeleteError, + AnalyzerGetError, + AnalyzerListError, +) +from tests.helpers import generate_analyzer_name + + +@pytest.mark.asyncio +async def test_analyzer_management(db, bad_db, enterprise, db_version): + analyzer_name = generate_analyzer_name() + full_analyzer_name = db.name + "::" + analyzer_name + bad_analyzer_name = generate_analyzer_name() + + # Test create identity analyzer + result = await db.create_analyzer(analyzer_name, "identity") + assert result["name"] == full_analyzer_name + assert result["type"] == "identity" + assert result["properties"] == {} + assert result["features"] == [] + + # Test create delimiter analyzer + result = await db.create_analyzer( + name=generate_analyzer_name(), + analyzer_type="delimiter", + properties={"delimiter": ","}, + ) + assert result["type"] == "delimiter" + assert result["properties"] == {"delimiter": ","} + assert result["features"] == [] + + # Test create duplicate with bad database + with pytest.raises(AnalyzerCreateError): + await bad_db.create_analyzer(analyzer_name, "identity") + + # Test get analyzer + result = await db.analyzer(analyzer_name) + assert result["name"] == full_analyzer_name + assert result["type"] == "identity" + assert result["properties"] == {} + assert result["features"] == [] + + # Test get missing analyzer + with pytest.raises(AnalyzerGetError): + await db.analyzer(bad_analyzer_name) + + # Test list analyzers + result = await db.analyzers() + assert full_analyzer_name in [a["name"] for a in result] + + # Test list analyzers with bad database + with pytest.raises(AnalyzerListError): + await bad_db.analyzers() + + # Test delete analyzer + assert await db.delete_analyzer(analyzer_name, force=True) is True + assert full_analyzer_name not in [a["name"] for a in await db.analyzers()] + + # Test delete missing analyzer + with pytest.raises(AnalyzerDeleteError): + await db.delete_analyzer(analyzer_name) + + # Test delete missing analyzer with ignore_missing set to True + assert await db.delete_analyzer(analyzer_name, ignore_missing=True) is False + + # Test create geo_s2 analyzer + if enterprise: + analyzer_name = generate_analyzer_name() + result = await db.create_analyzer(analyzer_name, "geo_s2", properties={}) + assert result["type"] == "geo_s2" + assert await db.delete_analyzer(analyzer_name) + + if db_version >= version.parse("3.12.0"): + # Test delimiter analyzer with multiple delimiters + result = await db.create_analyzer( + name=generate_analyzer_name(), + analyzer_type="multi_delimiter", + properties={"delimiters": [",", "."]}, + ) + assert result["type"] == "multi_delimiter" + assert result["properties"] == {"delimiters": [",", "."]} + + # Test wildcard analyzer + analyzer_name = generate_analyzer_name() + result = await db.create_analyzer(analyzer_name, "wildcard", {"ngramSize": 4}) + assert result["type"] == "wildcard" + assert result["properties"] == {"ngramSize": 4} From 224ba77e2761eae29e81af66d85fdec7621d1790 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Sun, 15 Jun 2025 16:45:51 +0300 Subject: [PATCH 16/47] Formatting response bodies (#56) --- arangoasync/aql.py | 8 ++++---- arangoasync/collection.py | 5 +---- arangoasync/database.py | 8 ++++---- 3 files changed, 9 
insertions(+), 12 deletions(-) diff --git a/arangoasync/aql.py b/arangoasync/aql.py index 57d57e1..b81cade 100644 --- a/arangoasync/aql.py +++ b/arangoasync/aql.py @@ -597,7 +597,7 @@ async def explain( def response_handler(resp: Response) -> Json: if not resp.is_success: raise AQLQueryExplainError(resp, request) - return self.deserializer.loads(resp.raw_body) + return Response.format_body(self.deserializer.loads(resp.raw_body)) return await self._executor.execute(request, response_handler) @@ -625,7 +625,7 @@ async def validate(self, query: str) -> Result[Json]: def response_handler(resp: Response) -> Json: if not resp.is_success: raise AQLQueryValidateError(resp, request) - return self.deserializer.loads(resp.raw_body) + return Response.format_body(self.deserializer.loads(resp.raw_body)) return await self._executor.execute(request, response_handler) @@ -719,7 +719,7 @@ async def create_function( def response_handler(resp: Response) -> Json: if not resp.is_success: raise AQLFunctionCreateError(resp, request) - return self.deserializer.loads(resp.raw_body) + return Response.format_body(self.deserializer.loads(resp.raw_body)) return await self._executor.execute(request, response_handler) @@ -760,6 +760,6 @@ def response_handler(resp: Response) -> Json: if not resp.is_success: if not (resp.status_code == HTTP_NOT_FOUND and ignore_missing): raise AQLFunctionDeleteError(resp, request) - return self.deserializer.loads(resp.raw_body) + return Response.format_body(self.deserializer.loads(resp.raw_body)) return await self._executor.execute(request, response_handler) diff --git a/arangoasync/collection.py b/arangoasync/collection.py index c34c1aa..810ee06 100644 --- a/arangoasync/collection.py +++ b/arangoasync/collection.py @@ -2592,10 +2592,7 @@ async def edges( def response_handler(resp: Response) -> Json: if not resp.is_success: raise EdgeListError(resp, request) - body = self.deserializer.loads(resp.raw_body) - for key in ("error", "code"): - body.pop(key) - return body + return Response.format_body(self.deserializer.loads(resp.raw_body)) return await self._executor.execute(request, response_handler) diff --git a/arangoasync/database.py b/arangoasync/database.py index 345df74..dbcc319 100644 --- a/arangoasync/database.py +++ b/arangoasync/database.py @@ -1255,7 +1255,7 @@ async def view(self, name: str) -> Result[Json]: def response_handler(resp: Response) -> Json: if not resp.is_success: raise ViewGetError(resp, request) - return self.deserializer.loads(resp.raw_body) + return Response.format_body(self.deserializer.loads(resp.raw_body)) return await self._executor.execute(request, response_handler) @@ -1280,7 +1280,7 @@ async def view_info(self, name: str) -> Result[Json]: def response_handler(resp: Response) -> Json: if not resp.is_success: raise ViewGetError(resp, request) - return self.deserializer.loads(resp.raw_body) + return Response.format_body(self.deserializer.loads(resp.raw_body)) return await self._executor.execute(request, response_handler) @@ -2006,7 +2006,7 @@ def response_handler(resp: Response) -> Json: if not resp.is_success: raise JWTSecretListError(resp, request) result: Json = self.deserializer.loads(resp.raw_body) - return result + return Response.format_body(result) return await self._executor.execute(request, response_handler) @@ -2028,7 +2028,7 @@ def response_handler(resp: Response) -> Json: if not resp.is_success: raise JWTSecretReloadError(resp, request) result: Json = self.deserializer.loads(resp.raw_body) - return result + return Response.format_body(result) 
return await self._executor.execute(request, response_handler) From 8972237ce4df6630dc8eeea859b27c4610025508 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Sun, 15 Jun 2025 13:47:37 +0000 Subject: [PATCH 17/47] Bumping version number --- arangoasync/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arangoasync/version.py b/arangoasync/version.py index 81f0fde..b1a19e3 100644 --- a/arangoasync/version.py +++ b/arangoasync/version.py @@ -1 +1 @@ -__version__ = "0.0.4" +__version__ = "0.0.5" From d3e7ae752bfb9fa1f20a69eadfd8711adc518628 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Wed, 9 Jul 2025 04:49:28 +0300 Subject: [PATCH 18/47] Skipping _db prefix when using /_open/auth (#57) --- arangoasync/connection.py | 12 +++++++++--- tests/test_connection.py | 6 ++++++ 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/arangoasync/connection.py b/arangoasync/connection.py index f404248..21fa756 100644 --- a/arangoasync/connection.py +++ b/arangoasync/connection.py @@ -160,11 +160,16 @@ def compress_request(self, request: Request) -> bool: return result - async def process_request(self, request: Request) -> Response: + async def process_request( + self, + request: Request, + skip_db_prefix: bool = False, + ) -> Response: """Process request, potentially trying multiple hosts. Args: request (Request): Request object. + skip_db_prefix (bool): If `True`, do not prepend the database endpoint. Returns: Response: Response object. @@ -173,7 +178,8 @@ async def process_request(self, request: Request) -> Response: ConnectionAbortedError: If it can't connect to host(s) within limit. """ - request.endpoint = f"{self._db_endpoint}{request.endpoint}" + if not skip_db_prefix: + request.endpoint = f"{self._db_endpoint}{request.endpoint}" host_index = self._host_resolver.get_host_index() for tries in range(self._host_resolver.max_tries): try: @@ -376,7 +382,7 @@ async def refresh_token(self) -> None: ) try: - resp = await self.process_request(request) + resp = await self.process_request(request, skip_db_prefix=True) except ClientConnectionAbortedError as e: raise JWTRefreshError(str(e)) from e except ServerConnectionError as e: diff --git a/tests/test_connection.py b/tests/test_connection.py index 568815c..e053e58 100644 --- a/tests/test_connection.py +++ b/tests/test_connection.py @@ -223,6 +223,12 @@ async def test_JwtConnection_ping_success( status_code = await connection1.ping() assert status_code == 200 + # Refresh the token + await connection3.refresh_token() + status_code = await connection1.ping() + assert status_code == 200 + assert connection3.token != connection1.token + @pytest.mark.asyncio async def test_JwtSuperuserConnection_ping_success( From 52493cb1a500ab340a720286f2b7b7bb547191e1 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Mon, 14 Jul 2025 10:15:27 +0300 Subject: [PATCH 19/47] Refactored request to skip db prefix (#58) --- arangoasync/connection.py | 7 +++---- arangoasync/database.py | 4 +++- arangoasync/request.py | 5 +++++ 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/arangoasync/connection.py b/arangoasync/connection.py index 21fa756..5fa6363 100644 --- a/arangoasync/connection.py +++ b/arangoasync/connection.py @@ -163,13 +163,11 @@ def compress_request(self, request: Request) -> bool: async def process_request( self, request: Request, - skip_db_prefix: bool = False, ) -> Response: """Process request, potentially trying multiple hosts. Args: request (Request): Request object. 
- skip_db_prefix (bool): If `True`, do not prepend the database endpoint. Returns: Response: Response object. @@ -178,7 +176,7 @@ async def process_request( ConnectionAbortedError: If it can't connect to host(s) within limit. """ - if not skip_db_prefix: + if request.prefix_needed: request.endpoint = f"{self._db_endpoint}{request.endpoint}" host_index = self._host_resolver.get_host_index() for tries in range(self._host_resolver.max_tries): @@ -379,10 +377,11 @@ async def refresh_token(self) -> None: method=Method.POST, endpoint="/_open/auth", data=auth.encode("utf-8"), + prefix_needed=False, ) try: - resp = await self.process_request(request, skip_db_prefix=True) + resp = await self.process_request(request) except ClientConnectionAbortedError as e: raise JWTRefreshError(str(e)) from e except ServerConnectionError as e: diff --git a/arangoasync/database.py b/arangoasync/database.py index dbcc319..c188290 100644 --- a/arangoasync/database.py +++ b/arangoasync/database.py @@ -2022,7 +2022,9 @@ async def reload_jwt_secrets(self) -> Result[Json]: References: - `hot-reload-the-jwt-secrets-from-disk `__ """ # noqa: 501 - request = Request(method=Method.POST, endpoint="/_admin/server/jwt") + request = Request( + method=Method.POST, endpoint="/_admin/server/jwt", prefix_needed=False + ) def response_handler(resp: Response) -> Json: if not resp.is_success: diff --git a/arangoasync/request.py b/arangoasync/request.py index 951c9e9..6bd629d 100644 --- a/arangoasync/request.py +++ b/arangoasync/request.py @@ -33,6 +33,7 @@ class Request: params (dict | None): URL parameters. data (bytes | None): Request payload. auth (Auth | None): Authentication. + prefix_needed (bool): Whether the request needs a prefix (e.g., database name). Attributes: method (Method): HTTP method. @@ -41,6 +42,7 @@ class Request: params (dict | None): URL parameters. data (bytes | None): Request payload. auth (Auth | None): Authentication. + prefix_needed (bool): Whether the request needs a prefix (e.g., database name). """ __slots__ = ( @@ -50,6 +52,7 @@ class Request: "params", "data", "auth", + "prefix_needed", ) def __init__( @@ -60,6 +63,7 @@ def __init__( params: Optional[Params] = None, data: Optional[bytes | str] = None, auth: Optional[Auth] = None, + prefix_needed: bool = True, ) -> None: self.method: Method = method self.endpoint: str = endpoint @@ -67,6 +71,7 @@ def __init__( self.params: Params = params or dict() self.data: Optional[bytes | str] = data self.auth: Optional[Auth] = auth + self.prefix_needed = prefix_needed def normalized_headers(self) -> RequestHeaders: """Normalize request headers. 
From e014bf850b91f10c4b09b092dcdeced6c871b0f8 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Mon, 28 Jul 2025 19:45:41 +0200 Subject: [PATCH 20/47] Collection API completeness (#59) * Adding /figures support * Adding support for /responsibleShard * Adding support for /shards * Adding support for /revision * Adding support for /checksum * Added support for /key-generators * Skipping part of test in 3.11 * Adding configure method * Adding renaming method * recalculate-the-document-count-of-a-collection * compact-a-collection --- arangoasync/collection.py | 322 +++++++++++++++++++++++++++++++++++++- arangoasync/database.py | 24 +++ arangoasync/exceptions.py | 48 +++++- arangoasync/typings.py | 142 ++++++++++++++++- tests/test_collection.py | 83 +++++++++- tests/test_database.py | 15 +- tests/test_typings.py | 60 +++++++ 7 files changed, 677 insertions(+), 17 deletions(-) diff --git a/arangoasync/collection.py b/arangoasync/collection.py index 810ee06..e3d12ee 100644 --- a/arangoasync/collection.py +++ b/arangoasync/collection.py @@ -16,7 +16,16 @@ HTTP_PRECONDITION_FAILED, ) from arangoasync.exceptions import ( + CollectionChecksumError, + CollectionCompactError, + CollectionConfigureError, CollectionPropertiesError, + CollectionRecalculateCountError, + CollectionRenameError, + CollectionResponsibleShardError, + CollectionRevisionError, + CollectionShardsError, + CollectionStatisticsError, CollectionTruncateError, DocumentCountError, DocumentDeleteError, @@ -40,7 +49,9 @@ from arangoasync.result import Result from arangoasync.serialization import Deserializer, Serializer from arangoasync.typings import ( + CollectionInfo, CollectionProperties, + CollectionStatistics, IndexProperties, Json, Jsons, @@ -481,6 +492,26 @@ def response_handler(resp: Response) -> bool: return await self._executor.execute(request, response_handler) + async def recalculate_count(self) -> None: + """Recalculate the document count. + + Raises: + CollectionRecalculateCountError: If re-calculation fails. + + References: + - `recalculate-the-document-count-of-a-collection `__ + """ # noqa: E501 + request = Request( + method=Method.PUT, + endpoint=f"/_api/collection/{self.name}/recalculateCount", + ) + + def response_handler(resp: Response) -> None: + if not resp.is_success: + raise CollectionRecalculateCountError(resp, request) + + await self._executor.execute(request, response_handler) + async def properties(self) -> Result[CollectionProperties]: """Return the full properties of the current collection. @@ -501,7 +532,129 @@ async def properties(self) -> Result[CollectionProperties]: def response_handler(resp: Response) -> CollectionProperties: if not resp.is_success: raise CollectionPropertiesError(resp, request) - return CollectionProperties(self._executor.deserialize(resp.raw_body)) + return CollectionProperties(self.deserializer.loads(resp.raw_body)) + + return await self._executor.execute(request, response_handler) + + async def configure( + self, + cache_enabled: Optional[bool] = None, + computed_values: Optional[Jsons] = None, + replication_factor: Optional[int | str] = None, + schema: Optional[Json] = None, + wait_for_sync: Optional[bool] = None, + write_concern: Optional[int] = None, + ) -> Result[CollectionProperties]: + """Changes the properties of a collection. + + Only the provided attributes are updated. + + Args: + cache_enabled (bool | None): Whether the in-memory hash cache + for documents should be enabled for this collection. 
+ computed_values (list | None): An optional list of objects, each
+ representing a computed value.
+ replication_factor (int | str | None): In a cluster, this attribute determines
+ how many copies of each shard are kept on different DB-Servers.
+ For SatelliteCollections, it needs to be the string "satellite".
+ schema (dict | None): The configuration of the collection-level schema
+ validation for documents.
+ wait_for_sync (bool | None): If set to `True`, the data is synchronized
+ to disk before returning from a document create, update, replace or
+ removal operation.
+ write_concern (int | None): Determines how many copies of each shard are
+ required to be in sync on the different DB-Servers.
+
+ Returns:
+ CollectionProperties: Updated properties.
+
+ Raises:
+ CollectionConfigureError: If configuration fails.
+
+ References:
+ - `change-the-properties-of-a-collection `__
+ """ # noqa: E501
+ data: Json = {}
+ if cache_enabled is not None:
+ data["cacheEnabled"] = cache_enabled
+ if computed_values is not None:
+ data["computedValues"] = computed_values
+ if replication_factor is not None:
+ data["replicationFactor"] = replication_factor
+ if schema is not None:
+ data["schema"] = schema
+ if wait_for_sync is not None:
+ data["waitForSync"] = wait_for_sync
+ if write_concern is not None:
+ data["writeConcern"] = write_concern
+ request = Request(
+ method=Method.PUT,
+ endpoint=f"/_api/collection/{self.name}/properties",
+ data=self.serializer.dumps(data),
+ )
+
+ def response_handler(resp: Response) -> CollectionProperties:
+ if not resp.is_success:
+ raise CollectionConfigureError(resp, request)
+ return CollectionProperties(self.deserializer.loads(resp.raw_body))
+
+ return await self._executor.execute(request, response_handler)
+
+ async def rename(self, new_name: str) -> None:
+ """Rename the collection.
+
+ Renames may not be reflected immediately in async execution, batch
+ execution or transactions. It is recommended to initialize new API
+ wrappers after a rename.
+
+ Note:
+ Renaming collections is not supported in cluster deployments.
+
+ Args:
+ new_name (str): New collection name.
+
+ Raises:
+ CollectionRenameError: If rename fails.
+
+ References:
+ - `rename-a-collection `__
+ """ # noqa: E501
+ data: Json = {"name": new_name}
+ request = Request(
+ method=Method.PUT,
+ endpoint=f"/_api/collection/{self.name}/rename",
+ data=self.serializer.dumps(data),
+ )
+
+ def response_handler(resp: Response) -> None:
+ if not resp.is_success:
+ raise CollectionRenameError(resp, request)
+ self._name = new_name
+ self._id_prefix = f"{new_name}/"
+
+ await self._executor.execute(request, response_handler)
+
+ async def compact(self) -> Result[CollectionInfo]:
+ """Compact a collection.
+
+ Returns:
+ CollectionInfo: Collection information.
+
+ Raises:
+ CollectionCompactError: If compaction fails.
+
+ References:
+ - `compact-a-collection `__
+ """ # noqa: E501
+ request = Request(
+ method=Method.PUT,
+ endpoint=f"/_api/collection/{self.name}/compact",
+ )
+
+ def response_handler(resp: Response) -> CollectionInfo:
+ if not resp.is_success:
+ raise CollectionCompactError(resp, request)
+ return CollectionInfo(self.deserializer.loads(resp.raw_body))
 
 return await self._executor.execute(request, response_handler)
 
@@ -552,7 +705,10 @@ async def count(self) -> Result[int]:
 
 Raises:
 DocumentCountError: If retrieval fails.
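+
+ Example:
+ .. code-block:: python
+
+ # Hedged sketch; "students" is an assumed collection name.
+ students = db.collection("students")
+ total = await students.count()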
- """ + + References: + - `get-the-document-count-of-a-collection `__ + """ # noqa: E501 request = Request( method=Method.GET, endpoint=f"/_api/collection/{self.name}/count" ) @@ -565,6 +721,158 @@ def response_handler(resp: Response) -> int: return await self._executor.execute(request, response_handler) + async def statistics(self) -> Result[CollectionStatistics]: + """Get additional statistical information about the collection. + + Returns: + CollectionStatistics: Collection statistics. + + Raises: + CollectionStatisticsError: If retrieval fails. + + References: + - `get-the-collection-statistics `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint=f"/_api/collection/{self.name}/figures", + ) + + def response_handler(resp: Response) -> CollectionStatistics: + if not resp.is_success: + raise CollectionStatisticsError(resp, request) + return CollectionStatistics(self.deserializer.loads(resp.raw_body)) + + return await self._executor.execute(request, response_handler) + + async def responsible_shard(self, document: Json) -> Result[str]: + """Return the ID of the shard responsible for given document. + + If the document does not exist, return the shard that would be + responsible. + + Args: + document (dict): Document body with "_key" field. + + Returns: + str: Shard ID. + + Raises: + CollectionResponsibleShardError: If retrieval fails. + + References: + - `get-the-responsible-shard-for-a-document `__ + """ # noqa: E501 + request = Request( + method=Method.PUT, + endpoint=f"/_api/collection/{self.name}/responsibleShard", + data=self.serializer.dumps(document), + ) + + def response_handler(resp: Response) -> str: + if resp.is_success: + body = self.deserializer.loads(resp.raw_body) + return cast(str, body["shardId"]) + raise CollectionResponsibleShardError(resp, request) + + return await self._executor.execute(request, response_handler) + + async def shards(self, details: Optional[bool] = None) -> Result[Json]: + """Return collection shards and properties. + + Available only in a cluster setup. + + Args: + details (bool | None): If set to `True`, include responsible + servers for these shards. + + Returns: + dict: Collection shards. + + Raises: + CollectionShardsError: If retrieval fails. + + References: + - `get-the-shard-ids-of-a-collection `__ + """ # noqa: E501 + params: Params = {} + if details is not None: + params["details"] = details + + request = Request( + method=Method.GET, + endpoint=f"/_api/collection/{self.name}/shards", + params=params, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise CollectionShardsError(resp, request) + return cast(Json, self.deserializer.loads(resp.raw_body)["shards"]) + + return await self._executor.execute(request, response_handler) + + async def revision(self) -> Result[str]: + """Return collection revision. + + Returns: + str: Collection revision. + + Raises: + CollectionRevisionError: If retrieval fails. + + References: + - `get-the-collection-revision-id `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint=f"/_api/collection/{self.name}/revision", + ) + + def response_handler(resp: Response) -> str: + if not resp.is_success: + raise CollectionRevisionError(resp, request) + return cast(str, self.deserializer.loads(resp.raw_body)["revision"]) + + return await self._executor.execute(request, response_handler) + + async def checksum( + self, with_rev: Optional[bool] = None, with_data: Optional[bool] = None + ) -> Result[str]: + """Calculate collection checksum. 
+
+ The checksum can be used to detect whether the data in a collection
+ has changed between two points in time.
+ + Args: + with_rev (bool | None): Include document revisions in checksum calculation. + with_data (bool | None): Include document data in checksum calculation. + + Returns: + str: Collection checksum. + + Raises: + CollectionChecksumError: If retrieval fails. + + References: + - `get-the-collection-checksum `__ + """ # noqa: E501 + params: Params = {} + if with_rev is not None: + params["withRevision"] = with_rev + if with_data is not None: + params["withData"] = with_data + + request = Request( + method=Method.GET, + endpoint=f"/_api/collection/{self.name}/checksum", + params=params, + ) + + def response_handler(resp: Response) -> str: + if not resp.is_success: + raise CollectionChecksumError(resp, request) + return cast(str, self.deserializer.loads(resp.raw_body)["checksum"]) + + return await self._executor.execute(request, response_handler) + async def has( self, document: str | Json, @@ -1444,9 +1752,9 @@ async def insert( def response_handler(resp: Response) -> bool | Json: if resp.is_success: - if silent is True: + if silent: return True - return self._executor.deserialize(resp.raw_body) + return self.deserializer.loads(resp.raw_body) msg: Optional[str] = None if resp.status_code == HTTP_BAD_PARAMETER: msg = ( @@ -1551,7 +1859,7 @@ def response_handler(resp: Response) -> bool | Json: if resp.is_success: if silent is True: return True - return self._executor.deserialize(resp.raw_body) + return self.deserializer.loads(resp.raw_body) msg: Optional[str] = None if resp.status_code == HTTP_PRECONDITION_FAILED: raise DocumentRevisionError(resp, request) @@ -1641,7 +1949,7 @@ def response_handler(resp: Response) -> bool | Json: if resp.is_success: if silent is True: return True - return self._executor.deserialize(resp.raw_body) + return self.deserializer.loads(resp.raw_body) msg: Optional[str] = None if resp.status_code == HTTP_PRECONDITION_FAILED: raise DocumentRevisionError(resp, request) @@ -1726,7 +2034,7 @@ def response_handler(resp: Response) -> bool | Json: if resp.is_success: if silent is True: return True - return self._executor.deserialize(resp.raw_body) + return self.deserializer.loads(resp.raw_body) msg: Optional[str] = None if resp.status_code == HTTP_PRECONDITION_FAILED: raise DocumentRevisionError(resp, request) diff --git a/arangoasync/database.py b/arangoasync/database.py index c188290..578222f 100644 --- a/arangoasync/database.py +++ b/arangoasync/database.py @@ -22,6 +22,7 @@ AsyncJobListError, CollectionCreateError, CollectionDeleteError, + CollectionKeyGeneratorsError, CollectionListError, DatabaseCreateError, DatabaseDeleteError, @@ -695,6 +696,29 @@ def response_handler(resp: Response) -> bool: return await self._executor.execute(request, response_handler) + async def key_generators(self) -> Result[List[str]]: + """Returns the available key generators for collections. + + Returns: + list: List of available key generators. + + Raises: + CollectionKeyGeneratorsError: If retrieval fails. 
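Checksums are mainly useful for comparing the contents of two collections, while ``key_generators`` is a server-wide query. A hedged sketch; ``other_col`` is an assumed second collection wrapper:

.. code-block:: python

    # Use the same checksum parameters on both sides of a comparison.
    c1 = await col.checksum(with_rev=False, with_data=True)
    c2 = await other_col.checksum(with_rev=False, with_data=True)
    in_sync = c1 == c2

    # Available key generator types (ArangoDB 3.12+).
    generators = await db.key_generators()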
+ + References: + - `get-the-available-key-generators `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_api/key-generators") + + def response_handler(resp: Response) -> List[str]: + if not resp.is_success: + raise CollectionKeyGeneratorsError(resp, request) + return cast( + List[str], self.deserializer.loads(resp.raw_body)["keyGenerators"] + ) + + return await self._executor.execute(request, response_handler) + async def has_document( self, document: str | Json, diff --git a/arangoasync/exceptions.py b/arangoasync/exceptions.py index e052fd4..5de6ea4 100644 --- a/arangoasync/exceptions.py +++ b/arangoasync/exceptions.py @@ -183,10 +183,26 @@ class CollectionCreateError(ArangoServerError): """Failed to create collection.""" +class CollectionChecksumError(ArangoServerError): + """Failed to retrieve collection checksum.""" + + +class CollectionConfigureError(ArangoServerError): + """Failed to configure collection properties.""" + + +class CollectionCompactError(ArangoServerError): + """Failed to compact collection.""" + + class CollectionDeleteError(ArangoServerError): """Failed to delete collection.""" +class CollectionKeyGeneratorsError(ArangoServerError): + """Failed to retrieve key generators.""" + + class CollectionListError(ArangoServerError): """Failed to retrieve collections.""" @@ -195,18 +211,42 @@ class CollectionPropertiesError(ArangoServerError): """Failed to retrieve collection properties.""" -class ClientConnectionAbortedError(ArangoClientError): - """The connection was aborted.""" +class CollectionRecalculateCountError(ArangoServerError): + """Failed to recalculate document count.""" -class ClientConnectionError(ArangoClientError): - """The request was unable to reach the server.""" +class CollectionRenameError(ArangoServerError): + """Failed to rename collection.""" + + +class CollectionResponsibleShardError(ArangoServerError): + """Failed to retrieve responsible shard.""" + + +class CollectionRevisionError(ArangoServerError): + """Failed to retrieve collection revision.""" + + +class CollectionShardsError(ArangoServerError): + """Failed to retrieve collection shards.""" + + +class CollectionStatisticsError(ArangoServerError): + """Failed to retrieve collection statistics.""" class CollectionTruncateError(ArangoServerError): """Failed to truncate collection.""" +class ClientConnectionAbortedError(ArangoClientError): + """The connection was aborted.""" + + +class ClientConnectionError(ArangoClientError): + """The request was unable to reach the server.""" + + class CursorCloseError(ArangoServerError): """Failed to delete the cursor result from server.""" diff --git a/arangoasync/typings.py b/arangoasync/typings.py index 280e27e..d49411d 100644 --- a/arangoasync/typings.py +++ b/arangoasync/typings.py @@ -791,8 +791,6 @@ def compatibility_formatter(data: Json) -> Json: result["deleted"] = data["deleted"] if "syncByRevision" in data: result["sync_by_revision"] = data["syncByRevision"] - if "tempObjectId" in data: - result["temp_object_id"] = data["tempObjectId"] if "usesRevisionsAsDocumentIds" in data: result["rev_as_id"] = data["usesRevisionsAsDocumentIds"] if "isDisjoint" in data: @@ -819,6 +817,146 @@ def format(self, formatter: Optional[Formatter] = None) -> Json: return self.compatibility_formatter(self._data) +class CollectionStatistics(JsonWrapper): + """Statistical information about the collection. + + Example: + .. 
code-block:: json + + { + "figures" : { + "indexes" : { + "count" : 1, + "size" : 1234 + }, + "documentsSize" : 5601, + "cacheInUse" : false, + "cacheSize" : 0, + "cacheUsage" : 0, + "engine" : { + "documents" : 1, + "indexes" : [ + { + "type" : "primary", + "id" : 0, + "count" : 1 + } + ] + } + }, + "writeConcern" : 1, + "waitForSync" : false, + "usesRevisionsAsDocumentIds" : true, + "syncByRevision" : true, + "statusString" : "loaded", + "id" : "69123", + "isSmartChild" : false, + "schema" : null, + "name" : "products", + "type" : 2, + "status" : 3, + "count" : 1, + "cacheEnabled" : false, + "isSystem" : false, + "internalValidatorType" : 0, + "globallyUniqueId" : "hB7C02EE43DCE/69123", + "keyOptions" : { + "allowUserKeys" : true, + "type" : "traditional", + "lastValue" : 69129 + }, + "computedValues" : null, + "objectId" : "69124" + } + + References: + - `get-the-collection-statistics `__ + """ # noqa: E501 + + def __init__(self, data: Json) -> None: + super().__init__(data) + + @property + def figures(self) -> Json: + return cast(Json, self._data.get("figures")) + + @property + def write_concern(self) -> Optional[int]: + return self._data.get("writeConcern") + + @property + def wait_for_sync(self) -> Optional[bool]: + return self._data.get("waitForSync") + + @property + def use_revisions_as_document_ids(self) -> Optional[bool]: + return self._data.get("usesRevisionsAsDocumentIds") + + @property + def sync_by_revision(self) -> Optional[bool]: + return self._data.get("syncByRevision") + + @property + def status_string(self) -> Optional[str]: + return self._data.get("statusString") + + @property + def id(self) -> str: + return self._data["id"] # type: ignore[no-any-return] + + @property + def is_smart_child(self) -> bool: + return self._data["isSmartChild"] # type: ignore[no-any-return] + + @property + def schema(self) -> Optional[Json]: + return self._data.get("schema") + + @property + def name(self) -> str: + return self._data["name"] # type: ignore[no-any-return] + + @property + def type(self) -> CollectionType: + return CollectionType.from_int(self._data["type"]) + + @property + def status(self) -> CollectionStatus: + return CollectionStatus.from_int(self._data["status"]) + + @property + def count(self) -> int: + return self._data["count"] # type: ignore[no-any-return] + + @property + def cache_enabled(self) -> Optional[bool]: + return self._data.get("cacheEnabled") + + @property + def is_system(self) -> bool: + return self._data["isSystem"] # type: ignore[no-any-return] + + @property + def internal_validator_type(self) -> Optional[int]: + return self._data.get("internalValidatorType") + + @property + def globally_unique_id(self) -> str: + return self._data["globallyUniqueId"] # type: ignore[no-any-return] + + @property + def key_options(self) -> KeyOptions: + return KeyOptions(self._data["keyOptions"]) + + @property + def computed_values(self) -> Optional[Json]: + return self._data.get("computedValues") + + @property + def object_id(self) -> str: + return self._data["objectId"] # type: ignore[no-any-return] + + class IndexProperties(JsonWrapper): """Properties of an index. 
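Since ``CollectionStatistics`` wraps the raw response, both the typed accessors above and plain dict-style access work. A small illustrative sketch, assuming ``col.statistics()`` as defined earlier in this patch:

.. code-block:: python

    stats = await col.statistics()

    # Typed accessors.
    print(stats.name, stats.count, stats.status_string)

    # Nested figures stay plain JSON.
    print(stats.figures["indexes"]["count"])

    # Dict-style access via the JsonWrapper base class.
    assert "figures" in stats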
diff --git a/tests/test_collection.py b/tests/test_collection.py
index d9214dd..fb8d7ba 100644
--- a/tests/test_collection.py
+++ b/tests/test_collection.py
@@ -4,7 +4,16 @@
 from arangoasync.errno import DATA_SOURCE_NOT_FOUND, INDEX_NOT_FOUND
 from arangoasync.exceptions import (
+    CollectionChecksumError,
+    CollectionCompactError,
+    CollectionConfigureError,
     CollectionPropertiesError,
+    CollectionRecalculateCountError,
+    CollectionRenameError,
+    CollectionResponsibleShardError,
+    CollectionRevisionError,
+    CollectionShardsError,
+    CollectionStatisticsError,
     CollectionTruncateError,
     DocumentCountError,
     IndexCreateError,
@@ -13,6 +22,7 @@
     IndexListError,
     IndexLoadError,
 )
+from tests.helpers import generate_col_name
 
 
 def test_collection_attributes(db, doc_col):
@@ -22,7 +32,9 @@ def test_collection_attributes(db, doc_col):
 
 
 @pytest.mark.asyncio
-async def test_collection_misc_methods(doc_col, bad_col):
+async def test_collection_misc_methods(doc_col, bad_col, docs, cluster):
+    doc = await doc_col.insert(docs[0])
+
     # Properties
     properties = await doc_col.properties()
     assert properties.name == doc_col.name
@@ -31,6 +43,75 @@
     with pytest.raises(CollectionPropertiesError):
         await bad_col.properties()
 
+    # Configure
+    wfs = not properties.wait_for_sync
+    new_properties = await doc_col.configure(wait_for_sync=wfs)
+    assert new_properties.wait_for_sync == wfs
+    with pytest.raises(CollectionConfigureError):
+        await bad_col.configure(wait_for_sync=wfs)
+
+    # Statistics
+    statistics = await doc_col.statistics()
+    assert statistics.name == doc_col.name
+    assert "figures" in statistics
+    with pytest.raises(CollectionStatisticsError):
+        await bad_col.statistics()
+
+    # Shards
+    if cluster:
+        shard = await doc_col.responsible_shard(doc)
+        assert isinstance(shard, str)
+        with pytest.raises(CollectionResponsibleShardError):
+            await bad_col.responsible_shard(doc)
+        shards = await doc_col.shards(details=True)
+        assert isinstance(shards, dict)
+        with pytest.raises(CollectionShardsError):
+            await bad_col.shards()
+
+    # Revision
+    revision = await doc_col.revision()
+    assert isinstance(revision, str)
+    with pytest.raises(CollectionRevisionError):
+        await bad_col.revision()
+
+    # Checksum
+    checksum = await doc_col.checksum(with_rev=True, with_data=True)
+    assert isinstance(checksum, str)
+    with pytest.raises(CollectionChecksumError):
+        await bad_col.checksum()
+
+    # Recalculate count
+    with pytest.raises(CollectionRecalculateCountError):
+        await bad_col.recalculate_count()
+    await doc_col.recalculate_count()
+
+    # Compact
+    with pytest.raises(CollectionCompactError):
+        await bad_col.compact()
+    res = await doc_col.compact()
+    assert res.name == doc_col.name
+
+
+@pytest.mark.asyncio
+async def test_collection_rename(cluster, db, bad_col, docs):
+    if cluster:
+        pytest.skip("Renaming collections is not supported in cluster deployments.")
+
+    with pytest.raises(CollectionRenameError):
+        await bad_col.rename("new_name")
+
+    col_name = generate_col_name()
+    new_name = generate_col_name()
+    try:
+        await db.create_collection(col_name)
+        col = db.collection(col_name)
+        await col.rename(new_name)
+        assert col.name == new_name
+        doc = await col.insert(docs[0])
+        assert col.get_col_name(doc) == new_name
+    finally:
+        await db.delete_collection(new_name, ignore_missing=True)
+
 
 @pytest.mark.asyncio
 async def test_collection_index(doc_col, bad_col, cluster):
diff --git a/tests/test_database.py b/tests/test_database.py
index eb7daa3..7058ac1 100644
--- a/tests/test_database.py
+++ 
b/tests/test_database.py @@ -1,11 +1,13 @@ import asyncio import pytest +from packaging import version from arangoasync.collection import StandardCollection from arangoasync.exceptions import ( CollectionCreateError, CollectionDeleteError, + CollectionKeyGeneratorsError, CollectionListError, DatabaseCreateError, DatabaseDeleteError, @@ -21,7 +23,7 @@ @pytest.mark.asyncio -async def test_database_misc_methods(sys_db, db, bad_db, cluster): +async def test_database_misc_methods(sys_db, db, bad_db, cluster, db_version): # Status status = await sys_db.status() assert status["server"] == "arango" @@ -50,11 +52,18 @@ async def test_database_misc_methods(sys_db, db, bad_db, cluster): await bad_db.reload_jwt_secrets() # Version - version = await sys_db.version() - assert version["version"].startswith("3.") + v = await sys_db.version() + assert v["version"].startswith("3.") with pytest.raises(ServerVersionError): await bad_db.version() + # key generators + if db_version >= version.parse("3.12.0"): + key_generators = await db.key_generators() + assert isinstance(key_generators, list) + with pytest.raises(CollectionKeyGeneratorsError): + await bad_db.key_generators() + @pytest.mark.asyncio async def test_create_drop_database( diff --git a/tests/test_typings.py b/tests/test_typings.py index fd04fa1..3b4e5e2 100644 --- a/tests/test_typings.py +++ b/tests/test_typings.py @@ -2,6 +2,7 @@ from arangoasync.typings import ( CollectionInfo, + CollectionStatistics, CollectionStatus, CollectionType, EdgeDefinitionOptions, @@ -386,3 +387,62 @@ def test_EdgeDefinitionOptions(): ) assert options.satellites == ["col1", "col2"] + + +def test_CollectionStatistics(): + data = { + "figures": { + "indexes": {"count": 1, "size": 1234}, + "documentsSize": 5601, + "cacheInUse": False, + "cacheSize": 0, + "cacheUsage": 0, + }, + "writeConcern": 1, + "waitForSync": False, + "usesRevisionsAsDocumentIds": True, + "syncByRevision": True, + "statusString": "loaded", + "id": "69123", + "isSmartChild": False, + "schema": None, + "name": "products", + "type": 2, + "status": 3, + "count": 1, + "cacheEnabled": False, + "isSystem": False, + "internalValidatorType": 0, + "globallyUniqueId": "hB7C02EE43DCE/69123", + "keyOptions": { + "allowUserKeys": True, + "type": "traditional", + "lastValue": 69129, + }, + "computedValues": None, + "objectId": "69124", + } + + stats = CollectionStatistics(data) + + assert stats.figures == data["figures"] + assert stats.write_concern == 1 + assert stats.wait_for_sync is False + assert stats.use_revisions_as_document_ids is True + assert stats.sync_by_revision is True + assert stats.status_string == "loaded" + assert stats.id == "69123" + assert stats.is_smart_child is False + assert stats.schema is None + assert stats.name == "products" + assert stats.type == CollectionType.DOCUMENT + assert stats.status == CollectionStatus.LOADED + assert stats.count == 1 + assert stats.cache_enabled is False + assert stats.is_system is False + assert stats.internal_validator_type == 0 + assert stats.globally_unique_id == "hB7C02EE43DCE/69123" + assert isinstance(stats.key_options, KeyOptions) + assert stats.key_options["type"] == "traditional" + assert stats.computed_values is None + assert stats.object_id == "69124" From 325c4e08268d22d91cf92793d0266f0edd9e1734 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Mon, 4 Aug 2025 13:53:36 +0800 Subject: [PATCH 21/47] Hot Backup API (#60) * Hot Backup API * Hot Backup docs * Hot Backup only tested in cluster * Hot Backup only tested for enterprise * Minimize backup 
tests --- arangoasync/backup.py | 295 ++++++++++++++++++++++++++++++++++++++ arangoasync/database.py | 10 ++ arangoasync/exceptions.py | 24 ++++ docs/backup.rst | 78 ++++++++++ docs/index.rst | 1 + docs/specs.rst | 3 + tests/test_backup.py | 57 ++++++++ 7 files changed, 468 insertions(+) create mode 100644 arangoasync/backup.py create mode 100644 docs/backup.rst create mode 100644 tests/test_backup.py diff --git a/arangoasync/backup.py b/arangoasync/backup.py new file mode 100644 index 0000000..75a26a6 --- /dev/null +++ b/arangoasync/backup.py @@ -0,0 +1,295 @@ +__all__ = ["Backup"] + +from numbers import Number +from typing import Optional, cast + +from arangoasync.exceptions import ( + BackupCreateError, + BackupDeleteError, + BackupDownloadError, + BackupGetError, + BackupRestoreError, + BackupUploadError, +) +from arangoasync.executor import ApiExecutor +from arangoasync.request import Method, Request +from arangoasync.response import Response +from arangoasync.result import Result +from arangoasync.serialization import Deserializer, Serializer +from arangoasync.typings import Json, Jsons + + +class Backup: + """Backup API wrapper.""" + + def __init__(self, executor: ApiExecutor) -> None: + self._executor = executor + + @property + def serializer(self) -> Serializer[Json]: + """Return the serializer.""" + return self._executor.serializer + + @property + def deserializer(self) -> Deserializer[Json, Jsons]: + """Return the deserializer.""" + return self._executor.deserializer + + async def get(self, backup_id: Optional[str] = None) -> Result[Json]: + """Return backup details. + + Args: + backup_id (str | None): If set, the returned list is restricted to the + backup with the given id. + + Returns: + dict: Backup details. + + Raises: + BackupGetError: If the operation fails. + + References: + - `list-backups `__ + """ # noqa: E501 + data: Json = {} + if backup_id is not None: + data["id"] = backup_id + + request = Request( + method=Method.POST, + endpoint="/_admin/backup/list", + data=self.serializer.dumps(data) if data else None, + prefix_needed=False, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise BackupGetError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return cast(Json, result["result"]) + + return await self._executor.execute(request, response_handler) + + async def create( + self, + label: Optional[str] = None, + allow_inconsistent: Optional[bool] = None, + force: Optional[bool] = None, + timeout: Optional[Number] = None, + ) -> Result[Json]: + """Create a backup when the global write lock can be obtained. + + Args: + label (str | None): Label for this backup. If not specified, a UUID is used. + allow_inconsistent (bool | None): Allow inconsistent backup when the global + transaction lock cannot be acquired before timeout. + force (bool | None): Forcefully abort all running transactions to ensure a + consistent backup when the global transaction lock cannot be + acquired before timeout. Default (and highly recommended) value + is `False`. + timeout (float | None): The time in seconds that the operation tries to + get a consistent snapshot. + + Returns: + dict: Backup information. + + Raises: + BackupCreateError: If the backup creation fails. 
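Because ``create`` competes with running transactions for the global lock, callers typically choose between waiting longer, tolerating an inconsistent snapshot, or (discouraged) forcing transactions to abort. A hedged sketch of that trade-off, using only the API defined here:

.. code-block:: python

    from arangoasync.exceptions import BackupCreateError

    backup = db.backup
    try:
        # Prefer a fully consistent snapshot, waiting up to 30 seconds.
        result = await backup.create(label="nightly", timeout=30)
    except BackupCreateError:
        # Fall back to a possibly inconsistent snapshot instead of
        # force-aborting transactions with force=True.
        result = await backup.create(label="nightly", allow_inconsistent=True)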
+
+        References:
+            - `create-backup `__
+        """  # noqa: E501
+        data: Json = {}
+        if label is not None:
+            data["label"] = label
+        if allow_inconsistent is not None:
+            data["allowInconsistent"] = allow_inconsistent
+        if force is not None:
+            data["force"] = force
+        if timeout is not None:
+            data["timeout"] = timeout
+
+        request = Request(
+            method=Method.POST,
+            endpoint="/_admin/backup/create",
+            data=self.serializer.dumps(data),
+            prefix_needed=False,
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise BackupCreateError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return cast(Json, result["result"])
+
+        return await self._executor.execute(request, response_handler)
+
+    async def restore(self, backup_id: str) -> Result[Json]:
+        """Restore a local backup.
+
+        Args:
+            backup_id (str): Backup ID.
+
+        Returns:
+            dict: Result of the restore operation.
+
+        Raises:
+            BackupRestoreError: If the restore operation fails.
+
+        References:
+            - `restore-backup `__
+        """  # noqa: E501
+        data: Json = {"id": backup_id}
+        request = Request(
+            method=Method.POST,
+            endpoint="/_admin/backup/restore",
+            data=self.serializer.dumps(data),
+            prefix_needed=False,
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise BackupRestoreError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return cast(Json, result["result"])
+
+        return await self._executor.execute(request, response_handler)
+
+    async def delete(self, backup_id: str) -> None:
+        """Delete a backup.
+
+        Args:
+            backup_id (str): Backup ID.
+
+        Raises:
+            BackupDeleteError: If the delete operation fails.
+
+        References:
+            - `delete-backup `__
+        """  # noqa: E501
+        data: Json = {"id": backup_id}
+        request = Request(
+            method=Method.POST,
+            endpoint="/_admin/backup/delete",
+            data=self.serializer.dumps(data),
+            prefix_needed=False,
+        )
+
+        def response_handler(resp: Response) -> None:
+            if not resp.is_success:
+                raise BackupDeleteError(resp, request)
+
+        await self._executor.execute(request, response_handler)
+
+    async def upload(
+        self,
+        backup_id: Optional[str] = None,
+        repository: Optional[str] = None,
+        abort: Optional[bool] = None,
+        config: Optional[Json] = None,
+        upload_id: Optional[str] = None,
+    ) -> Result[Json]:
+        """Manage backup uploads.
+
+        Args:
+            backup_id (str | None): Backup ID used for scheduling an upload. Mutually
+                exclusive with parameter **upload_id**.
+            repository (str | None): Remote repository URL (e.g. "local://tmp/backups").
+            abort (bool | None): If set to `True`, the running upload is aborted. Used with
+                parameter **upload_id**.
+            config (dict | None): Remote repository configuration. Required for scheduling
+                an upload and mutually exclusive with parameter **upload_id**.
+            upload_id (str | None): Upload ID. Mutually exclusive with parameters
+                **backup_id**, **repository**, and **config**.
+
+        Returns:
+            dict: Upload details.
+
+        Raises:
+            BackupUploadError: If the upload operation fails.
+ + References: + - `upload-a-backup-to-a-remote-repository `__ + """ # noqa: E501 + data: Json = {} + if upload_id is not None: + data["uploadId"] = upload_id + if backup_id is not None: + data["id"] = backup_id + if repository is not None: + data["remoteRepository"] = repository + if abort is not None: + data["abort"] = abort + if config is not None: + data["config"] = config + + request = Request( + method=Method.POST, + endpoint="/_admin/backup/upload", + data=self.serializer.dumps(data), + prefix_needed=False, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise BackupUploadError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return cast(Json, result["result"]) + + return await self._executor.execute(request, response_handler) + + async def download( + self, + backup_id: Optional[str] = None, + repository: Optional[str] = None, + abort: Optional[bool] = None, + config: Optional[Json] = None, + download_id: Optional[str] = None, + ) -> Result[Json]: + """Manage backup downloads. + + Args: + backup_id (str | None): Backup ID used for scheduling a download. Mutually + exclusive with parameter **download_id**. + repository (str | None): Remote repository URL (e.g. "local://tmp/backups"). + abort (bool | None): If set to `True`, running download is aborted. + config (dict | None): Remote repository configuration. Required for scheduling + a download and mutually exclusive with parameter **download_id**. + download_id (str | None): Download ID. Mutually exclusive with parameters + **backup_id**, **repository**, and **config**. + + Returns: + dict: Download details. + + Raises: + BackupDownloadError: If the download operation fails. + + References: + - `download-a-backup-from-a-remote-repository `__ + """ # noqa: E501 + data: Json = {} + if download_id is not None: + data["downloadId"] = download_id + if backup_id is not None: + data["id"] = backup_id + if repository is not None: + data["remoteRepository"] = repository + if abort is not None: + data["abort"] = abort + if config is not None: + data["config"] = config + + request = Request( + method=Method.POST, + endpoint="/_admin/backup/download", + data=self.serializer.dumps(data), + prefix_needed=False, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise BackupDownloadError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return cast(Json, result["result"]) + + return await self._executor.execute(request, response_handler) diff --git a/arangoasync/database.py b/arangoasync/database.py index 578222f..b048b4f 100644 --- a/arangoasync/database.py +++ b/arangoasync/database.py @@ -10,6 +10,7 @@ from warnings import warn from arangoasync.aql import AQL +from arangoasync.backup import Backup from arangoasync.collection import Collection, StandardCollection from arangoasync.connection import Connection from arangoasync.errno import HTTP_FORBIDDEN, HTTP_NOT_FOUND @@ -172,6 +173,15 @@ def aql(self) -> AQL: """ return AQL(self._executor) + @property + def backup(self) -> Backup: + """Return Backup API wrapper. + + Returns: + arangoasync.backup.Backup: Backup API wrapper. + """ + return Backup(self._executor) + async def properties(self) -> Result[DatabaseProperties]: """Return database properties. 
diff --git a/arangoasync/exceptions.py b/arangoasync/exceptions.py
index 5de6ea4..41644de 100644
--- a/arangoasync/exceptions.py
+++ b/arangoasync/exceptions.py
@@ -179,6 +179,30 @@ class AuthHeaderError(ArangoClientError):
     """The authentication header could not be determined."""
 
 
+class BackupCreateError(ArangoServerError):
+    """Failed to create a backup."""
+
+
+class BackupDeleteError(ArangoServerError):
+    """Failed to delete a backup."""
+
+
+class BackupDownloadError(ArangoServerError):
+    """Failed to download a backup from remote repository."""
+
+
+class BackupGetError(ArangoServerError):
+    """Failed to retrieve backup details."""
+
+
+class BackupRestoreError(ArangoServerError):
+    """Failed to restore from backup."""
+
+
+class BackupUploadError(ArangoServerError):
+    """Failed to upload a backup to remote repository."""
+
+
 class CollectionCreateError(ArangoServerError):
     """Failed to create collection."""
 
diff --git a/docs/backup.rst b/docs/backup.rst
new file mode 100644
index 0000000..de36041
--- /dev/null
+++ b/docs/backup.rst
@@ -0,0 +1,78 @@
+Backups
+-------
+
+Hot Backups are near-instantaneous consistent snapshots of an entire ArangoDB deployment.
+This includes all databases, collections, indexes, Views, graphs, and users at any given time.
+For more information, refer to `ArangoDB Manual`_.
+
+.. _ArangoDB Manual: https://docs.arangodb.com
+
+**Example:**
+
+.. code-block:: python
+
+    from arangoasync import ArangoClient
+    from arangoasync.auth import JwtToken
+
+    # Initialize the client for ArangoDB.
+    async with ArangoClient(hosts="http://localhost:8529") as client:
+        token = JwtToken.generate_token(LOGIN_SECRET)
+
+        # Connect to "_system" database as root user.
+        db = await client.db(
+            "_system", auth_method="superuser", token=token, verify=True
+        )
+
+        # Get the backup API wrapper.
+        backup = db.backup
+
+        # Create a backup.
+        result = await backup.create(
+            label="foo",
+            allow_inconsistent=True,
+            force=False,
+            timeout=1000
+        )
+        backup_id = result["id"]
+
+        # Retrieve details on all backups.
+        backups = await backup.get()
+
+        # Retrieve details on a specific backup.
+        details = await backup.get(backup_id=backup_id)
+
+        # Upload a backup to a remote repository.
+        result = await backup.upload(
+            backup_id=backup_id,
+            repository="local://tmp/backups",
+            config={"local": {"type": "local"}}
+        )
+        upload_id = result["uploadId"]
+
+        # Get the status of an upload.
+        status = await backup.upload(upload_id=upload_id)
+
+        # Abort an upload.
+        await backup.upload(upload_id=upload_id, abort=True)
+
+        # Download a backup from a remote repository.
+        result = await backup.download(
+            backup_id=backup_id,
+            repository="local://tmp/backups",
+            config={"local": {"type": "local"}}
+        )
+        download_id = result["downloadId"]
+
+        # Get the status of a download.
+        status = await backup.download(download_id=download_id)
+
+        # Abort a download.
+        await backup.download(download_id=download_id, abort=True)
+
+        # Restore from a backup.
+        await backup.restore(backup_id)
+
+        # Delete a backup.
+        await backup.delete(backup_id)
+
+See :class:`arangoasync.backup.Backup` for API specification.
diff --git a/docs/index.rst b/docs/index.rst
index 375303c..1b361fd 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -72,6 +72,7 @@ Contents
     certificates
     compression
     serialization
+    backup
     errors
     errno
     logging
diff --git a/docs/specs.rst b/docs/specs.rst
index 9983716..a2b982f 100644
--- a/docs/specs.rst
+++ b/docs/specs.rst
@@ -28,6 +28,9 @@ python-arango-async.
 .. automodule:: arangoasync.cursor
     :members:
 
+.. 
automodule:: arangoasync.backup + :members: + .. automodule:: arangoasync.compression :members: diff --git a/tests/test_backup.py b/tests/test_backup.py new file mode 100644 index 0000000..d2fb07e --- /dev/null +++ b/tests/test_backup.py @@ -0,0 +1,57 @@ +import pytest +from packaging import version + +from arangoasync.client import ArangoClient +from arangoasync.exceptions import ( + BackupCreateError, + BackupDeleteError, + BackupDownloadError, + BackupGetError, + BackupRestoreError, + BackupUploadError, +) + + +@pytest.mark.asyncio +async def test_backup(url, sys_db_name, bad_db, token, enterprise, cluster, db_version): + if not enterprise: + pytest.skip("Backup API is only available in ArangoDB Enterprise Edition") + if not cluster: + pytest.skip("For simplicity, the backup API is only tested in cluster setups") + if db_version < version.parse("3.12.0"): + pytest.skip( + "For simplicity, the backup API is only tested in the latest versions" + ) + + with pytest.raises(BackupCreateError): + await bad_db.backup.create() + with pytest.raises(BackupGetError): + await bad_db.backup.get() + with pytest.raises(BackupRestoreError): + await bad_db.backup.restore("foobar") + with pytest.raises(BackupDeleteError): + await bad_db.backup.delete("foobar") + with pytest.raises(BackupUploadError): + await bad_db.backup.upload() + with pytest.raises(BackupDownloadError): + await bad_db.backup.download() + + async with ArangoClient(hosts=url) as client: + db = await client.db( + sys_db_name, auth_method="superuser", token=token, verify=True + ) + backup = db.backup + result = await backup.create() + backup_id = result["id"] + result = await backup.get() + assert "list" in result + result = await backup.restore(backup_id) + assert "previous" in result + config = {"local": {"type": "local"}} + result = await backup.upload(backup_id, repository="local://tmp", config=config) + assert "uploadId" in result + result = await backup.download( + backup_id, repository="local://tmp", config=config + ) + assert "downloadId" in result + await backup.delete(backup_id) From 88338776349da67c91751ae8ac0b9037288ce17c Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Tue, 5 Aug 2025 13:21:21 +0800 Subject: [PATCH 22/47] Adding /_api/import (#61) --- arangoasync/collection.py | 102 ++++++++++++++++++++++++++++++++++++++ docs/document.rst | 33 ++++++++++++ tests/test_collection.py | 18 +++++++ 3 files changed, 153 insertions(+) diff --git a/arangoasync/collection.py b/arangoasync/collection.py index e3d12ee..52a9d9e 100644 --- a/arangoasync/collection.py +++ b/arangoasync/collection.py @@ -1578,6 +1578,108 @@ def response_handler( return await self._executor.execute(request, response_handler) + async def import_bulk( + self, + documents: bytes | str, + doc_type: Optional[str] = None, + complete: Optional[bool] = True, + details: Optional[bool] = True, + from_prefix: Optional[str] = None, + to_prefix: Optional[str] = None, + overwrite: Optional[bool] = None, + overwrite_collection_prefix: Optional[bool] = None, + on_duplicate: Optional[str] = None, + wait_for_sync: Optional[bool] = None, + ignore_missing: Optional[bool] = None, + ) -> Result[Json]: + """Load JSON data in bulk into ArangoDB. + + Args: + documents (bytes | str): String representation of the JSON data to import. + doc_type (str | None): Determines how the body of the request is interpreted. + Possible values: "", "documents", "array", "auto". + complete (bool | None): If set to `True`, the whole import fails if any error occurs. 
+ Otherwise, the import continues even if some documents are invalid and cannot + be imported, skipping the problematic documents. + details (bool | None): If set to `True`, the result includes a `details` + attribute with information about documents that could not be imported. + from_prefix (str | None): String prefix prepended to the value of "_from" + field in each edge document inserted. For example, prefix "foo" + prepended to "_from": "bar" will result in "_from": "foo/bar". + Applies only to edge collections. + to_prefix (str | None): String prefix prepended to the value of "_to" + field in each edge document inserted. For example, prefix "foo" + prepended to "_to": "bar" will result in "_to": "foo/bar". + Applies only to edge collections. + overwrite (bool | None): If set to `True`, all existing documents are removed + prior to the import. Indexes are still preserved. + overwrite_collection_prefix (bool | None): Force the `fromPrefix` and + `toPrefix`, possibly replacing existing collection name prefixes. + on_duplicate (str | None): Action to take on unique key constraint violations + (for documents with "_key" fields). Allowed values are "error" (do + not import the new documents and count them as errors), "update" + (update the existing documents while preserving any fields missing + in the new ones), "replace" (replace the existing documents with + new ones), and "ignore" (do not import the new documents and count + them as ignored, as opposed to counting them as errors). Options + "update" and "replace" may fail on secondary unique key constraint + violations. + wait_for_sync (bool | None): Block until operation is synchronized to disk. + ignore_missing (bool | None): When importing JSON arrays of tabular data + (type parameter is omitted), the first line of the request body defines + the attribute keys and the subsequent lines the attribute values for each + document. Subsequent lines with a different number of elements than the + first line are not imported by default. You can enable this option to + import them anyway. For the missing elements, the document attributes + are omitted. Excess elements are ignored. + + Returns: + dict: Result of the import operation. + + Raises: + DocumentInsertError: If import fails. + + References: + - `import-json-data-as-documents `__ + """ # noqa: E501 + params: Params = dict() + params["collection"] = self.name + if doc_type is not None: + params["type"] = doc_type + if complete is not None: + params["complete"] = complete + if details is not None: + params["details"] = details + if from_prefix is not None: + params["fromPrefix"] = from_prefix + if to_prefix is not None: + params["toPrefix"] = to_prefix + if overwrite is not None: + params["overwrite"] = overwrite + if overwrite_collection_prefix is not None: + params["overwriteCollectionPrefix"] = overwrite_collection_prefix + if on_duplicate is not None: + params["onDuplicate"] = on_duplicate + if wait_for_sync is not None: + params["waitForSync"] = wait_for_sync + if ignore_missing is not None: + params["ignoreMissing"] = ignore_missing + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise DocumentInsertError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + request = Request( + method=Method.POST, + endpoint="/_api/import", + data=documents, + params=params, + ) + + return await self._executor.execute(request, response_handler) + class StandardCollection(Collection[T, U, V]): """Standard collection API wrapper. 
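The dict returned by ``import_bulk`` mirrors the server's import report, so callers can inspect how many documents were created, errored, or ignored. An illustrative sketch; the report fields ("created", "errors", "details") follow the HTTP API, and ``students`` is an assumed collection wrapper:

.. code-block:: python

    docs = [{"_key": "john", "age": 35}, {"_key": "katie", "age": 28}]
    payload = "\n".join(students.serializer.dumps(d) for d in docs)

    result = await students.import_bulk(
        payload,
        doc_type="documents",
        on_duplicate="update",  # merge into existing documents on key clashes
        details=True,           # collect per-document error messages
    )
    print(result["created"], result.get("errors"), result.get("details"))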
diff --git a/docs/document.rst b/docs/document.rst index c0764e8..47619db 100644 --- a/docs/document.rst +++ b/docs/document.rst @@ -150,6 +150,39 @@ Standard documents are managed via collection API wrapper: # Delete one or more matching documents. await students.delete_match({"first": "Emma"}) +Importing documents in bulk is faster when using specialized methods. Suppose +our data is in a file containing JSON Lines (JSONL) format. Each line is expected +to be one JSON object. Example of a "students.jsonl" file: + +.. code-block:: json + + {"_key":"john","name":"John Smith","age":35} + {"_key":"katie","name":"Katie Foster","age":28} + +To import this file into the "students" collection, we can use the `import_bulk` API: + +.. code-block:: python + + from arangoasync import ArangoClient + from arangoasync.auth import Auth + import aiofiles + + async with ArangoClient(hosts="http://localhost:8529") as client: + auth = Auth(username="root", password="passwd") + + # Connect to "test" database as root user. + db = await client.db("test", auth=auth) + + # Get the API wrapper for "students" collection. + students = db.collection("students") + + # Read the JSONL file asynchronously. + async with aiofiles.open('students.jsonl', mode='r') as f: + documents = await f.read() + + # Import documents in bulk. + result = await students.import_bulk(documents, doc_type="documents") + You can manage documents via database API wrappers also, but only simple operations (i.e. get, insert, update, replace, delete) are supported and you must provide document IDs instead of keys: diff --git a/tests/test_collection.py b/tests/test_collection.py index fb8d7ba..2dc4c42 100644 --- a/tests/test_collection.py +++ b/tests/test_collection.py @@ -16,6 +16,7 @@ CollectionStatisticsError, CollectionTruncateError, DocumentCountError, + DocumentInsertError, IndexCreateError, IndexDeleteError, IndexGetError, @@ -263,3 +264,20 @@ async def test_collection_truncate_count(docs, doc_col, bad_col): await doc_col.truncate(wait_for_sync=True, compact=True) cnt = await doc_col.count() assert cnt == 0 + + +@pytest.mark.asyncio +async def test_collection_import_bulk(doc_col, bad_col, docs): + documents = "\n".join(doc_col.serializer.dumps(doc) for doc in docs) + + # Test errors + with pytest.raises(DocumentInsertError): + await bad_col.import_bulk(documents, doc_type="documents") + + # Insert documents in bulk + result = await doc_col.import_bulk(documents, doc_type="documents") + + # Verify the documents were inserted + count = await doc_col.count() + assert count == len(docs) + assert result["created"] == count From 8155b952b43b61c0e62bd4dc6bc6cc193f2dd557 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Fri, 8 Aug 2025 13:29:30 +0800 Subject: [PATCH 23/47] Tasks API (#62) * Adding support for /_api/tasks * Adding docs for /_api/tasks --- arangoasync/database.py | 146 ++++++++++++++++++++++++++++++++++++++ arangoasync/exceptions.py | 16 +++++ docs/index.rst | 1 + docs/task.rst | 51 +++++++++++++ tests/conftest.py | 13 ++++ tests/helpers.py | 18 +++++ tests/test_task.py | 79 +++++++++++++++++++++ 7 files changed, 324 insertions(+) create mode 100644 docs/task.rst create mode 100644 tests/test_task.py diff --git a/arangoasync/database.py b/arangoasync/database.py index b048b4f..f2b03ee 100644 --- a/arangoasync/database.py +++ b/arangoasync/database.py @@ -40,6 +40,10 @@ PermissionUpdateError, ServerStatusError, ServerVersionError, + TaskCreateError, + TaskDeleteError, + TaskGetError, + TaskListError, TransactionAbortError, 
     TransactionCommitError,
     TransactionExecuteError,
@@ -2193,6 +2197,148 @@ def response_handler(resp: Response) -> Json:
 
         return await self._executor.execute(request, response_handler)
 
+    async def tasks(self) -> Result[Jsons]:
+        """Fetch all existing tasks from the server.
+
+        Returns:
+            list: List of currently active server tasks.
+
+        Raises:
+            TaskListError: If the list cannot be retrieved.
+
+        References:
+            - `list-all-tasks `__
+        """  # noqa: E501
+        request = Request(method=Method.GET, endpoint="/_api/tasks")
+
+        def response_handler(resp: Response) -> Jsons:
+            if not resp.is_success:
+                raise TaskListError(resp, request)
+            result: Jsons = self.deserializer.loads_many(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def task(self, task_id: str) -> Result[Json]:
+        """Return the details of an active server task.
+
+        Args:
+            task_id (str): Server task ID.
+
+        Returns:
+            dict: Details of the server task.
+
+        Raises:
+            TaskGetError: If the task details cannot be retrieved.
+
+        References:
+            - `get-a-task `__
+        """  # noqa: E501
+        request = Request(method=Method.GET, endpoint=f"/_api/tasks/{task_id}")
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise TaskGetError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def create_task(
+        self,
+        command: str,
+        task_id: Optional[str] = None,
+        name: Optional[str] = None,
+        offset: Optional[int] = None,
+        params: Optional[Json] = None,
+        period: Optional[int] = None,
+    ) -> Result[Json]:
+        """Create a new task.
+
+        Args:
+            command (str): The JavaScript code to be executed.
+            task_id (str | None): Optional task ID. If not provided, the server will
+                generate a unique ID.
+            name (str | None): The name of the task.
+            offset (int | None): The offset in seconds after which the task should
+                start executing.
+            params (dict | None): Parameters to be passed to the command.
+            period (int | None): The number of seconds between the executions.
+
+        Returns:
+            dict: Details of the created task.
+
+        Raises:
+            TaskCreateError: If the task cannot be created.
+
+        References:
+            - `create-a-task `__
+            - `create-a-task-with-id `__
+        """  # noqa: E501
+        data: Json = {"command": command}
+        if name is not None:
+            data["name"] = name
+        if offset is not None:
+            data["offset"] = offset
+        if params is not None:
+            data["params"] = params
+        if period is not None:
+            data["period"] = period
+
+        if task_id is None:
+            request = Request(
+                method=Method.POST,
+                endpoint="/_api/tasks",
+                data=self.serializer.dumps(data),
+            )
+        else:
+            request = Request(
+                method=Method.PUT,
+                endpoint=f"/_api/tasks/{task_id}",
+                data=self.serializer.dumps(data),
+            )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise TaskCreateError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def delete_task(
+        self,
+        task_id: str,
+        ignore_missing: bool = False,
+    ) -> Result[bool]:
+        """Delete a server task.
+
+        Args:
+            task_id (str): Task ID.
+            ignore_missing (bool): If `True`, do not raise an exception if the
+                task does not exist.
+
+        Returns:
+            bool: `True` if the task was deleted successfully, `False` if the
+                task was not found and **ignore_missing** was set to `True`.
+
+        Raises:
+            TaskDeleteError: If the operation fails.
+ + References: + - `delete-a-task `__ + """ # noqa: E501 + request = Request(method=Method.DELETE, endpoint=f"/_api/tasks/{task_id}") + + def response_handler(resp: Response) -> bool: + if resp.is_success: + return True + if resp.status_code == HTTP_NOT_FOUND and ignore_missing: + return False + raise TaskDeleteError(resp, request) + + return await self._executor.execute(request, response_handler) + class StandardDatabase(Database): """Standard database API wrapper. diff --git a/arangoasync/exceptions.py b/arangoasync/exceptions.py index 41644de..5ca333a 100644 --- a/arangoasync/exceptions.py +++ b/arangoasync/exceptions.py @@ -451,6 +451,22 @@ class SortValidationError(ArangoClientError): """Invalid sort parameters.""" +class TaskCreateError(ArangoServerError): + """Failed to create server task.""" + + +class TaskDeleteError(ArangoServerError): + """Failed to delete server task.""" + + +class TaskGetError(ArangoServerError): + """Failed to retrieve server task details.""" + + +class TaskListError(ArangoServerError): + """Failed to retrieve server tasks.""" + + class TransactionAbortError(ArangoServerError): """Failed to abort transaction.""" diff --git a/docs/index.rst b/docs/index.rst index 1b361fd..41eaeee 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -73,6 +73,7 @@ Contents compression serialization backup + task errors errno logging diff --git a/docs/task.rst b/docs/task.rst new file mode 100644 index 0000000..2490507 --- /dev/null +++ b/docs/task.rst @@ -0,0 +1,51 @@ +Tasks +----- + +ArangoDB can schedule user-defined Javascript snippets as one-time or periodic +(re-scheduled after each execution) tasks. Tasks are executed in the context of +the database they are defined in. + +**Example:** + +.. code-block:: python + + from arangoasync import ArangoClient + from arangoasync.auth import Auth + + # Initialize the client for ArangoDB. + async with ArangoClient(hosts="http://localhost:8529") as client: + auth = Auth(username="root", password="passwd") + + # Connect to "test" database as root user. + db = await client.db("test", auth=auth) + + # Create a new task which simply prints parameters. + await db.create_task( + name="test_task", + command=""" + var task = function(params){ + var db = require('@arangodb'); + db.print(params); + } + task(params); + """, + params={"foo": "bar"}, + offset=300, + period=10, + task_id="001" + ) + + # List all active tasks + tasks = await db.tasks() + + # Retrieve details of a task by ID. + details = await db.task("001") + + # Delete an existing task by ID. + await db.delete_task('001', ignore_missing=True) + + +.. note:: + When deleting a database, any tasks that were initialized under its context + remain active. It is therefore advisable to delete any running tasks before + deleting the database. diff --git a/tests/conftest.py b/tests/conftest.py index 98d75de..66e5a9d 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -256,6 +256,19 @@ async def teardown(): verify=False, ) + # Remove all tasks + test_tasks = [ + task + for task in await sys_db.tasks() + if task["name"].startswith("test_task") + ] + await asyncio.gather( + *( + sys_db.delete_task(task["id"], ignore_missing=True) + for task in test_tasks + ) + ) + # Remove all test users. tst_users = [ user["user"] diff --git a/tests/helpers.py b/tests/helpers.py index f2f63f7..dfaae4d 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -62,3 +62,21 @@ def generate_analyzer_name(): str: Random analyzer name. 
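Following the note above, tasks outlive the database they were created in, so a cleanup pass similar to the conftest teardown can run before dropping a database. A sketch under the assumption that task names carry an application prefix such as "myapp_" (both the prefix and the database name are illustrative):

.. code-block:: python

    # Delete all tasks belonging to this application, then drop the database.
    for task in await db.tasks():
        if task["name"].startswith("myapp_"):
            await db.delete_task(task["id"], ignore_missing=True)
    await sys_db.delete_database("myapp")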
""" return f"test_analyzer_{uuid4().hex}" + + +def generate_task_name(): + """Generate and return a random task name. + + Returns: + str: Random task name. + """ + return f"test_task_{uuid4().hex}" + + +def generate_task_id(): + """Generate and return a random task ID. + + Returns: + str: Random task ID + """ + return f"test_task_id_{uuid4().hex}" diff --git a/tests/test_task.py b/tests/test_task.py new file mode 100644 index 0000000..4e1aee6 --- /dev/null +++ b/tests/test_task.py @@ -0,0 +1,79 @@ +import pytest + +from arangoasync.exceptions import ( + TaskCreateError, + TaskDeleteError, + TaskGetError, + TaskListError, +) +from tests.helpers import generate_task_id, generate_task_name + + +@pytest.mark.asyncio +async def test_task_management(sys_db, bad_db): + # This test intentionally uses the system database because cleaning up tasks is + # easier there. + + test_command = 'require("@arangodb").print(params);' + + # Test errors + with pytest.raises(TaskCreateError): + await bad_db.create_task(command=test_command) + with pytest.raises(TaskGetError): + await bad_db.task("non_existent_task_id") + with pytest.raises(TaskListError): + await bad_db.tasks() + with pytest.raises(TaskDeleteError): + await bad_db.delete_task("non_existent_task_id") + + # Create a task with a random ID + task_name = generate_task_name() + new_task = await sys_db.create_task( + name=task_name, + command=test_command, + params={"foo": 1, "bar": 2}, + offset=1, + ) + assert new_task["name"] == task_name + task_id = new_task["id"] + assert await sys_db.task(task_id) == new_task + + # Delete task + assert await sys_db.delete_task(task_id) is True + + # Create a task with a specific ID + task_name = generate_task_name() + task_id = generate_task_id() + new_task = await sys_db.create_task( + name=task_name, + command=test_command, + params={"foo": 1, "bar": 2}, + offset=1, + period=10, + task_id=task_id, + ) + assert new_task["name"] == task_name + assert new_task["id"] == task_id + + # Try to create a duplicate task + with pytest.raises(TaskCreateError): + await sys_db.create_task( + name=task_name, + command=test_command, + params={"foo": 1, "bar": 2}, + task_id=task_id, + ) + + # Test get missing task + with pytest.raises(TaskGetError): + await sys_db.task(generate_task_id()) + + # Test list tasks + tasks = await sys_db.tasks() + assert len(tasks) == 1 + + # Delete tasks + assert await sys_db.delete_task(task_id) is True + assert await sys_db.delete_task(task_id, ignore_missing=True) is False + with pytest.raises(TaskDeleteError): + await sys_db.delete_task(task_id) From a171df7b449cea79a2a2ce3d41ad0052261e0d6c Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Fri, 8 Aug 2025 14:39:03 +0800 Subject: [PATCH 24/47] Adding security API (#63) --- arangoasync/database.py | 78 +++++++++++++++++++++++++++++++++++++++ arangoasync/exceptions.py | 12 ++++++ docs/certificates.rst | 22 +++++++++++ docs/migration.rst | 2 +- tests/test_client.py | 14 +++++++ 5 files changed, 127 insertions(+), 1 deletion(-) diff --git a/arangoasync/database.py b/arangoasync/database.py index f2b03ee..b338b56 100644 --- a/arangoasync/database.py +++ b/arangoasync/database.py @@ -38,7 +38,10 @@ PermissionListError, PermissionResetError, PermissionUpdateError, + ServerEncryptionError, ServerStatusError, + ServerTLSError, + ServerTLSReloadError, ServerVersionError, TaskCreateError, TaskDeleteError, @@ -2072,6 +2075,81 @@ def response_handler(resp: Response) -> Json: return await self._executor.execute(request, response_handler) + async def 
tls(self) -> Result[Json]: + """Return TLS data (keyfile, clientCA). + + This API requires authentication. + + Returns: + dict: dict containing the following components: + - keyfile: Information about the key file. + - clientCA: Information about the Certificate Authority (CA) for client certificate verification. + + Raises: + ServerTLSError: If the operation fails. + + References: + - `get-the-tls-data `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_admin/server/tls") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerTLSError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body)["result"] + return result + + return await self._executor.execute(request, response_handler) + + async def reload_tls(self) -> Result[Json]: + """Reload TLS data (keyfile, clientCA). + + This is a protected API and can only be executed with superuser rights. + + Returns: + dict: New TLS data. + + Raises: + ServerTLSReloadError: If the operation fails. + + References: + - `reload-the-tls-data `__ + """ # noqa: E501 + request = Request(method=Method.POST, endpoint="/_admin/server/tls") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerTLSReloadError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body)["result"] + return result + + return await self._executor.execute(request, response_handler) + + async def encryption(self) -> Result[Json]: + """Rotate the user-supplied keys for encryption. + + This is a protected API and can only be executed with superuser rights. + This API is not available on Coordinator nodes. + + Returns: + dict: Encryption keys. + + Raises: + ServerEncryptionError: If the operation fails. + + References: + - `rotate-the-encryption-keys `__ + """ # noqa: E501 + request = Request(method=Method.POST, endpoint="/_admin/server/encryption") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerEncryptionError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body)["result"] + return result + + return await self._executor.execute(request, response_handler) + async def list_transactions(self) -> Result[Jsons]: """List all currently running stream transactions. diff --git a/arangoasync/exceptions.py b/arangoasync/exceptions.py index 5ca333a..5e2844a 100644 --- a/arangoasync/exceptions.py +++ b/arangoasync/exceptions.py @@ -435,6 +435,10 @@ class SerializationError(ArangoClientError): """Failed to serialize the request.""" +class ServerEncryptionError(ArangoServerError): + """Failed to reload user-defined encryption keys.""" + + class ServerConnectionError(ArangoServerError): """Failed to connect to ArangoDB server.""" @@ -443,6 +447,14 @@ class ServerStatusError(ArangoServerError): """Failed to retrieve server status.""" +class ServerTLSError(ArangoServerError): + """Failed to retrieve TLS data.""" + + +class ServerTLSReloadError(ArangoServerError): + """Failed to reload TLS.""" + + class ServerVersionError(ArangoServerError): """Failed to retrieve server version.""" diff --git a/docs/certificates.rst b/docs/certificates.rst index c0665fa..ee49e13 100644 --- a/docs/certificates.rst +++ b/docs/certificates.rst @@ -108,3 +108,25 @@ Use a client certificate chain If you want to have fine-grained control over the HTTP connection, you should define your HTTP client as described in the :ref:`HTTP` section. + +Security features +================= + +See the `ArangoDB Manual`_ for more information on security features. 
+ +**Example:** + +.. code-block:: python + + async with ArangoClient(hosts=url) as client: + db = await client.db( + sys_db_name, auth_method="superuser", token=token, verify=True + ) + + # Get TLS data + tls = await db.tls() + + # Reload TLS data + tls = await db.reload_tls() + +.. _ArangoDB Manual: https://docs.arangodb.com/stable/develop/http-api/security/ diff --git a/docs/migration.rst b/docs/migration.rst index f26e7d6..7c2427e 100644 --- a/docs/migration.rst +++ b/docs/migration.rst @@ -2,7 +2,7 @@ Coming from python-arango ------------------------- Generally, migrating from `python-arango`_ should be a smooth transition. For the most part, the API is similar, -but there are a few things to note._ +but there are a few things to note. Helpers ======= diff --git a/tests/test_client.py b/tests/test_client.py index 6210412..cb488a7 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -3,6 +3,7 @@ from arangoasync.auth import JwtToken from arangoasync.client import ArangoClient from arangoasync.compression import DefaultCompressionManager +from arangoasync.exceptions import ServerEncryptionError from arangoasync.http import DefaultHTTPClient from arangoasync.resolver import DefaultHostResolver, RoundRobinHostResolver from arangoasync.version import __version__ @@ -131,6 +132,19 @@ async def test_client_jwt_superuser_auth( await db.jwt_secrets() await db.reload_jwt_secrets() + # Get TLS data + tls = await db.tls() + assert isinstance(tls, dict) + + # Reload TLS data + tls = await db.reload_tls() + assert isinstance(tls, dict) + + # Rotate + with pytest.raises(ServerEncryptionError): + # Not allowed on coordinators + await db.encryption() + # token missing async with ArangoClient(hosts=url) as client: with pytest.raises(ValueError): From 7586d09f7232f6b0fbda3f7c513047abbcc4d074 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Sun, 10 Aug 2025 11:16:27 +0800 Subject: [PATCH 25/47] Cluster API (#64) * Adding cluster API * Adding rebalance operations * Adding cluster documentation --- arangoasync/cluster.py | 451 ++++++++++++++++++++++++++++++++++++++ arangoasync/database.py | 10 + arangoasync/exceptions.py | 28 +++ docs/cluster.rst | 53 +++++ docs/index.rst | 1 + docs/specs.rst | 3 + tests/test_cluster.py | 101 +++++++++ 7 files changed, 647 insertions(+) create mode 100644 arangoasync/cluster.py create mode 100644 docs/cluster.rst create mode 100644 tests/test_cluster.py diff --git a/arangoasync/cluster.py b/arangoasync/cluster.py new file mode 100644 index 0000000..ce33b92 --- /dev/null +++ b/arangoasync/cluster.py @@ -0,0 +1,451 @@ +__all__ = ["Cluster"] + +from typing import List, Optional, cast + +from arangoasync.exceptions import ( + ClusterEndpointsError, + ClusterHealthError, + ClusterMaintenanceModeError, + ClusterRebalanceError, + ClusterServerIDError, + ClusterServerRoleError, + ClusterStatisticsError, +) +from arangoasync.executor import ApiExecutor +from arangoasync.request import Method, Request +from arangoasync.response import Response +from arangoasync.result import Result +from arangoasync.serialization import Deserializer, Serializer +from arangoasync.typings import Json, Jsons, Params + + +class Cluster: + """Cluster-specific endpoints.""" + + def __init__(self, executor: ApiExecutor) -> None: + self._executor = executor + + @property + def serializer(self) -> Serializer[Json]: + """Return the serializer.""" + return self._executor.serializer + + @property + def deserializer(self) -> Deserializer[Json, Jsons]: + """Return the deserializer.""" + return 
self._executor.deserializer
+
+    async def health(self) -> Result[Json]:
+        """Queries the health of the cluster.
+
+        Returns:
+            dict: Health status of the cluster.
+
+        Raises:
+            ClusterHealthError: If retrieval fails.
+
+        References:
+            - `get-the-cluster-health `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.GET,
+            endpoint="/_admin/cluster/health",
+            prefix_needed=False,
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ClusterHealthError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return Response.format_body(result)
+
+        return await self._executor.execute(request, response_handler)
+
+    async def statistics(self, db_server: str) -> Result[Json]:
+        """Queries the statistics of the given DB-Server.
+
+        Args:
+            db_server (str): The ID of the DB-Server.
+
+        Returns:
+            dict: Statistics of the DB-Server.
+
+        Raises:
+            ClusterStatisticsError: If retrieval fails.
+
+        References:
+            - `get-the-statistics-of-a-db-server `__
+        """  # noqa: E501
+        params: Params = {"DBserver": db_server}
+
+        request = Request(
+            method=Method.GET,
+            endpoint="/_admin/cluster/statistics",
+            prefix_needed=False,
+            params=params,
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ClusterStatisticsError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return Response.format_body(result)
+
+        return await self._executor.execute(request, response_handler)
+
+    async def endpoints(self) -> Result[List[str]]:
+        """Fetch all coordinator endpoints.
+
+        Returns:
+            list: List of coordinator endpoints.
+
+        Raises:
+            ClusterEndpointsError: If retrieval fails.
+
+        References:
+            - `list-all-coordinator-endpoints `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.GET,
+            endpoint="/_api/cluster/endpoints",
+            prefix_needed=False,
+        )
+
+        def response_handler(resp: Response) -> List[str]:
+            if not resp.is_success:
+                raise ClusterEndpointsError(resp, request)
+            body: Json = self.deserializer.loads(resp.raw_body)
+            return [item["endpoint"] for item in body["endpoints"]]
+
+        return await self._executor.execute(request, response_handler)
+
+    async def server_id(self) -> Result[str]:
+        """Get the ID of the current server.
+
+        Returns:
+            str: Server ID.
+
+        Raises:
+            ClusterServerIDError: If retrieval fails.
+
+        References:
+            - `get-the-server-id `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.GET,
+            endpoint="/_admin/server/id",
+            prefix_needed=False,
+        )
+
+        def response_handler(resp: Response) -> str:
+            if not resp.is_success:
+                raise ClusterServerIDError(resp, request)
+            return str(self.deserializer.loads(resp.raw_body)["id"])
+
+        return await self._executor.execute(request, response_handler)
+
+    async def server_role(self) -> Result[str]:
+        """Get the role of the current server.
+
+        Returns:
+            str: Server role. Possible values: "SINGLE", "COORDINATOR", "PRIMARY", "SECONDARY", "AGENT", "UNDEFINED".
+
+        Raises:
+            ClusterServerRoleError: If retrieval fails.
+ + References: + - `get-the-server-role `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_admin/server/role", + prefix_needed=False, + ) + + def response_handler(resp: Response) -> str: + if not resp.is_success: + raise ClusterServerRoleError(resp, request) + return str(self.deserializer.loads(resp.raw_body)["role"]) + + return await self._executor.execute(request, response_handler) + + async def toggle_maintenance_mode(self, mode: str) -> Result[Json]: + """Enable or disable the cluster supervision (agency) maintenance mode. + + Args: + mode (str): Maintenance mode. Allowed values are "on" or "off". + + Returns: + dict: Result of the operation. + + Raises: + ClusterMaintenanceModeError: If the toggle operation fails. + + References: + - `toggle-cluster-maintenance-mode `__ + """ # noqa: E501 + request = Request( + method=Method.PUT, + endpoint="/_admin/cluster/maintenance", + prefix_needed=False, + data=f'"{mode}"', + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ClusterMaintenanceModeError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return Response.format_body(result) + + return await self._executor.execute(request, response_handler) + + async def server_maintenance_mode(self, server_id: str) -> Result[Json]: + """Check whether the specified DB-Server is in maintenance mode and until when. + + Args: + server_id (str): Server ID. + + Returns: + dict: Maintenance status for the given server. + + Raises: + ClusterMaintenanceModeError: If retrieval fails. + + References: + - `get-the-maintenance-status-of-a-db-server `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint=f"/_admin/cluster/maintenance/{server_id}", + prefix_needed=False, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ClusterMaintenanceModeError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return Response.format_body(result) + + return await self._executor.execute(request, response_handler) + + async def toggle_server_maintenance_mode( + self, server_id: str, mode: str, timeout: Optional[int] = None + ) -> None: + """Enable or disable the maintenance mode for the given server. + + Args: + server_id (str): Server ID. + mode (str): Maintenance mode. Allowed values are "normal" and "maintenance". + timeout (int | None): After how many seconds the maintenance mode shall automatically end. + + Raises: + ClusterMaintenanceModeError: If the operation fails. + + References: + - `set-the-maintenance-status-of-a-db-server `__ + """ # noqa: E501 + data: Json = {"mode": mode} + if timeout is not None: + data["timeout"] = timeout + + request = Request( + method=Method.PUT, + endpoint=f"/_admin/cluster/maintenance/{server_id}", + prefix_needed=False, + data=self.serializer.dumps(data), + ) + + def response_handler(resp: Response) -> None: + if not resp.is_success: + raise ClusterMaintenanceModeError(resp, request) + + await self._executor.execute(request, response_handler) + + async def calculate_imbalance(self) -> Result[Json]: + """Computes the current cluster imbalance and returns the result. + + Returns: + dict: Cluster imbalance information. + + Raises: + ClusterRebalanceError: If retrieval fails. 
+ + References: + - `get-the-current-cluster-imbalance `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_admin/cluster/rebalance") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ClusterRebalanceError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return Response.format_body(result) + + return await self._executor.execute(request, response_handler) + + async def calculate_rebalance_plan( + self, + databases_excluded: Optional[List[str]] = None, + exclude_system_collections: Optional[bool] = None, + leader_changes: Optional[bool] = None, + maximum_number_of_moves: Optional[int] = None, + move_followers: Optional[bool] = None, + move_leaders: Optional[bool] = None, + pi_factor: Optional[float] = None, + version: int = 1, + ) -> Result[Json]: + """Compute a set of move shard operations to improve balance. + + Args: + databases_excluded (list | None): List of database names to be excluded from + the analysis. + exclude_system_collections (bool | None): Ignore system collections in the + rebalance plan. + leader_changes (bool | None): Allow leader changes without moving data. + maximum_number_of_moves (int | None): Maximum number of moves to be computed. + move_followers (bool | None): Allow moving shard followers. + move_leaders (bool | None): Allow moving shard leaders. + pi_factor (float | None): A weighting factor that should remain untouched. + version (int): Must be set to 1. + + Returns: + dict: Cluster rebalance plan. + + Raises: + ClusterRebalanceError: If retrieval fails. + + References: + - `compute-a-set-of-move-shard-operations-to-improve-balance `__ + """ # noqa: E501 + data: Json = dict(version=version) + if databases_excluded is not None: + data["databasesExcluded"] = databases_excluded + if exclude_system_collections is not None: + data["excludeSystemCollections"] = exclude_system_collections + if leader_changes is not None: + data["leaderChanges"] = leader_changes + if maximum_number_of_moves is not None: + data["maximumNumberOfMoves"] = maximum_number_of_moves + if move_followers is not None: + data["moveFollowers"] = move_followers + if move_leaders is not None: + data["moveLeaders"] = move_leaders + if pi_factor is not None: + data["piFactor"] = pi_factor + + request = Request( + method=Method.POST, + endpoint="/_admin/cluster/rebalance", + prefix_needed=False, + data=self.serializer.dumps(data), + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ClusterRebalanceError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return cast(Json, result["result"]) + + return await self._executor.execute(request, response_handler) + + async def rebalance( + self, + databases_excluded: Optional[List[str]] = None, + exclude_system_collections: Optional[bool] = None, + leader_changes: Optional[bool] = None, + maximum_number_of_moves: Optional[int] = None, + move_followers: Optional[bool] = None, + move_leaders: Optional[bool] = None, + pi_factor: Optional[float] = None, + version: int = 1, + ) -> Result[Json]: + """Compute and execute a set of move shard operations to improve balance. + + Args: + databases_excluded (list | None): List of database names to be excluded from + the analysis. + exclude_system_collections (bool | None): Ignore system collections in the + rebalance plan. + leader_changes (bool | None): Allow leader changes without moving data. + maximum_number_of_moves (int | None): Maximum number of moves to be computed. 
+            move_followers (bool | None): Allow moving shard followers.
+            move_leaders (bool | None): Allow moving shard leaders.
+            pi_factor (float | None): A weighting factor that should remain untouched.
+            version (int): Must be set to 1.
+
+        Returns:
+            dict: Cluster rebalance plan.
+
+        Raises:
+            ClusterRebalanceError: If the operation fails.
+
+        References:
+            - `compute-and-execute-a-set-of-move-shard-operations-to-improve-balance `__
+        """  # noqa: E501
+        data: Json = dict(version=version)
+        if databases_excluded is not None:
+            data["databasesExcluded"] = databases_excluded
+        if exclude_system_collections is not None:
+            data["excludeSystemCollections"] = exclude_system_collections
+        if leader_changes is not None:
+            data["leaderChanges"] = leader_changes
+        if maximum_number_of_moves is not None:
+            data["maximumNumberOfMoves"] = maximum_number_of_moves
+        if move_followers is not None:
+            data["moveFollowers"] = move_followers
+        if move_leaders is not None:
+            data["moveLeaders"] = move_leaders
+        if pi_factor is not None:
+            data["piFactor"] = pi_factor
+
+        request = Request(
+            method=Method.PUT,
+            endpoint="/_admin/cluster/rebalance",
+            prefix_needed=False,
+            data=self.serializer.dumps(data),
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ClusterRebalanceError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return cast(Json, result["result"])
+
+        return await self._executor.execute(request, response_handler)
+
+    async def execute_rebalance_plan(
+        self,
+        moves: List[Json],
+        version: int = 1,
+    ) -> Result[int]:
+        """Execute a set of move shard operations.
+
+        Args:
+            moves (list): List of move shard operations to be executed.
+            version (int): Must be set to 1.
+
+        Returns:
+            int: Response code indicating whether the move shard operations have been accepted and scheduled for execution.
+
+        Raises:
+            ClusterRebalanceError: If the execution fails.
+
+        References:
+            - `execute-a-set-of-move-shard-operations `__
+        """  # noqa: E501
+        data: Json = dict(version=version, moves=moves)
+
+        request = Request(
+            method=Method.POST,
+            endpoint="/_admin/cluster/rebalance/execute",
+            data=self.serializer.dumps(data),
+            prefix_needed=False,
+        )
+
+        def response_handler(resp: Response) -> int:
+            if not resp.is_success:
+                raise ClusterRebalanceError(resp, request)
+            result: int = self.deserializer.loads(resp.raw_body)["code"]
+            return result
+
+        return await self._executor.execute(request, response_handler)
diff --git a/arangoasync/database.py b/arangoasync/database.py
index b338b56..d0ddbbb 100644
--- a/arangoasync/database.py
+++ b/arangoasync/database.py
@@ -11,6 +11,7 @@
 
 from arangoasync.aql import AQL
 from arangoasync.backup import Backup
+from arangoasync.cluster import Cluster
 from arangoasync.collection import Collection, StandardCollection
 from arangoasync.connection import Connection
 from arangoasync.errno import HTTP_FORBIDDEN, HTTP_NOT_FOUND
@@ -189,6 +190,15 @@ def backup(self) -> Backup:
         """
         return Backup(self._executor)
 
+    @property
+    def cluster(self) -> Cluster:
+        """Return Cluster API wrapper.
+
+        Returns:
+            arangoasync.cluster.Cluster: Cluster API wrapper.
+        """
+        return Cluster(self._executor)
+
     async def properties(self) -> Result[DatabaseProperties]:
         """Return database properties.
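The rebalance endpoints are designed to compose: inspect the imbalance, compute
a plan, then execute the plan's move shard operations. A minimal sketch of that
flow (assuming an already connected ``db`` handle, and that the computed plan
exposes its operations under a ``moves`` key, per the HTTP API response shape):

.. code-block:: python

    cluster = db.cluster

    # How skewed is the current shard distribution?
    imbalance = await cluster.calculate_imbalance()

    # Compute a plan without executing it.
    plan = await cluster.calculate_rebalance_plan(
        maximum_number_of_moves=100,
        move_leaders=True,
    )

    # Execute the plan's move shard operations. A response code of 200
    # means the moves were accepted and scheduled ("moves" key assumed
    # from the HTTP API response).
    code = await cluster.execute_rebalance_plan(moves=plan["moves"])
    assert code == 200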
diff --git a/arangoasync/exceptions.py b/arangoasync/exceptions.py index 5e2844a..bfd30d7 100644 --- a/arangoasync/exceptions.py +++ b/arangoasync/exceptions.py @@ -271,6 +271,34 @@ class ClientConnectionError(ArangoClientError): """The request was unable to reach the server.""" +class ClusterEndpointsError(ArangoServerError): + """Failed to retrieve coordinator endpoints.""" + + +class ClusterHealthError(ArangoServerError): + """Failed to retrieve cluster health.""" + + +class ClusterMaintenanceModeError(ArangoServerError): + """Failed to enable/disable cluster supervision maintenance mode.""" + + +class ClusterRebalanceError(ArangoServerError): + """Failed to execute cluster rebalancing operation.""" + + +class ClusterServerRoleError(ArangoServerError): + """Failed to retrieve server role in a cluster.""" + + +class ClusterServerIDError(ArangoServerError): + """Failed to retrieve server ID.""" + + +class ClusterStatisticsError(ArangoServerError): + """Failed to retrieve DB-Server statistics.""" + + class CursorCloseError(ArangoServerError): """Failed to delete the cursor result from server.""" diff --git a/docs/cluster.rst b/docs/cluster.rst new file mode 100644 index 0000000..c5e58aa --- /dev/null +++ b/docs/cluster.rst @@ -0,0 +1,53 @@ +Clusters +-------- + +The cluster-specific API lets you get information about individual +cluster nodes and the cluster as a whole, as well as monitor and +administrate cluster deployments. For more information on the design +and architecture, refer to `ArangoDB Manual`_. + +.. _ArangoDB Manual: https://docs.arangodb.com + +.. code-block:: python + + from arangoasync import ArangoClient + from arangoasync.auth import Auth + + # Initialize the client for ArangoDB. + async with ArangoClient(hosts="http://localhost:8529") as client: + auth = Auth(username="root", password="passwd") + + # Connect to "_system" database as root user. + db = await client.db("_system", auth=auth) + cluster = db.cluster + + # Cluster health + health = await cluster.health() + + # DB-Server statistics + db_server = "PRMR-2716c9d0-4b22-4c66-ba3d-f9cd3143e52b" + stats = await cluster.statistics(db_server) + + # Cluster endpoints + endpoints = await cluster.endpoints() + + # Cluster server ID and role + server_id = await cluster.server_id() + server_role = await cluster.server_role() + + # Maintenance mode + await cluster.toggle_maintenance_mode("on") + await cluster.toggle_maintenance_mode("off") + await cluster.toggle_server_maintenance_mode( + db_server, "maintenance", timeout=30 + ) + status = await cluster.server_maintenance_mode(db_server) + await cluster.toggle_server_maintenance_mode(db_server, "normal") + + # Rebalance + result = await cluster.calculate_imbalance() + result = await cluster.calculate_rebalance_plan() + result = await cluster.execute_rebalance_plan(moves=[]) + result = await cluster.rebalance() + +See :class:`arangoasync.cluster.Cluster` for API specification. diff --git a/docs/index.rst b/docs/index.rst index 41eaeee..65eefd3 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -46,6 +46,7 @@ Contents transaction view analyzer + cluster **API Executions** diff --git a/docs/specs.rst b/docs/specs.rst index a2b982f..763af9c 100644 --- a/docs/specs.rst +++ b/docs/specs.rst @@ -31,6 +31,9 @@ python-arango-async. .. automodule:: arangoasync.backup :members: +.. automodule:: arangoasync.cluster + :members: + .. 
automodule:: arangoasync.compression :members: diff --git a/tests/test_cluster.py b/tests/test_cluster.py new file mode 100644 index 0000000..d5b0b75 --- /dev/null +++ b/tests/test_cluster.py @@ -0,0 +1,101 @@ +import pytest +from packaging import version + +from arangoasync.client import ArangoClient +from arangoasync.exceptions import ( + ClusterEndpointsError, + ClusterHealthError, + ClusterMaintenanceModeError, + ClusterRebalanceError, + ClusterServerIDError, + ClusterServerRoleError, + ClusterStatisticsError, +) + + +@pytest.mark.asyncio +async def test_cluster( + url, sys_db_name, bad_db, token, enterprise, cluster, db_version +): + if not cluster: + pytest.skip("Cluster API is only tested in cluster setups") + if not enterprise or db_version < version.parse("3.12.0"): + pytest.skip( + "For simplicity, the cluster API is only tested in the latest versions" + ) + + # Test errors + with pytest.raises(ClusterHealthError): + await bad_db.cluster.health() + with pytest.raises(ClusterStatisticsError): + await bad_db.cluster.statistics("foo") + with pytest.raises(ClusterEndpointsError): + await bad_db.cluster.endpoints() + with pytest.raises(ClusterServerIDError): + await bad_db.cluster.server_id() + with pytest.raises(ClusterServerRoleError): + await bad_db.cluster.server_role() + with pytest.raises(ClusterMaintenanceModeError): + await bad_db.cluster.toggle_maintenance_mode("on") + with pytest.raises(ClusterMaintenanceModeError): + await bad_db.cluster.toggle_server_maintenance_mode("PRMR0001", "normal") + with pytest.raises(ClusterMaintenanceModeError): + await bad_db.cluster.server_maintenance_mode("PRMR0001") + with pytest.raises(ClusterRebalanceError): + await bad_db.cluster.calculate_imbalance() + with pytest.raises(ClusterRebalanceError): + await bad_db.cluster.rebalance() + with pytest.raises(ClusterRebalanceError): + await bad_db.cluster.calculate_rebalance_plan() + with pytest.raises(ClusterRebalanceError): + await bad_db.cluster.execute_rebalance_plan(moves=[]) + + async with ArangoClient(hosts=url) as client: + db = await client.db( + sys_db_name, auth_method="superuser", token=token, verify=True + ) + cluster = db.cluster + + # Cluster health + health = await cluster.health() + assert "Health" in health + + # DB-Server statistics + db_server = None + for server in health["Health"]: + if server.startswith("PRMR"): + db_server = server + break + assert db_server is not None, f"No DB server found in {health}" + stats = await cluster.statistics(db_server) + assert "enabled" in stats + + # Cluster endpoints + endpoints = await cluster.endpoints() + assert len(endpoints) > 0 + + # Cluster server ID and role + server_id = await cluster.server_id() + assert isinstance(server_id, str) + server_role = await cluster.server_role() + assert isinstance(server_role, str) + + # Maintenance mode + await cluster.toggle_maintenance_mode("on") + await cluster.toggle_maintenance_mode("off") + await cluster.toggle_server_maintenance_mode( + db_server, "maintenance", timeout=30 + ) + status = await cluster.server_maintenance_mode(db_server) + assert isinstance(status, dict) + await cluster.toggle_server_maintenance_mode(db_server, "normal") + + # Rebalance + result = await cluster.calculate_imbalance() + assert isinstance(result, dict) + result = await cluster.calculate_rebalance_plan() + assert isinstance(result, dict) + result = await cluster.execute_rebalance_plan(moves=[]) + assert result == 200 + result = await cluster.rebalance() + assert isinstance(result, dict) From 
114b45f1d6df144f515ea64c45f0fada3b4fac8b Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Tue, 12 Aug 2025 13:57:08 +0800 Subject: [PATCH 26/47] Foxx API (#65) * Adding foxx API * Finishing foxx API * Foxx documentation --- arangoasync/database.py | 10 + arangoasync/exceptions.py | 84 ++++ arangoasync/foxx.py | 829 ++++++++++++++++++++++++++++++++++++++ arangoasync/request.py | 10 +- docs/document.rst | 2 +- docs/foxx.rst | 147 +++++++ docs/index.rst | 1 + pyproject.toml | 1 + tests/helpers.py | 9 + tests/static/service.zip | Bin 0 -> 2963 bytes tests/test_foxx.py | 245 +++++++++++ 11 files changed, 1332 insertions(+), 6 deletions(-) create mode 100644 arangoasync/foxx.py create mode 100644 docs/foxx.rst create mode 100644 tests/static/service.zip create mode 100644 tests/test_foxx.py diff --git a/arangoasync/database.py b/arangoasync/database.py index d0ddbbb..be057c4 100644 --- a/arangoasync/database.py +++ b/arangoasync/database.py @@ -74,6 +74,7 @@ DefaultApiExecutor, TransactionApiExecutor, ) +from arangoasync.foxx import Foxx from arangoasync.graph import Graph from arangoasync.request import Method, Request from arangoasync.response import Response @@ -199,6 +200,15 @@ def cluster(self) -> Cluster: """ return Cluster(self._executor) + @property + def foxx(self) -> Foxx: + """Return Foxx API wrapper. + + Returns: + arangoasync.foxx.Foxx: Foxx API wrapper. + """ + return Foxx(self._executor) + async def properties(self) -> Result[DatabaseProperties]: """Return database properties. diff --git a/arangoasync/exceptions.py b/arangoasync/exceptions.py index bfd30d7..99340dd 100644 --- a/arangoasync/exceptions.py +++ b/arangoasync/exceptions.py @@ -395,6 +395,90 @@ class EdgeListError(ArangoServerError): """Failed to retrieve edges coming in and out of a vertex.""" +class FoxxConfigGetError(ArangoServerError): + """Failed to retrieve Foxx service configuration.""" + + +class FoxxConfigReplaceError(ArangoServerError): + """Failed to replace Foxx service configuration.""" + + +class FoxxConfigUpdateError(ArangoServerError): + """Failed to update Foxx service configuration.""" + + +class FoxxCommitError(ArangoServerError): + """Failed to commit local Foxx service state.""" + + +class FoxxDependencyGetError(ArangoServerError): + """Failed to retrieve Foxx service dependencies.""" + + +class FoxxDependencyReplaceError(ArangoServerError): + """Failed to replace Foxx service dependencies.""" + + +class FoxxDependencyUpdateError(ArangoServerError): + """Failed to update Foxx service dependencies.""" + + +class FoxxScriptListError(ArangoServerError): + """Failed to retrieve Foxx service scripts.""" + + +class FoxxDevModeEnableError(ArangoServerError): + """Failed to enable development mode for Foxx service.""" + + +class FoxxDevModeDisableError(ArangoServerError): + """Failed to disable development mode for Foxx service.""" + + +class FoxxDownloadError(ArangoServerError): + """Failed to download Foxx service bundle.""" + + +class FoxxReadmeGetError(ArangoServerError): + """Failed to retrieve Foxx service readme.""" + + +class FoxxScriptRunError(ArangoServerError): + """Failed to run Foxx service script.""" + + +class FoxxServiceCreateError(ArangoServerError): + """Failed to create Foxx service.""" + + +class FoxxServiceDeleteError(ArangoServerError): + """Failed to delete Foxx services.""" + + +class FoxxServiceGetError(ArangoServerError): + """Failed to retrieve Foxx service metadata.""" + + +class FoxxServiceListError(ArangoServerError): + """Failed to retrieve Foxx services.""" + + +class 
FoxxServiceReplaceError(ArangoServerError):
+    """Failed to replace Foxx service."""
+
+
+class FoxxServiceUpdateError(ArangoServerError):
+    """Failed to update Foxx service."""
+
+
+class FoxxSwaggerGetError(ArangoServerError):
+    """Failed to retrieve Foxx service swagger."""
+
+
+class FoxxTestRunError(ArangoServerError):
+    """Failed to run Foxx service tests."""
+
+
 class GraphCreateError(ArangoServerError):
     """Failed to create the graph."""
 
diff --git a/arangoasync/foxx.py b/arangoasync/foxx.py
new file mode 100644
index 0000000..b74d933
--- /dev/null
+++ b/arangoasync/foxx.py
@@ -0,0 +1,829 @@
+__all__ = ["Foxx"]
+
+from typing import Any, Optional
+
+from arangoasync.exceptions import (
+    FoxxCommitError,
+    FoxxConfigGetError,
+    FoxxConfigReplaceError,
+    FoxxConfigUpdateError,
+    FoxxDependencyGetError,
+    FoxxDependencyReplaceError,
+    FoxxDependencyUpdateError,
+    FoxxDevModeDisableError,
+    FoxxDevModeEnableError,
+    FoxxDownloadError,
+    FoxxReadmeGetError,
+    FoxxScriptListError,
+    FoxxScriptRunError,
+    FoxxServiceCreateError,
+    FoxxServiceDeleteError,
+    FoxxServiceGetError,
+    FoxxServiceListError,
+    FoxxServiceReplaceError,
+    FoxxServiceUpdateError,
+    FoxxSwaggerGetError,
+    FoxxTestRunError,
+)
+from arangoasync.executor import ApiExecutor
+from arangoasync.request import Method, Request
+from arangoasync.response import Response
+from arangoasync.result import Result
+from arangoasync.serialization import Deserializer, Serializer
+from arangoasync.typings import Json, Jsons, Params, RequestHeaders
+
+
+class Foxx:
+    """Foxx API wrapper."""
+
+    def __init__(self, executor: ApiExecutor) -> None:
+        self._executor = executor
+
+    def __repr__(self) -> str:
+        return f"<Foxx in {self._executor.db_name}>"
+
+    @property
+    def serializer(self) -> Serializer[Json]:
+        """Return the serializer."""
+        return self._executor.serializer
+
+    @property
+    def deserializer(self) -> Deserializer[Json, Jsons]:
+        """Return the deserializer."""
+        return self._executor.deserializer
+
+    async def services(self, exclude_system: Optional[bool] = False) -> Result[Jsons]:
+        """List installed services.
+
+        Args:
+            exclude_system (bool | None): Exclude system services.
+
+        Returns:
+            list: List of installed services.
+
+        Raises:
+            FoxxServiceListError: If retrieval fails.
+
+        References:
+            - `list-the-installed-services `__
+        """  # noqa: E501
+        params: Params = {}
+        if exclude_system is not None:
+            params["excludeSystem"] = exclude_system
+
+        request = Request(
+            method=Method.GET,
+            endpoint="/_api/foxx",
+            params=params,
+        )
+
+        def response_handler(resp: Response) -> Jsons:
+            if not resp.is_success:
+                raise FoxxServiceListError(resp, request)
+            result: Jsons = self.deserializer.loads_many(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def service(self, mount: str) -> Result[Json]:
+        """Return service metadata.
+
+        Args:
+            mount (str): Service mount path (e.g. "/_admin/aardvark").
+
+        Returns:
+            dict: Service metadata.
+
+        Raises:
+            FoxxServiceGetError: If retrieval fails.
+
+        References:
+            - `get-the-service-description `__
+        """  # noqa: E501
+        params: Params = {"mount": mount}
+        request = Request(
+            method=Method.GET,
+            endpoint="/_api/foxx/service",
+            params=params,
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise FoxxServiceGetError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def create_service(
+        self,
+        mount: str,
+        service: Any,
+        headers: Optional[RequestHeaders] = None,
+        development: Optional[bool] = None,
+        setup: Optional[bool] = None,
+        legacy: Optional[bool] = None,
+    ) -> Result[Json]:
+        """Installs the given new service at the given mount path.
+
+        Args:
+            mount (str): Mount path the service should be installed at.
+            service (Any): Service payload. Can be a JSON string, a file-like object, or a
+                multipart form.
+            headers (dict | None): Request headers.
+            development (bool | None): Whether to install the service in development mode.
+            setup (bool | None): Whether to run the service setup script.
+            legacy (bool | None): Whether to install in legacy mode.
+
+        Returns:
+            dict: Service metadata.
+
+        Raises:
+            FoxxServiceCreateError: If installation fails.
+
+        References:
+            - `install-a-new-service-mode `__
+        """  # noqa: E501
+        params: Params = dict()
+        params["mount"] = mount
+        if development is not None:
+            params["development"] = development
+        if setup is not None:
+            params["setup"] = setup
+        if legacy is not None:
+            params["legacy"] = legacy
+
+        if isinstance(service, dict):
+            data = self.serializer.dumps(service)
+        else:
+            data = service
+
+        request = Request(
+            method=Method.POST,
+            endpoint="/_api/foxx",
+            params=params,
+            data=data,
+            headers=headers,
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise FoxxServiceCreateError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def delete_service(
+        self,
+        mount: str,
+        teardown: Optional[bool] = None,
+    ) -> None:
+        """Removes the service at the given mount path from the database and file system.
+
+        Args:
+            mount (str): Mount path of the service to uninstall.
+            teardown (bool | None): Whether to run the teardown script.
+
+        Raises:
+            FoxxServiceDeleteError: If the operation fails.
+
+        References:
+            - `uninstall-a-service `__
+        """  # noqa: E501
+        params: Params = dict()
+        params["mount"] = mount
+        if teardown is not None:
+            params["teardown"] = teardown
+
+        request = Request(
+            method=Method.DELETE,
+            endpoint="/_api/foxx/service",
+            params=params,
+        )
+
+        def response_handler(resp: Response) -> None:
+            if not resp.is_success:
+                raise FoxxServiceDeleteError(resp, request)
+
+        await self._executor.execute(request, response_handler)
+
+    async def replace_service(
+        self,
+        mount: str,
+        service: Any,
+        headers: Optional[RequestHeaders] = None,
+        teardown: Optional[bool] = None,
+        setup: Optional[bool] = None,
+        legacy: Optional[bool] = None,
+        force: Optional[bool] = None,
+    ) -> Result[Json]:
+        """Replace an existing Foxx service at the given mount path.
+
+        Args:
+            mount (str): Mount path of the service to replace.
+            service (Any): Service payload (JSON string, file-like object, or multipart form).
+            headers (dict | None): Optional request headers.
+            teardown (bool | None): Whether to run the teardown script.
+            setup (bool | None): Whether to run the setup script.
+            legacy (bool | None): Whether to install in legacy mode.
+            force (bool | None): Set to `True` to force service install even if no service is installed under the given mount.
+
+        Returns:
+            dict: Service metadata.
+
+        Raises:
+            FoxxServiceReplaceError: If replacement fails.
+
+        References:
+            - `replace-a-service `__
+        """  # noqa: E501
+        params: Params = dict()
+        params["mount"] = mount
+        if teardown is not None:
+            params["teardown"] = teardown
+        if setup is not None:
+            params["setup"] = setup
+        if legacy is not None:
+            params["legacy"] = legacy
+        if force is not None:
+            params["force"] = force
+
+        if isinstance(service, dict):
+            data = self.serializer.dumps(service)
+        else:
+            data = service
+
+        request = Request(
+            method=Method.PUT,
+            endpoint="/_api/foxx/service",
+            params=params,
+            data=data,
+            headers=headers,
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise FoxxServiceReplaceError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def update_service(
+        self,
+        mount: str,
+        service: Any,
+        headers: Optional[RequestHeaders] = None,
+        teardown: Optional[bool] = None,
+        setup: Optional[bool] = None,
+        legacy: Optional[bool] = None,
+        force: Optional[bool] = None,
+    ) -> Result[Json]:
+        """Upgrade a Foxx service at the given mount path.
+
+        Args:
+            mount (str): Mount path of the service to upgrade.
+            service (Any): Service payload (JSON string, file-like object, or multipart form).
+            headers (dict | None): Optional request headers.
+            teardown (bool | None): Whether to run the teardown script.
+            setup (bool | None): Whether to run the setup script.
+            legacy (bool | None): Whether to upgrade in legacy mode.
+            force (bool | None): Set to `True` to force service install even if no service is installed under the given mount.
+
+        Returns:
+            dict: Service metadata.
+
+        Raises:
+            FoxxServiceUpdateError: If upgrade fails.
+
+        References:
+            - `upgrade-a-service `__
+        """  # noqa: E501
+        params: Params = dict()
+        params["mount"] = mount
+        if teardown is not None:
+            params["teardown"] = teardown
+        if setup is not None:
+            params["setup"] = setup
+        if legacy is not None:
+            params["legacy"] = legacy
+        if force is not None:
+            params["force"] = force
+
+        if isinstance(service, dict):
+            data = self.serializer.dumps(service)
+        else:
+            data = service
+
+        request = Request(
+            method=Method.PATCH,
+            endpoint="/_api/foxx/service",
+            params=params,
+            data=data,
+            headers=headers,
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise FoxxServiceUpdateError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def config(self, mount: str) -> Result[Json]:
+        """Return service configuration.
+
+        Args:
+            mount (str): Service mount path.
+
+        Returns:
+            dict: Service configuration.
+
+        Raises:
+            FoxxConfigGetError: If retrieval fails.
+ + References: + - `get-the-configuration-options `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_api/foxx/configuration", + params={"mount": mount}, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxConfigGetError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def update_config(self, mount: str, options: Json) -> Result[Json]: + """Update service configuration. + + Args: + mount (str): Service mount path. + options (dict): Configuration values. Omitted options are ignored. + + Returns: + dict: Updated configuration values. + + Raises: + FoxxConfigUpdateError: If update fails. + + References: + - `update-the-configuration-options `__ + """ # noqa: E501 + request = Request( + method=Method.PATCH, + endpoint="/_api/foxx/configuration", + params={"mount": mount}, + data=self.serializer.dumps(options), + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxConfigUpdateError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def replace_config(self, mount: str, options: Json) -> Result[Json]: + """Replace service configuration. + + Args: + mount (str): Service mount path. + options (dict): Configuration values. Omitted options are reset to their + default values or marked as un-configured. + + Returns: + dict: Replaced configuration values. + + Raises: + FoxxConfigReplaceError: If replace fails. + + References: + - `replace-the-configuration-options `__ + """ # noqa: E501 + request = Request( + method=Method.PUT, + endpoint="/_api/foxx/configuration", + params={"mount": mount}, + data=self.serializer.dumps(options), + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxConfigReplaceError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def dependencies(self, mount: str) -> Result[Json]: + """Return service dependencies. + + Args: + mount (str): Service mount path. + + Returns: + dict: Service dependencies settings. + + Raises: + FoxxDependencyGetError: If retrieval fails. + + References: + - `get-the-dependency-options `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_api/foxx/dependencies", + params={"mount": mount}, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxDependencyGetError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def update_dependencies(self, mount: str, options: Json) -> Result[Json]: + """Update service dependencies. + + Args: + mount (str): Service mount path. + options (dict): Dependencies settings. Omitted ones are ignored. + + Returns: + dict: Updated dependency settings. + + Raises: + FoxxDependencyUpdateError: If update fails. 
+ + References: + - `update-the-dependency-options `__ + """ # noqa: E501 + request = Request( + method=Method.PATCH, + endpoint="/_api/foxx/dependencies", + params={"mount": mount}, + data=self.serializer.dumps(options), + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxDependencyUpdateError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def replace_dependencies(self, mount: str, options: Json) -> Result[Json]: + """Replace service dependencies. + + Args: + mount (str): Service mount path. + options (dict): Dependencies settings. Omitted ones are disabled. + + Returns: + dict: Replaced dependency settings. + + Raises: + FoxxDependencyReplaceError: If replace fails. + + References: + - `replace-the-dependency-options `__ + """ # noqa: E501 + request = Request( + method=Method.PUT, + endpoint="/_api/foxx/dependencies", + params={"mount": mount}, + data=self.serializer.dumps(options), + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxDependencyReplaceError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def scripts(self, mount: str) -> Result[Json]: + """List service scripts. + + Args: + mount (str): Service mount path. + + Returns: + dict: Service scripts. + + Raises: + FoxxScriptListError: If retrieval fails. + + References: + - `list-the-service-scripts `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_api/foxx/scripts", + params={"mount": mount}, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxScriptListError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def run_script( + self, mount: str, name: str, arg: Optional[Json] = None + ) -> Result[Any]: + """Run a service script. + + Args: + mount (str): Service mount path. + name (str): Script name. + arg (dict | None): Arbitrary value passed into the script as first argument. + + Returns: + Any: Returns the exports of the script, if any. + + Raises: + FoxxScriptRunError: If script fails. + + References: + - `run-a-service-script `__ + """ # noqa: E501 + request = Request( + method=Method.POST, + endpoint=f"/_api/foxx/scripts/{name}", + params={"mount": mount}, + data=self.serializer.dumps(arg) if arg is not None else None, + ) + + def response_handler(resp: Response) -> Any: + if not resp.is_success: + raise FoxxScriptRunError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def run_tests( + self, + mount: str, + reporter: Optional[str] = None, + idiomatic: Optional[bool] = None, + filter: Optional[str] = None, + output_format: Optional[str] = None, + ) -> Result[str]: + """Run service tests. + + Args: + mount (str): Service mount path. + reporter (str | None): Test reporter. Allowed values are "default" (simple + list of test cases), "suite" (object of test cases nested in + suites), "stream" (raw stream of test results), "xunit" (XUnit or + JUnit compatible structure), or "tap" (raw TAP compatible stream). + idiomatic (bool | None): Use matching format for the reporter, regardless of + the value of parameter **output_format**. 
+            filter (str | None): Only run tests whose full name (test suite and
+                test case) matches the given string.
+            output_format (str | None): Used to further control format. Allowed values
+                are "x-ldjson", "xml" and "text". When using "stream" reporter,
+                setting this to "x-ldjson" returns newline-delimited JSON stream.
+                When using "tap" reporter, setting this to "text" returns plain
+                text TAP report. When using "xunit" reporter, setting this to
+                "xml" returns XML instead of JSONML.
+
+        Returns:
+            str: Reporter output (e.g. raw JSON string, XML, plain text).
+
+        Raises:
+            FoxxTestRunError: If the test run fails.
+
+        References:
+            - `run-the-service-tests `__
+        """  # noqa: E501
+        params: Params = dict()
+        params["mount"] = mount
+        if reporter is not None:
+            params["reporter"] = reporter
+        if idiomatic is not None:
+            params["idiomatic"] = idiomatic
+        if filter is not None:
+            params["filter"] = filter
+
+        headers: RequestHeaders = {}
+        if output_format == "x-ldjson":
+            headers["accept"] = "application/x-ldjson"
+        elif output_format == "xml":
+            headers["accept"] = "application/xml"
+        elif output_format == "text":
+            headers["accept"] = "text/plain"
+
+        request = Request(
+            method=Method.POST,
+            endpoint="/_api/foxx/tests",
+            params=params,
+            headers=headers,
+        )
+
+        def response_handler(resp: Response) -> str:
+            if not resp.is_success:
+                raise FoxxTestRunError(resp, request)
+            return resp.raw_body.decode("utf-8")
+
+        return await self._executor.execute(request, response_handler)
+
+    async def enable_development(self, mount: str) -> Result[Json]:
+        """Puts the service into development mode.
+
+        While the service is running in development mode, it is reloaded from
+        the file system, and its setup script (if any) is re-executed every
+        time the service handles a request.
+
+        In a cluster with multiple coordinators, changes to the filesystem on
+        one coordinator are not reflected across other coordinators.
+
+        Args:
+            mount (str): Service mount path.
+
+        Returns:
+            dict: Service metadata.
+
+        Raises:
+            FoxxDevModeEnableError: If the operation fails.
+
+        References:
+            - `enable-the-development-mode `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.POST,
+            endpoint="/_api/foxx/development",
+            params={"mount": mount},
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise FoxxDevModeEnableError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def disable_development(self, mount: str) -> Result[Json]:
+        """Puts the service into production mode.
+
+        In a cluster with multiple coordinators, the services on all other
+        coordinators are replaced with the version on the calling coordinator.
+
+        Args:
+            mount (str): Service mount path.
+
+        Returns:
+            dict: Service metadata.
+
+        Raises:
+            FoxxDevModeDisableError: If the operation fails.
+
+        References:
+            - `disable-the-development-mode `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.DELETE,
+            endpoint="/_api/foxx/development",
+            params={"mount": mount},
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise FoxxDevModeDisableError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def readme(self, mount: str) -> Result[str]:
+        """Return the service readme.
+
+        Args:
+            mount (str): Service mount path.
+
+        Returns:
+            str: Service readme content.
+ + Raises: + FoxxReadmeGetError: If retrieval fails. + + References: + - `get-the-service-readme `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_api/foxx/readme", + params={"mount": mount}, + ) + + def response_handler(resp: Response) -> str: + if not resp.is_success: + raise FoxxReadmeGetError(resp, request) + return resp.raw_body.decode("utf-8") + + return await self._executor.execute(request, response_handler) + + async def swagger(self, mount: str) -> Result[Json]: + """Return the Swagger API description for the given service. + + Args: + mount (str): Service mount path. + + Returns: + dict: Swagger API description. + + Raises: + FoxxSwaggerGetError: If retrieval fails. + + References: + - `get-the-swagger-description `__ + """ # noqa: E501 + request = Request( + method=Method.GET, endpoint="/_api/foxx/swagger", params={"mount": mount} + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxSwaggerGetError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def download(self, mount: str) -> Result[bytes]: + """Downloads a zip bundle of the service directory. + + When development mode is enabled, this always creates a new bundle. + Otherwise, the bundle will represent the version of a service that is + installed on that ArangoDB instance. + + Args: + mount (str): Service mount path. + + Returns: + bytes: Service bundle zip in raw bytes form. + + Raises: + FoxxDownloadError: If download fails. + + References: + - `download-a-service-bundle `__ + """ # noqa: E501 + request = Request( + method=Method.POST, endpoint="/_api/foxx/download", params={"mount": mount} + ) + + def response_handler(resp: Response) -> bytes: + if not resp.is_success: + raise FoxxDownloadError(resp, request) + return resp.raw_body + + return await self._executor.execute(request, response_handler) + + async def commit(self, replace: Optional[bool] = None) -> None: + """Commit local service state of the coordinator to the database. + + This can be used to resolve service conflicts between coordinators + that cannot be fixed automatically due to missing data. + + Args: + replace (bool | None): If set to `True`, any existing service files in the database + will be overwritten. + + Raises: + FoxxCommitError: If commit fails. + + References: + - `commit-the-local-service-state `__ + """ # noqa: E501 + params: Params = {} + if replace is not None: + params["replace"] = replace + + request = Request( + method=Method.POST, endpoint="/_api/foxx/commit", params=params + ) + + def response_handler(resp: Response) -> None: + if not resp.is_success: + raise FoxxCommitError(resp, request) + + await self._executor.execute(request, response_handler) diff --git a/arangoasync/request.py b/arangoasync/request.py index 6bd629d..9c43508 100644 --- a/arangoasync/request.py +++ b/arangoasync/request.py @@ -4,7 +4,7 @@ ] from enum import Enum, auto -from typing import Optional +from typing import Any, Optional from arangoasync.auth import Auth from arangoasync.typings import Params, RequestHeaders @@ -31,7 +31,7 @@ class Request: endpoint (str): API endpoint. headers (dict | None): Request headers. params (dict | None): URL parameters. - data (bytes | None): Request payload. + data (Any): Request payload. auth (Auth | None): Authentication. prefix_needed (bool): Whether the request needs a prefix (e.g., database name). 
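The payload type is widened from ``bytes | str`` to ``Any`` so that Foxx
service payloads (JSON strings, raw bytes, file-like objects, or
``aiohttp.FormData``) can be handed to the HTTP client as-is, as the Foxx
examples below illustrate.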
@@ -40,7 +40,7 @@ class Request: endpoint (str): API endpoint. headers (dict | None): Request headers. params (dict | None): URL parameters. - data (bytes | None): Request payload. + data (Any): Request payload. auth (Auth | None): Authentication. prefix_needed (bool): Whether the request needs a prefix (e.g., database name). """ @@ -61,7 +61,7 @@ def __init__( endpoint: str, headers: Optional[RequestHeaders] = None, params: Optional[Params] = None, - data: Optional[bytes | str] = None, + data: Optional[Any] = None, auth: Optional[Auth] = None, prefix_needed: bool = True, ) -> None: @@ -69,7 +69,7 @@ def __init__( self.endpoint: str = endpoint self.headers: RequestHeaders = headers or dict() self.params: Params = params or dict() - self.data: Optional[bytes | str] = data + self.data: Optional[Any] = data self.auth: Optional[Auth] = auth self.prefix_needed = prefix_needed diff --git a/docs/document.rst b/docs/document.rst index 47619db..da6434b 100644 --- a/docs/document.rst +++ b/docs/document.rst @@ -177,7 +177,7 @@ To import this file into the "students" collection, we can use the `import_bulk` students = db.collection("students") # Read the JSONL file asynchronously. - async with aiofiles.open('students.jsonl', mode='r') as f: + async with aiofiles.open("students.jsonl", mode="r") as f: documents = await f.read() # Import documents in bulk. diff --git a/docs/foxx.rst b/docs/foxx.rst new file mode 100644 index 0000000..818c80e --- /dev/null +++ b/docs/foxx.rst @@ -0,0 +1,147 @@ +Foxx +---- + +**Foxx** is a microservice framework which lets you define custom HTTP endpoints +that extend ArangoDB's REST API. For more information, refer to `ArangoDB Manual`_. + +.. _ArangoDB Manual: https://docs.arangodb.com + +**Example:** + +.. code-block:: python + + from arangoasync import ArangoClient + from arangoasync.auth import Auth + + # Initialize the client for ArangoDB. + async with ArangoClient(hosts="http://localhost:8529") as client: + auth = Auth(username="root", password="passwd") + + # Connect to "test" database as root user. + db = await client.db("test", auth=auth) + + # Get the Foxx API wrapper. + foxx = db.foxx + + # Define the test mount point. + service_mount = "/test_mount" + + # List services. + await foxx.services() + + # Create a service using a source file. + # In this case, the server must have access to the URL. + service = { + "source": "/tests/static/service.zip", + "configuration": {}, + "dependencies": {}, + } + await foxx.create_service( + mount=service_mount, + service=service, + development=True, + setup=True, + legacy=True + ) + + # Update (upgrade) a service. + await db.foxx.update_service( + mount=service_mount, + service=service, + teardown=True, + setup=True, + legacy=False + ) + + # Replace (overwrite) a service. + await db.foxx.replace_service( + mount=service_mount, + service=service, + teardown=True, + setup=True, + legacy=True, + force=False + ) + + # Get service details. + await foxx.service(service_mount) + + # Manage service configuration. + await foxx.config(service_mount) + await foxx.update_config(service_mount, options={}) + await foxx.replace_config(service_mount, options={}) + + # Manage service dependencies. + await foxx.dependencies(service_mount) + await foxx.update_dependencies(service_mount, options={}) + await foxx.replace_dependencies(service_mount, options={}) + + # Toggle development mode for a service. + await foxx.enable_development(service_mount) + await foxx.disable_development(service_mount) + + # Other miscellaneous functions. 
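+    # readme() returns the README text and swagger() the Swagger API
+    # description; download() returns the service bundle as raw zip bytes,
+    # and commit() persists the coordinator's local service state.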
+ await foxx.readme(service_mount) + await foxx.swagger(service_mount) + await foxx.download(service_mount) + await foxx.commit() + await foxx.scripts(service_mount) + await foxx.run_script(service_mount, "setup", {}) + await foxx.run_tests(service_mount, reporter="xunit", output_format="xml") + + # Delete a service. + await foxx.delete_service(service_mount) + +There are other ways to create, update, and replace services, such as +providing a file directly instead of a source URL. This is useful when you +want to deploy a service from a local file system without needing the +server to access the file directly. When using this method, you must provide +the appropriate content type in the headers, such as `application/zip` for ZIP files or +`multipart/form-data` for multipart uploads. The following example demonstrates how to do this: + +.. code-block:: python + + import aiofiles + import aiohttp + import json + from arangoasync import ArangoClient + from arangoasync.auth import Auth + + # Initialize the client for ArangoDB. + async with ArangoClient(hosts="http://localhost:8529") as client: + auth = Auth(username="root", password="passwd") + + # Connect to "test" database as root user. + db = await client.db("test", auth=auth) + + # Get the Foxx API wrapper. + foxx = db.foxx + + # Define the test mount points. + mount_point = "/test_mount" + + # Create the service using multipart/form-data. + service = aiohttp.FormData() + service.add_field( + "source", + open("./tests/static/service.zip", "rb"), + filename="service.zip", + content_type="application/zip", + ) + service.add_field("configuration", json.dumps({})) + service.add_field("dependencies", json.dumps({})) + service_info = await db.foxx.create_service( + mount=mount_point, service=service, headers={"content-type": "multipart/form-data"} + ) + + # Replace the service using raw data. + async with aiofiles.open("./tests/static/service.zip", mode="rb") as f: + service = await f.read() + service_info = await db.foxx.replace_service( + mount=mount_point, service=service, headers={"content-type": "application/zip"} + ) + + # Delete the service. + await db.foxx.delete_service(mount_point) + +See :class:`arangoasync.foxx.Foxx` for API specification. diff --git a/docs/index.rst b/docs/index.rst index 65eefd3..78afe62 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -43,6 +43,7 @@ Contents .. toctree:: :maxdepth: 1 + foxx transaction view analyzer diff --git a/pyproject.toml b/pyproject.toml index c5c890f..ef00aea 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -49,6 +49,7 @@ version = { attr = "arangoasync.version.__version__" } [project.optional-dependencies] dev = [ + "aiofiles>=24.1.0", "black>=24.2", "flake8>=7.0", "isort>=5.10", diff --git a/tests/helpers.py b/tests/helpers.py index dfaae4d..0e6e8a8 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -80,3 +80,12 @@ def generate_task_id(): str: Random task ID """ return f"test_task_id_{uuid4().hex}" + + +def generate_service_mount(): + """Generate and return a random service name. + + Returns: + str: Random service name. 
+ """ + return f"/test_{uuid4().hex}" diff --git a/tests/static/service.zip b/tests/static/service.zip new file mode 100644 index 0000000000000000000000000000000000000000..00bf513ebf1066886e93020d36d46697ae55a134 GIT binary patch literal 2963 zcmZ`*c{r47A0FEnLyRR!SwkkrR+$9w;l`}#f4?>0a&F!F#vAP4|qkY!PtnG$eP z2fzZrNJ~u|lQn_BdH|RKxZ5~;$hdf4laYhfsa}QpjS-cMQ5N`OB$Kc~kS3C3*6AQyh5dm^L?K)=_stU@T|>CLc&k zIx9`0$KJb6Pk{tTIuPi{pNTx29dOth~CMvlR%Z!!zx$kU_?9TI%%%eDDte$SEm<|3sIuQv_K(*zz2-Wh6JKgOZw zcjQZDhCEwcS}x|yHjxAu-X5!ST1H(hQgxoU(DP08$1b=$R6VTQ7wO%CX`qcHpdI!f z?ReVb+`KIae{~eDudLrH$2PGcRhySwFmd}R_b)|75R;%ZRGgT~$;imJ`@R$%ue)?9 ztvlH7wkYk+=G&agY2ik^eA7_AtQbc(w``y8z6z_kggt#s)(J<^VsYzu&(pSD1>15U ze>~swpiDZVa#d3;Ig7JZ*f2Lr3>u{4@ zV#I=7Kv(^}=f4@^vqhPWI3W7)LKVH4@;!0OObmVn{*8a2sZLNn< zq#{^zdGs>8w>_G94oEWtIm+J))}@9YF>kOO7Q=H#!ukv&2-M2)ml!uaeE=c;5o0KI z(sLXE>zLd(`%Fp%>5qOL5pIkf8Kt);E_4UM!Ku9c6&U2E&Y4ofxnQaQDJ$;Nt$cX` z2hx6GYqhh6qd(fr$$Bhb3^Cy&n^KLq3L&=e>|GOs2I~Lz8^uozw=edf;B z!N{om=&Nn+w*?+aV_w%5D|(7AqhjmEs-LACh8Jc`GQ zq@xOO(MBt2e?BbE=yJgeHjIuLbZNHkPHaVJv}ic&2)F`#@C@E6kx)%F!S*ekTbXEc z?Yy95lc`?dUB4bm?m4}eeGK}xR2R{wied1VueJvZZ6V~-*xMWQ7iN(P4H-S{8f?o2 zu()RfT?Fh{Gg+tUIxFXUg_1dX9OH+a^bq!e; zv%gy_Bz>@{TGSzTQ}Fwzq6L*B@sMq-%n|c$(PL7NJkL zN$Z+~?&~E*ECKSrvv4&c!u$I-IZ!NHjWjqkX7vORzJxl)t9(N(@X0&wTB2!4!+a ztWRg0`N-~wO0V`GvmMUXzLlaDI;Az*8qf%w^@U3(%FApW%nU}JzTabUgVS@>lhZQ% zdIKT4k@itS&6*r5#f03M-V@+eM0`x_2MVQD^woO%psRRRhWxBAdC~3>+p7gB@)Dk* zuAOBi?z7{|?XcnVbC}bG6pso|4%;9%RRQyc?|Z1SFo++ib=LY_ZM`(QUA4vf2`(D< z#a$F*G6K!}L{YeiG~8v9-847P^@@v^rE>{2o#T3($j2;Qo~}#1LJT0vjFj-*uoWPZ zo9n*bKrJgYd(W7n7` z6F0zF}8?P|zLbdv<#Qq z=uNef(9(lM<(xBi3umzuCG!(uYiX96Hz=h1*Amw)lahyyNu*J1uWU61D9lxLYRJ-C zYos}y!a8!fhe|~UMC51sh4L=Ld78dO1*A11>=y6-ILjk0 zrYLuXootZO6VXg1c3t$NCO7QN)cc8E#*6#fmnfJrMOPoVUc%@3dvfP~*Q`)`QaIxI zWo}_(gyR!k-6flam>p&3#S+289XRH9i$nmG*8u=_3FyZ Date: Sat, 16 Aug 2025 19:05:42 +0800 Subject: [PATCH 27/47] Administration API (#66) * Getting started on administration API * Adding more Administration methods * Finishing up Administration API * Adding admin to to toctree --- arangoasync/database.py | 424 ++++++++++++++++++++++++++++++++++++++ arangoasync/exceptions.py | 68 +++++- docs/admin.rst | 47 +++++ docs/index.rst | 1 + tests/test_database.py | 95 ++++++++- 5 files changed, 632 insertions(+), 3 deletions(-) create mode 100644 docs/admin.rst diff --git a/arangoasync/database.py b/arangoasync/database.py index be057c4..449b789 100644 --- a/arangoasync/database.py +++ b/arangoasync/database.py @@ -6,6 +6,7 @@ ] +from datetime import datetime from typing import Any, List, Optional, Sequence, TypeVar, cast from warnings import warn @@ -26,10 +27,12 @@ CollectionDeleteError, CollectionKeyGeneratorsError, CollectionListError, + DatabaseCompactError, DatabaseCreateError, DatabaseDeleteError, DatabaseListError, DatabasePropertiesError, + DatabaseSupportInfoError, GraphCreateError, GraphDeleteError, GraphListError, @@ -39,8 +42,22 @@ PermissionListError, PermissionResetError, PermissionUpdateError, + ServerAvailableOptionsGetError, + ServerCheckAvailabilityError, + ServerCurrentOptionsGetError, + ServerEchoError, ServerEncryptionError, + ServerEngineError, + ServerExecuteError, + ServerLicenseGetError, + ServerLicenseSetError, + ServerModeError, + ServerModeSetError, + ServerReloadRoutingError, + ServerShutdownError, + ServerShutdownProgressError, ServerStatusError, + ServerTimeError, ServerTLSError, ServerTLSReloadError, ServerVersionError, @@ -2437,6 +2454,413 @@ def response_handler(resp: Response) -> bool: return await 
self._executor.execute(request, response_handler) + async def engine(self) -> Result[Json]: + """Returns the storage engine the server is configured to use. + + Returns: + dict: Database engine details. + + Raises: + ServerEngineError: If the operation fails. + + References: + - `get-the-storage-engine-type `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_api/engine") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerEngineError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def time(self) -> Result[datetime]: + """Return server system time. + + Returns: + datetime.datetime: Server system time. + + Raises: + ServerTimeError: If the operation fails. + + References: + - `get-the-system-time `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_admin/time") + + def response_handler(resp: Response) -> datetime: + if not resp.is_success: + raise ServerTimeError(resp, request) + return datetime.fromtimestamp( + self.deserializer.loads(resp.raw_body)["time"] + ) + + return await self._executor.execute(request, response_handler) + + async def check_availability(self) -> Result[str]: + """Return ArangoDB server availability mode. + + Returns: + str: Server availability mode, either "readonly" or "default". + + Raises: + ServerCheckAvailabilityError: If the operation fails. + + References: + - `check-server-availability `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_admin/server/availability", + ) + + def response_handler(resp: Response) -> str: + if not resp.is_success: + raise ServerCheckAvailabilityError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return str(result["mode"]) + + return await self._executor.execute(request, response_handler) + + async def support_info(self) -> Result[Json]: + """Retrieves deployment information for support purposes. + + Note: + As this API may reveal sensitive data about the deployment, it can only be accessed from inside the _system database. + + Returns: + dict: Deployment information + + Raises: + DatabaseSupportInfoError: If the operation fails. + + References: + - `get-information-about-the-deployment `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_admin/support-info") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise DatabaseSupportInfoError(resp, request) + + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def options(self) -> Result[Json]: + """Return the currently-set server options. + + Returns: + dict: Server options. + + Raises: + ServerCurrentOptionsGetError: If the operation fails. + + References: + - `get-the-startup-option-configuration `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_admin/options") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerCurrentOptionsGetError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def options_available(self) -> Result[Json]: + """Return a description of all available server options. + + Returns: + dict: Server options description. + + Raises: + ServerAvailableOptionsGetError: If the operation fails. 
+
+        References:
+            - `get-the-available-startup-options `__
+        """  # noqa: E501
+        request = Request(method=Method.GET, endpoint="/_admin/options-description")
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerAvailableOptionsGetError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def mode(self) -> Result[str]:
+        """Return the server mode ("default" or "readonly").
+
+        Returns:
+            str: Server mode, either "default" or "readonly".
+
+        Raises:
+            ServerModeError: If the operation fails.
+
+        References:
+            - `return-whether-or-not-a-server-is-in-read-only-mode `__
+        """  # noqa: E501
+        request = Request(method=Method.GET, endpoint="/_admin/server/mode")
+
+        def response_handler(resp: Response) -> str:
+            if not resp.is_success:
+                raise ServerModeError(resp, request)
+            return str(self.deserializer.loads(resp.raw_body)["mode"])
+
+        return await self._executor.execute(request, response_handler)
+
+    async def set_mode(self, mode: str) -> Result[str]:
+        """Set the server mode to read-only or default.
+
+        Args:
+            mode (str): Server mode. Possible values are "default" or "readonly".
+
+        Returns:
+            str: New server mode.
+
+        Raises:
+            ServerModeSetError: If the operation fails.
+
+        References:
+            - `set-the-server-mode-to-read-only-or-default `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.PUT,
+            endpoint="/_admin/server/mode",
+            data=self.serializer.dumps({"mode": mode}),
+        )
+
+        def response_handler(resp: Response) -> str:
+            if not resp.is_success:
+                raise ServerModeSetError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return str(result["mode"])
+
+        return await self._executor.execute(request, response_handler)
+
+    async def license(self) -> Result[Json]:
+        """View the license information and status of an Enterprise Edition instance.
+
+        Returns:
+            dict: Server license information.
+
+        Raises:
+            ServerLicenseGetError: If the operation fails.
+
+        References:
+            - `get-information-about-the-current-license `__
+        """  # noqa: E501
+        request = Request(method=Method.GET, endpoint="/_admin/license")
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerLicenseGetError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def set_license(self, license: str, force: Optional[bool] = False) -> None:
+        """Set a new license for an Enterprise Edition instance.
+
+        Args:
+            license (str): Base64-encoded license string, wrapped in double-quotes.
+            force (bool | None): Set to `True` to change the license even if it
+                expires sooner than the current one.
+
+        Raises:
+            ServerLicenseSetError: If the operation fails.
+
+        References:
+            - `set-a-new-license `__
+        """  # noqa: E501
+        params: Params = {}
+        if force is not None:
+            params["force"] = force
+
+        request = Request(
+            method=Method.PUT,
+            endpoint="/_admin/license",
+            params=params,
+            data=license,
+        )
+
+        def response_handler(resp: Response) -> None:
+            if not resp.is_success:
+                raise ServerLicenseSetError(resp, request)
+
+        await self._executor.execute(request, response_handler)
+
+    async def shutdown(self, soft: Optional[bool] = None) -> None:
+        """Initiate server shutdown sequence.
+
+        Args:
+            soft (bool | None): If set to `True`, this initiates a soft shutdown.
+
+        Raises:
+            ServerShutdownError: If the operation fails.
+
+        References:
+            - `start-the-shutdown-sequence `__
+        """  # noqa: E501
+        params: Params = {}
+        if soft is not None:
+            params["soft"] = soft
+
+        request = Request(
+            method=Method.DELETE,
+            endpoint="/_admin/shutdown",
+            params=params,
+        )
+
+        def response_handler(resp: Response) -> None:
+            if not resp.is_success:
+                raise ServerShutdownError(resp, request)
+
+        await self._executor.execute(request, response_handler)
+
+    async def shutdown_progress(self) -> Result[Json]:
+        """Query the soft shutdown progress.
+
+        Returns:
+            dict: Information about the shutdown progress.
+
+        Raises:
+            ServerShutdownProgressError: If the operation fails.
+
+        References:
+            - `query-the-soft-shutdown-progress `__
+        """  # noqa: E501
+        request = Request(method=Method.GET, endpoint="/_admin/shutdown")
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerShutdownProgressError(resp, request)
+
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def compact(
+        self,
+        change_level: Optional[bool] = None,
+        compact_bottom_most_level: Optional[bool] = None,
+    ) -> None:
+        """Compact all databases. This method requires superuser access.
+
+        Note:
+            This command can cause a full rewrite of all data in all databases,
+            which may take very long for large databases.
+
+        Args:
+            change_level (bool | None): Whether or not compacted data should be
+                moved to the minimum possible level. Default value is `False`.
+            compact_bottom_most_level (bool | None): Whether or not to compact the
+                bottom-most level of data. Default value is `False`.
+
+        Raises:
+            DatabaseCompactError: If the operation fails.
+
+        References:
+            - `compact-all-databases `__
+        """  # noqa: E501
+        data = {}
+        if change_level is not None:
+            data["changeLevel"] = change_level
+        if compact_bottom_most_level is not None:
+            data["compactBottomMostLevel"] = compact_bottom_most_level
+
+        request = Request(
+            method=Method.PUT,
+            endpoint="/_admin/compact",
+            data=self.serializer.dumps(data),
+        )
+
+        def response_handler(resp: Response) -> None:
+            if not resp.is_success:
+                raise DatabaseCompactError(resp, request)
+
+        await self._executor.execute(request, response_handler)
+
+    async def reload_routing(self) -> None:
+        """Reload the routing information.
+
+        Raises:
+            ServerReloadRoutingError: If the operation fails.
+
+        References:
+            - `reload-the-routing-table `__
+        """  # noqa: E501
+        request = Request(method=Method.POST, endpoint="/_admin/routing/reload")
+
+        def response_handler(resp: Response) -> None:
+            if not resp.is_success:
+                raise ServerReloadRoutingError(resp, request)
+
+        await self._executor.execute(request, response_handler)
+
+    async def echo(self, body: Optional[Json] = None) -> Result[Json]:
+        """Return an object with the server's request information.
+
+        Args:
+            body (dict | None): Optional body of the request.
+
+        Returns:
+            dict: Details of the request.
+
+        Raises:
+            ServerEchoError: If the operation fails.
+
+        References:
+            - `echo-a-request `__
+        """  # noqa: E501
+        data = body if body is not None else {}
+        request = Request(
+            method=Method.POST,
+            endpoint="/_admin/echo",
+            data=self.serializer.dumps(data),
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerEchoError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def execute(self, command: str) -> Result[Any]:
+        """Execute a raw JavaScript command on the server.
+
+        Args:
+            command (str): JavaScript command to execute.
+
+        Returns:
+            Return value of **command**, if any.
+
+        Raises:
+            ServerExecuteError: If the execution fails.
+
+        References:
+            - `execute-a-script `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.POST, endpoint="/_admin/execute", data=command.encode("utf-8")
+        )
+
+        def response_handler(resp: Response) -> Any:
+            if not resp.is_success:
+                raise ServerExecuteError(resp, request)
+            return self.deserializer.loads(resp.raw_body)
+
+        return await self._executor.execute(request, response_handler)
+
 class StandardDatabase(Database):
     """Standard database API wrapper.
diff --git a/arangoasync/exceptions.py b/arangoasync/exceptions.py
index 99340dd..96a432a 100644
--- a/arangoasync/exceptions.py
+++ b/arangoasync/exceptions.py
@@ -319,6 +319,10 @@ class CursorStateError(ArangoClientError):
     """The cursor object was in a bad state."""

+class DatabaseCompactError(ArangoServerError):
+    """Failed to compact databases."""
+
+
 class DatabaseCreateError(ArangoServerError):
     """Failed to create database."""

@@ -335,6 +339,10 @@ class DatabasePropertiesError(ArangoServerError):
     """Failed to retrieve database properties."""

+class DatabaseSupportInfoError(ArangoServerError):
+    """Failed to retrieve support info for deployment."""
+
+
 class DeserializationError(ArangoClientError):
     """Failed to deserialize the server response."""

@@ -547,14 +555,66 @@ class SerializationError(ArangoClientError):
     """Failed to serialize the request."""

-class ServerEncryptionError(ArangoServerError):
-    """Failed to reload user-defined encryption keys."""
+class ServerAvailableOptionsGetError(ArangoServerError):
+    """Failed to retrieve available server options."""
+
+
+class ServerCheckAvailabilityError(ArangoServerError):
+    """Failed to retrieve server availability mode."""


 class ServerConnectionError(ArangoServerError):
     """Failed to connect to ArangoDB server."""


+class ServerCurrentOptionsGetError(ArangoServerError):
+    """Failed to retrieve currently-set server options."""
+
+
+class ServerEchoError(ArangoServerError):
+    """Failed to retrieve details on last request."""
+
+
+class ServerEncryptionError(ArangoServerError):
+    """Failed to reload user-defined encryption keys."""
+
+
+class ServerEngineError(ArangoServerError):
+    """Failed to retrieve database engine."""
+
+
+class ServerExecuteError(ArangoServerError):
+    """Failed to execute raw JavaScript command."""
+
+
+class ServerModeError(ArangoServerError):
+    """Failed to retrieve server mode."""
+
+
+class ServerModeSetError(ArangoServerError):
+    """Failed to set server mode."""
+
+
+class ServerLicenseGetError(ArangoServerError):
+    """Failed to retrieve server license."""
+
+
+class ServerLicenseSetError(ArangoServerError):
+    """Failed to set server license."""
+
+
+class ServerReloadRoutingError(ArangoServerError):
+    """Failed to reload routing details."""
+
+
+class ServerShutdownError(ArangoServerError):
+    """Failed to initiate shutdown sequence."""
+
+
+class
ServerShutdownProgressError(ArangoServerError):
+    """Failed to retrieve soft shutdown progress."""
+
+
 class ServerStatusError(ArangoServerError):
     """Failed to retrieve server status."""

@@ -567,6 +627,10 @@ class ServerTLSReloadError(ArangoServerError):
     """Failed to reload TLS."""

+class ServerTimeError(ArangoServerError):
+    """Failed to retrieve server system time."""
+
+
 class ServerVersionError(ArangoServerError):
     """Failed to retrieve server version."""

diff --git a/docs/admin.rst b/docs/admin.rst
new file mode 100644
index 0000000..6a494d1
--- /dev/null
+++ b/docs/admin.rst
@@ -0,0 +1,47 @@
+Server Administration
+---------------------
+
+ArangoDB provides operations for server administration and monitoring.
+Most of these operations can only be performed by admin users via the
+``_system`` database.
+
+**Example:**
+
+.. code-block:: python
+
+    from arangoasync import ArangoClient
+    from arangoasync.auth import Auth
+
+    # Initialize the client for ArangoDB.
+    async with ArangoClient(hosts="http://localhost:8529") as client:
+        auth = Auth(username="root", password="passwd")
+
+        # Connect to "_system" database as root user.
+        sys_db = await client.db("_system", auth=auth)
+
+        # Retrieve the database engine.
+        await sys_db.engine()
+
+        # Retrieve the server time.
+        time = await sys_db.time()
+
+        # Check server availability.
+        availability = await sys_db.check_availability()
+
+        # Get support info.
+        info = await sys_db.support_info()
+
+        # Get the startup option configuration.
+        options = await sys_db.options()
+
+        # Get the available startup options.
+        options = await sys_db.options_available()
+
+        # Return whether or not a server is in read-only mode.
+        mode = await sys_db.mode()
+
+        # Get license information.
+        license = await sys_db.license()
+
+        # Execute JavaScript on the server.
+        result = await sys_db.execute("return 1")
diff --git a/docs/index.rst b/docs/index.rst
index 78afe62..0fab3ac 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -61,6 +61,7 @@ Contents
..
toctree:: :maxdepth: 1 + admin user **Miscellaneous** diff --git a/tests/test_database.py b/tests/test_database.py index 7058ac1..5daa837 100644 --- a/tests/test_database.py +++ b/tests/test_database.py @@ -1,21 +1,39 @@ import asyncio +import datetime import pytest from packaging import version +from arangoasync.client import ArangoClient from arangoasync.collection import StandardCollection from arangoasync.exceptions import ( CollectionCreateError, CollectionDeleteError, CollectionKeyGeneratorsError, CollectionListError, + DatabaseCompactError, DatabaseCreateError, DatabaseDeleteError, DatabaseListError, DatabasePropertiesError, + DatabaseSupportInfoError, JWTSecretListError, JWTSecretReloadError, + ServerAvailableOptionsGetError, + ServerCheckAvailabilityError, + ServerCurrentOptionsGetError, + ServerEchoError, + ServerEngineError, + ServerExecuteError, + ServerLicenseGetError, + ServerLicenseSetError, + ServerModeError, + ServerModeSetError, + ServerReloadRoutingError, + ServerShutdownError, + ServerShutdownProgressError, ServerStatusError, + ServerTimeError, ServerVersionError, ) from arangoasync.typings import CollectionType, KeyOptions, UserInfo @@ -23,7 +41,9 @@ @pytest.mark.asyncio -async def test_database_misc_methods(sys_db, db, bad_db, cluster, db_version): +async def test_database_misc_methods( + sys_db, db, bad_db, cluster, db_version, url, sys_db_name, token +): # Status status = await sys_db.status() assert status["server"] == "arango" @@ -64,6 +84,79 @@ async def test_database_misc_methods(sys_db, db, bad_db, cluster, db_version): with pytest.raises(CollectionKeyGeneratorsError): await bad_db.key_generators() + # Administration + with pytest.raises(ServerEngineError): + await bad_db.engine() + result = await db.engine() + assert isinstance(result, dict) + + with pytest.raises(ServerTimeError): + await bad_db.time() + time = await db.time() + assert isinstance(time, datetime.datetime) + + with pytest.raises(ServerCheckAvailabilityError): + await bad_db.check_availability() + assert isinstance(await db.check_availability(), str) + + with pytest.raises(DatabaseSupportInfoError): + await bad_db.support_info() + info = await sys_db.support_info() + assert isinstance(info, dict) + + if db_version >= version.parse("3.12.0"): + with pytest.raises(ServerCurrentOptionsGetError): + await bad_db.options() + options = await sys_db.options() + assert isinstance(options, dict) + with pytest.raises(ServerAvailableOptionsGetError): + await bad_db.options_available() + options_available = await sys_db.options_available() + assert isinstance(options_available, dict) + + with pytest.raises(ServerModeError): + await bad_db.mode() + mode = await sys_db.mode() + assert isinstance(mode, str) + with pytest.raises(ServerModeSetError): + await bad_db.set_mode("foo") + mode = await sys_db.set_mode("default") + assert isinstance(mode, str) + + with pytest.raises(ServerLicenseGetError): + await bad_db.license() + license = await sys_db.license() + assert isinstance(license, dict) + with pytest.raises(ServerLicenseSetError): + await sys_db.set_license('"abc"') + + with pytest.raises(ServerShutdownError): + await bad_db.shutdown() + with pytest.raises(ServerShutdownProgressError): + await bad_db.shutdown_progress() + + with pytest.raises(ServerReloadRoutingError): + await bad_db.reload_routing() + await sys_db.reload_routing() + + with pytest.raises(ServerEchoError): + await bad_db.echo() + result = await sys_db.echo() + assert isinstance(result, dict) + + with pytest.raises(ServerExecuteError): + await 
bad_db.execute("return 1") + result = await sys_db.execute("return 1") + assert result == 1 + + with pytest.raises(DatabaseCompactError): + await bad_db.compact() + async with ArangoClient(hosts=url) as client: + db = await client.db( + sys_db_name, auth_method="superuser", token=token, verify=True + ) + await db.compact() + @pytest.mark.asyncio async def test_create_drop_database( From 1dd20747988976588d0a3b16d5d5d9d21fdbed70 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Sat, 16 Aug 2025 19:49:13 +0800 Subject: [PATCH 28/47] Adding custom requests (#67) --- arangoasync/database.py | 15 +++++++++++++++ docs/database.rst | 7 +++++++ tests/test_database.py | 9 +++++++++ 3 files changed, 31 insertions(+) diff --git a/arangoasync/database.py b/arangoasync/database.py index 449b789..813a1ab 100644 --- a/arangoasync/database.py +++ b/arangoasync/database.py @@ -2861,6 +2861,21 @@ def response_handler(resp: Response) -> Any: return await self._executor.execute(request, response_handler) + async def request(self, request: Request) -> Result[Response]: + """Execute a custom request. + + Args: + request (Request): Request object to be executed. + + Returns: + Response: Response object containing the result of the request. + """ + + def response_handler(resp: Response) -> Response: + return resp + + return await self._executor.execute(request, response_handler) + class StandardDatabase(Database): """Standard database API wrapper. diff --git a/docs/database.rst b/docs/database.rst index 851cc9d..f4dc759 100644 --- a/docs/database.rst +++ b/docs/database.rst @@ -14,6 +14,7 @@ information. from arangoasync import ArangoClient from arangoasync.auth import Auth + from arangoasync.request import Method, Request # Initialize the client for ArangoDB. async with ArangoClient(hosts="http://localhost:8529") as client: @@ -60,4 +61,10 @@ information. # Delete the database. Note that the new users will remain. await sys_db.delete_database("test") + # Example of a custom request + request = Request( + method=Method.POST, endpoint="/_admin/execute", data="return 1".encode("utf-8") + ) + response = await sys_db.request(request) + See :class:`arangoasync.client.ArangoClient` and :class:`arangoasync.database.StandardDatabase` for API specification. 
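The object returned by ``request()`` is the driver's ``Response``; its ``raw_body``
attribute holds the undecoded payload, so callers decode it themselves. Below is a
minimal sketch of doing so, assuming the ``sys_db`` and ``request`` objects from the
example above:

.. code-block:: python

    import json

    # Execute the custom request and decode the raw payload by hand.
    response = await sys_db.request(request)
    assert response.is_success
    result = json.loads(response.raw_body)  # the script above returns 1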
diff --git a/tests/test_database.py b/tests/test_database.py index 5daa837..c9a260b 100644 --- a/tests/test_database.py +++ b/tests/test_database.py @@ -1,5 +1,6 @@ import asyncio import datetime +import json import pytest from packaging import version @@ -36,6 +37,7 @@ ServerTimeError, ServerVersionError, ) +from arangoasync.request import Method, Request from arangoasync.typings import CollectionType, KeyOptions, UserInfo from tests.helpers import generate_col_name, generate_db_name, generate_username @@ -157,6 +159,13 @@ async def test_database_misc_methods( ) await db.compact() + # Custom Request + request = Request( + method=Method.POST, endpoint="/_admin/execute", data="return 1".encode("utf-8") + ) + response = await sys_db.request(request) + assert json.loads(response.raw_body) == 1 + @pytest.mark.asyncio async def test_create_drop_database( From f1de45bf445f500848d3b0e413355a026b4d7be6 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Sat, 16 Aug 2025 21:20:16 +0800 Subject: [PATCH 29/47] Monitoring API (#68) * Adding monitoring API * API calls only in 3.12 * API calls only in enterprise --- arangoasync/database.py | 343 +++++++++++++++++++++++++++++++++++++- arangoasync/exceptions.py | 32 ++++ docs/admin.rst | 3 + tests/test_database.py | 54 +++++- 4 files changed, 430 insertions(+), 2 deletions(-) diff --git a/arangoasync/database.py b/arangoasync/database.py index 813a1ab..2997bab 100644 --- a/arangoasync/database.py +++ b/arangoasync/database.py @@ -7,7 +7,7 @@ from datetime import datetime -from typing import Any, List, Optional, Sequence, TypeVar, cast +from typing import Any, Dict, List, Optional, Sequence, TypeVar, cast from warnings import warn from arangoasync.aql import AQL @@ -42,6 +42,7 @@ PermissionListError, PermissionResetError, PermissionUpdateError, + ServerApiCallsError, ServerAvailableOptionsGetError, ServerCheckAvailabilityError, ServerCurrentOptionsGetError, @@ -51,8 +52,15 @@ ServerExecuteError, ServerLicenseGetError, ServerLicenseSetError, + ServerLogLevelError, + ServerLogLevelResetError, + ServerLogLevelSetError, + ServerLogSettingError, + ServerLogSettingSetError, + ServerMetricsError, ServerModeError, ServerModeSetError, + ServerReadLogError, ServerReloadRoutingError, ServerShutdownError, ServerShutdownProgressError, @@ -2876,6 +2884,339 @@ def response_handler(resp: Response) -> Response: return await self._executor.execute(request, response_handler) + async def metrics(self, server_id: Optional[str] = None) -> Result[str]: + """Return server metrics in Prometheus format. + + Args: + server_id (str | None): Returns metrics of the specified server. + If no serverId is given, the asked server will reply. + + Returns: + str: Server metrics in Prometheus format. + + Raises: + ServerMetricsError: If the operation fails. 
+
+        References:
+            - `metrics-api-v2 `__
+        """  # noqa: E501
+        params: Params = {}
+        if server_id is not None:
+            params["serverId"] = server_id
+
+        request = Request(
+            method=Method.GET,
+            endpoint="/_admin/metrics/v2",
+            params=params,
+        )
+
+        def response_handler(resp: Response) -> str:
+            if not resp.is_success:
+                raise ServerMetricsError(resp, request)
+            return resp.raw_body.decode("utf-8")
+
+        return await self._executor.execute(request, response_handler)
+
+    async def read_log_entries(
+        self,
+        upto: Optional[int | str] = None,
+        level: Optional[str] = None,
+        start: Optional[int] = None,
+        size: Optional[int] = None,
+        offset: Optional[int] = None,
+        search: Optional[str] = None,
+        sort: Optional[str] = None,
+        server_id: Optional[str] = None,
+    ) -> Result[Json]:
+        """Read the global log from the server.
+
+        Args:
+            upto (int | str | None): Return the log entries up to the given level
+                (mutually exclusive with parameter **level**). Allowed values are
+                "fatal", "error", "warning", "info" (default), "debug" and "trace".
+            level (str | None): Return the log entries of only the given level
+                (mutually exclusive with **upto**).
+            start (int | None): Return the log entries whose ID is greater than or
+                equal to the given value.
+            size (int | None): Restrict the size of the result to the given value.
+                This can be used for pagination.
+            offset (int | None): Number of entries to skip (e.g. for pagination).
+            search (str | None): Return only the log entries containing the given text.
+            sort (str | None): Sort the log entries according to the given fashion,
+                which can be "asc" or "desc".
+            server_id (str | None): Returns all log entries of the specified server.
+                If no serverId is given, the asked server will reply.
+
+        Returns:
+            dict: Server log entries.
+
+        Raises:
+            ServerReadLogError: If the operation fails.
+
+        References:
+            - `get-the-global-server-logs `__
+        """  # noqa: E501
+        params: Params = {}
+        if upto is not None:
+            params["upto"] = upto
+        if level is not None:
+            params["level"] = level
+        if start is not None:
+            params["start"] = start
+        if size is not None:
+            params["size"] = size
+        if offset is not None:
+            params["offset"] = offset
+        if search is not None:
+            params["search"] = search
+        if sort is not None:
+            params["sort"] = sort
+        if server_id is not None:
+            params["serverId"] = server_id
+
+        request = Request(
+            method=Method.GET,
+            endpoint="/_admin/log/entries",
+            params=params,
+            prefix_needed=False,
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerReadLogError(resp, request)
+
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def log_levels(
+        self, server_id: Optional[str] = None, with_appenders: Optional[bool] = None
+    ) -> Result[Json]:
+        """Return current logging levels.
+
+        Args:
+            server_id (str | None): Forward the request to the specified server.
+            with_appenders (bool | None): Include appenders in the response.
+
+        Returns:
+            dict: Current logging levels.
+
+        Raises:
+            ServerLogLevelError: If the operation fails.
+
+        References:
+            - `get-the-server-log-levels `__
+        """  # noqa: E501
+        params: Params = {}
+        if server_id is not None:
+            params["serverId"] = server_id
+        if with_appenders is not None:
+            params["withAppenders"] = with_appenders
+
+        request = Request(
+            method=Method.GET,
+            endpoint="/_admin/log/level",
+            params=params,
+            prefix_needed=False,
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerLogLevelError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def set_log_levels(
+        self,
+        server_id: Optional[str] = None,
+        with_appenders: Optional[bool] = None,
+        **kwargs: Dict[str, Any],
+    ) -> Result[Json]:
+        """Set the logging levels.
+
+        This method takes arbitrary keyword arguments where the keys are the
+        logger names and the values are the logging levels. For example:
+
+        .. code-block:: python
+
+            db.set_log_levels(
+                agency='DEBUG',
+                collector='INFO',
+                threads='WARNING'
+            )
+
+        Keys that are not valid logger names are ignored.
+
+        Args:
+            server_id (str | None): Forward the request to a specific server.
+            with_appenders (bool | None): Include appenders in the response.
+            kwargs (dict): Logging levels to be set.
+
+        Returns:
+            dict: New logging levels.
+
+        Raises:
+            ServerLogLevelSetError: If the operation fails.
+
+        References:
+            - `set-the-server-log-levels `__
+        """  # noqa: E501
+        params: Params = {}
+        if server_id is not None:
+            params["serverId"] = server_id
+        if with_appenders is not None:
+            params["withAppenders"] = with_appenders
+
+        request = Request(
+            method=Method.PUT,
+            endpoint="/_admin/log/level",
+            params=params,
+            data=self.serializer.dumps(kwargs),
+            prefix_needed=False,
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerLogLevelSetError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def reset_log_levels(self, server_id: Optional[str] = None) -> Result[Json]:
+        """Reset the logging levels.
+
+        Revert the server's log level settings to the values they had at startup,
+        as determined by the startup options specified on the command-line,
+        a configuration file, and the factory defaults.
+
+        Args:
+            server_id (str | None): Forward the request to a specific server.
+
+        Returns:
+            dict: New logging levels.
+
+        Raises:
+            ServerLogLevelResetError: If the operation fails.
+
+        References:
+            - `reset-the-server-log-levels `__
+        """  # noqa: E501
+        params: Params = {}
+        if server_id is not None:
+            params["serverId"] = server_id
+
+        request = Request(
+            method=Method.DELETE,
+            endpoint="/_admin/log/level",
+            params=params,
+            prefix_needed=False,
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerLogLevelResetError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def log_settings(self) -> Result[Json]:
+        """Get the structured log settings.
+
+        Returns:
+            dict: Current structured log settings.
+
+        Raises:
+            ServerLogSettingError: If the operation fails.
+ + References: + - `get-the-structured-log-settings `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_admin/log/structured", + prefix_needed=False, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerLogSettingError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def set_log_settings(self, **kwargs: Dict[str, Any]) -> Result[Json]: + """Set the structured log settings. + + This method takes arbitrary keyword arguments where the keys are the + structured log parameters and the values are true or false, for either + enabling or disabling the parameters. + + .. code-block:: python + + db.set_log_settings( + database=True, + url=True, + username=False, + ) + + Args: + kwargs (dict): Structured log parameters to be set. + + Returns: + dict: New structured log settings. + + Raises: + ServerLogSettingSetError: If the operation fails. + + References: + - `set-the-structured-log-settings `__ + """ # noqa: E501 + request = Request( + method=Method.PUT, + endpoint="/_admin/log/structured", + data=self.serializer.dumps(kwargs), + prefix_needed=False, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerLogSettingSetError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def api_calls(self) -> Result[Json]: + """Get a list of the most recent requests with a timestamp and the endpoint. + + Returns: + dict: API calls made to the server. + + Raises: + ServerApiCallsError: If the operation fails. + + References: + - `get-recent-api-calls `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_admin/server/api-calls", + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerApiCallsError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body)["result"] + return result + + return await self._executor.execute(request, response_handler) + class StandardDatabase(Database): """Standard database API wrapper. 
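Taken together, the monitoring endpoints added above can be exercised as follows.
This is a minimal sketch, assuming a ``sys_db`` handle to the ``_system`` database
as in the other examples:

.. code-block:: python

    # Server metrics arrive as a single Prometheus-formatted string.
    metrics = await sys_db.metrics()

    # Page through the global log, newest entries first.
    entries = await sys_db.read_log_entries(level="WARNING", size=10, sort="desc")

    # Inspect the log levels, tune one logger, then revert to the startup values.
    levels = await sys_db.log_levels()
    await sys_db.set_log_levels(threads="WARNING")
    await sys_db.reset_log_levels()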
diff --git a/arangoasync/exceptions.py b/arangoasync/exceptions.py
index 96a432a..ebe028e 100644
--- a/arangoasync/exceptions.py
+++ b/arangoasync/exceptions.py
@@ -555,6 +555,10 @@ class SerializationError(ArangoClientError):
     """Failed to serialize the request."""

+class ServerApiCallsError(ArangoServerError):
+    """Failed to retrieve the list of recent API calls."""
+
+
 class ServerAvailableOptionsGetError(ArangoServerError):
     """Failed to retrieve available server options."""

@@ -587,6 +591,10 @@ class ServerExecuteError(ArangoServerError):
     """Failed to execute raw JavaScript command."""

+class ServerMetricsError(ArangoServerError):
+    """Failed to retrieve server metrics."""
+
+
 class ServerModeError(ArangoServerError):
     """Failed to retrieve server mode."""

@@ -603,6 +611,30 @@ class ServerLicenseSetError(ArangoServerError):
     """Failed to set server license."""

+class ServerLogLevelError(ArangoServerError):
+    """Failed to retrieve server log levels."""
+
+
+class ServerLogLevelResetError(ArangoServerError):
+    """Failed to reset server log levels."""
+
+
+class ServerLogLevelSetError(ArangoServerError):
+    """Failed to set server log levels."""
+
+
+class ServerLogSettingError(ArangoServerError):
+    """Failed to retrieve server log settings."""
+
+
+class ServerLogSettingSetError(ArangoServerError):
+    """Failed to set server log settings."""
+
+
+class ServerReadLogError(ArangoServerError):
+    """Failed to retrieve global log."""
+
+
 class ServerReloadRoutingError(ArangoServerError):
     """Failed to reload routing details."""

diff --git a/docs/admin.rst b/docs/admin.rst
index 6a494d1..6120567 100644
--- a/docs/admin.rst
+++ b/docs/admin.rst
@@ -45,3 +45,6 @@ Most of these operations can only be performed by admin users via the

         # Execute JavaScript on the server.
         result = await sys_db.execute("return 1")
+
+        # Get metrics in Prometheus format.
+        metrics = await sys_db.metrics()
diff --git a/tests/test_database.py b/tests/test_database.py
index c9a260b..425007b 100644
--- a/tests/test_database.py
+++ b/tests/test_database.py
@@ -20,6 +20,7 @@
     DatabaseSupportInfoError,
     JWTSecretListError,
     JWTSecretReloadError,
+    ServerApiCallsError,
     ServerAvailableOptionsGetError,
     ServerCheckAvailabilityError,
     ServerCurrentOptionsGetError,
@@ -28,8 +29,15 @@
     ServerExecuteError,
     ServerLicenseGetError,
     ServerLicenseSetError,
+    ServerLogLevelError,
+    ServerLogLevelResetError,
+    ServerLogLevelSetError,
+    ServerLogSettingError,
+    ServerLogSettingSetError,
+    ServerMetricsError,
     ServerModeError,
     ServerModeSetError,
+    ServerReadLogError,
     ServerReloadRoutingError,
     ServerShutdownError,
     ServerShutdownProgressError,
@@ -44,7 +52,7 @@

 @pytest.mark.asyncio
 async def test_database_misc_methods(
-    sys_db, db, bad_db, cluster, db_version, url, sys_db_name, token
+    sys_db, db, bad_db, cluster, db_version, url, sys_db_name, token, enterprise
 ):
     # Status
     status = await sys_db.status()
@@ -166,6 +174,50 @@
     response = await sys_db.request(request)
     assert json.loads(response.raw_body) == 1

+    if enterprise and db_version >= version.parse("3.12.0"):
+        # API calls
+        with pytest.raises(ServerApiCallsError):
+            await bad_db.api_calls()
+        result = await sys_db.api_calls()
+        assert isinstance(result, dict)
+
+
+@pytest.mark.asyncio
+async def test_metrics(db, bad_db):
+    with pytest.raises(ServerMetricsError):
+        await bad_db.metrics()
+    metrics = await db.metrics()
+    assert isinstance(metrics, str)
+
+
+@pytest.mark.asyncio
+async def test_logs(sys_db, bad_db):
+    with pytest.raises(ServerReadLogError):
+        await
bad_db.read_log_entries()
+    result = await sys_db.read_log_entries()
+    assert isinstance(result, dict)
+    with pytest.raises(ServerLogLevelError):
+        await bad_db.log_levels()
+    result = await sys_db.log_levels()
+    assert isinstance(result, dict)
+    with pytest.raises(ServerLogLevelSetError):
+        await bad_db.set_log_levels()
+    new_levels = {"agency": "DEBUG", "engines": "INFO", "threads": "WARNING"}
+    result = await sys_db.set_log_levels(**new_levels)
+    assert isinstance(result, dict)
+    with pytest.raises(ServerLogLevelResetError):
+        await bad_db.reset_log_levels()
+    result = await sys_db.reset_log_levels()
+    assert isinstance(result, dict)
+    with pytest.raises(ServerLogSettingError):
+        await bad_db.log_settings()
+    result = await sys_db.log_settings()
+    assert isinstance(result, dict)
+    with pytest.raises(ServerLogSettingSetError):
+        await bad_db.set_log_settings()
+    result = await sys_db.set_log_settings()
+    assert isinstance(result, dict)
+
+
 @pytest.mark.asyncio
 async def test_create_drop_database(
From 4bc2ca70eabc8a75c55887d4623acbc66b6e0fa8 Mon Sep 17 00:00:00 2001
From: Alex Petenchea
Date: Sun, 17 Aug 2025 12:48:53 +0800
Subject: [PATCH 30/47] Replication API (#69)

* Adding replication API
* Test fixes

---
 arangoasync/database.py    |  10 ++
 arangoasync/exceptions.py  |  28 ++++
 arangoasync/replication.py | 270 +++++++++++++++++++++++++++++++++++++
 docs/migration.rst         |   9 +-
 tests/test_database.py     |  37 +++++
 5 files changed, 353 insertions(+), 1 deletion(-)
 create mode 100644 arangoasync/replication.py

diff --git a/arangoasync/database.py b/arangoasync/database.py
index 2997bab..a28fa43 100644
--- a/arangoasync/database.py
+++ b/arangoasync/database.py
@@ -101,6 +101,7 @@
 )
 from arangoasync.foxx import Foxx
 from arangoasync.graph import Graph
+from arangoasync.replication import Replication
 from arangoasync.request import Method, Request
 from arangoasync.response import Response
 from arangoasync.result import Result
@@ -234,6 +235,15 @@ def foxx(self) -> Foxx:
         """
         return Foxx(self._executor)

+    @property
+    def replication(self) -> Replication:
+        """Return Replication API wrapper.
+
+        Returns:
+            Replication API wrapper.
+        """
+        return Replication(self._executor)
+
     async def properties(self) -> Result[DatabaseProperties]:
         """Return database properties.
diff --git a/arangoasync/exceptions.py b/arangoasync/exceptions.py
index ebe028e..5a904ee 100644
--- a/arangoasync/exceptions.py
+++ b/arangoasync/exceptions.py
@@ -551,6 +551,34 @@ class PermissionUpdateError(ArangoServerError):
     """Failed to update user permission."""

+class ReplicationApplierConfigError(ArangoServerError):
+    """Failed to retrieve replication applier configuration."""
+
+
+class ReplicationApplierStateError(ArangoServerError):
+    """Failed to retrieve replication applier state."""
+
+
+class ReplicationClusterInventoryError(ArangoServerError):
+    """Failed to retrieve overview of collection and indexes in a cluster."""
+
+
+class ReplicationDumpError(ArangoServerError):
+    """Failed to retrieve collection content."""
+
+
+class ReplicationInventoryError(ArangoServerError):
+    """Failed to retrieve inventory of collection and indexes."""
+
+
+class ReplicationLoggerStateError(ArangoServerError):
+    """Failed to retrieve logger state."""
+
+
+class ReplicationServerIDError(ArangoServerError):
+    """Failed to retrieve server ID."""
+
+
 class SerializationError(ArangoClientError):
     """Failed to serialize the request."""

diff --git a/arangoasync/replication.py b/arangoasync/replication.py
new file mode 100644
index 0000000..9d96709
--- /dev/null
+++ b/arangoasync/replication.py
@@ -0,0 +1,270 @@
+__all__ = ["Replication"]
+
+
+from typing import Optional
+
+from arangoasync.exceptions import (
+    ReplicationApplierConfigError,
+    ReplicationApplierStateError,
+    ReplicationClusterInventoryError,
+    ReplicationDumpError,
+    ReplicationInventoryError,
+    ReplicationLoggerStateError,
+    ReplicationServerIDError,
+)
+from arangoasync.executor import ApiExecutor
+from arangoasync.request import Method, Request
+from arangoasync.response import Response
+from arangoasync.result import Result
+from arangoasync.serialization import Deserializer, Serializer
+from arangoasync.typings import Json, Jsons, Params
+
+
+class Replication:
+    """Replication API wrapper."""
+
+    def __init__(self, executor: ApiExecutor) -> None:
+        self._executor = executor
+
+    @property
+    def serializer(self) -> Serializer[Json]:
+        """Return the serializer."""
+        return self._executor.serializer
+
+    @property
+    def deserializer(self) -> Deserializer[Json, Jsons]:
+        """Return the deserializer."""
+        return self._executor.deserializer
+
+    async def inventory(
+        self,
+        batch_id: str,
+        include_system: Optional[bool] = None,
+        all_databases: Optional[bool] = None,
+        collection: Optional[str] = None,
+        db_server: Optional[str] = None,
+    ) -> Result[Json]:
+        """
+        Return an overview of collections and indexes.
+
+        Args:
+            batch_id (str): Batch ID.
+            include_system (bool | None): Include system collections.
+            all_databases (bool | None): Include all databases (only on "_system").
+            collection (str | None): If this parameter is set, the
+                response will be restricted to a single collection (the one specified),
+                and no views will be returned.
+            db_server (str | None): On a Coordinator, this request must have a
+                DBserver query parameter.
+
+        Returns:
+            dict: Overview of collections and indexes.
+
+        Raises:
+            ReplicationInventoryError: If retrieval fails.
+ + References: + - `get-a-replication-inventory `__ + """ # noqa: E501 + params: Params = dict() + params["batchId"] = batch_id + if include_system is not None: + params["includeSystem"] = include_system + if all_databases is not None: + params["global"] = all_databases + if collection is not None: + params["collection"] = collection + if db_server is not None: + params["DBServer"] = db_server + + request = Request( + method=Method.GET, + endpoint="/_api/replication/inventory", + params=params, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ReplicationInventoryError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def dump( + self, + collection: str, + batch_id: Optional[str] = None, + chunk_size: Optional[int] = None, + ) -> Result[bytes]: + """Return the events data of one collection. + + Args: + collection (str): ID of the collection to dump. + batch_id (str | None): Batch ID. + chunk_size (int | None): Size of the result in bytes. This value is honored + approximately only. + + Returns: + bytes: Collection events data. + + Raises: + ReplicationDumpError: If retrieval fails. + + References: + - `get-a-replication-dump `__ + """ # noqa: E501 + params: Params = dict() + params["collection"] = collection + if batch_id is not None: + params["batchId"] = batch_id + if chunk_size is not None: + params["chunkSize"] = chunk_size + + request = Request( + method=Method.GET, + endpoint="/_api/replication/dump", + params=params, + ) + + def response_handler(resp: Response) -> bytes: + if not resp.is_success: + raise ReplicationDumpError(resp, request) + return resp.raw_body + + return await self._executor.execute(request, response_handler) + + async def cluster_inventory( + self, include_system: Optional[bool] = None + ) -> Result[Json]: + """Return an overview of collections and indexes in a cluster. + + Args: + include_system (bool | None): Include system collections. + + Returns: + dict: Overview of collections and indexes in the cluster. + + Raises: + ReplicationClusterInventoryError: If retrieval fails. + + References: + - `get-the-cluster-collections-and-indexes `__ + """ # noqa: E501 + params: Params = {} + if include_system is not None: + params["includeSystem"] = include_system + + request = Request( + method=Method.GET, + endpoint="/_api/replication/clusterInventory", + params=params, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ReplicationClusterInventoryError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def logger_state(self) -> Result[Json]: + """Return the state of the replication logger. + + Returns: + dict: Logger state. + + Raises: + ReplicationLoggerStateError: If retrieval fails. + + References: + - `get-the-replication-logger-state `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_api/replication/logger-state", + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ReplicationLoggerStateError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def applier_config(self) -> Result[Json]: + """Return the configuration of the replication applier. 
+
+        Returns:
+            dict: Configuration of the replication applier.
+
+        Raises:
+            ReplicationApplierConfigError: If retrieval fails.
+
+        References:
+            - `get-the-replication-applier-configuration `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.GET,
+            endpoint="/_api/replication/applier-config",
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ReplicationApplierConfigError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def applier_state(self) -> Result[Json]:
+        """Return the state of the replication applier.
+
+        Returns:
+            dict: State of the replication applier.
+
+        Raises:
+            ReplicationApplierStateError: If retrieval fails.
+
+        References:
+            - `get-the-replication-applier-state `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.GET,
+            endpoint="/_api/replication/applier-state",
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ReplicationApplierStateError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def server_id(self) -> Result[str]:
+        """Return the current server's ID.
+
+        Returns:
+            str: Server ID.
+
+        Raises:
+            ReplicationServerIDError: If retrieval fails.
+
+        References:
+            - `get-the-replication-server-id `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.GET,
+            endpoint="/_api/replication/server-id",
+        )
+
+        def response_handler(resp: Response) -> str:
+            if not resp.is_success:
+                raise ReplicationServerIDError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return str(result["serverId"])
+
+        return await self._executor.execute(request, response_handler)
diff --git a/docs/migration.rst b/docs/migration.rst
index 7c2427e..0353a0d 100644
--- a/docs/migration.rst
+++ b/docs/migration.rst
@@ -51,7 +51,7 @@ this is not always consistent.

 The asynchronous driver, however, tries to stick to a simple rule:

-* If the API returns a camel case key, it will be returned as is.
+* If the API returns a camel case key, it will be returned as is: responses are passed through from the server unchanged.
 * Parameters passed from client to server use the snake case equivalent of the camel
   case keys required by the API (e.g. `userName` becomes `user_name`). This is done to
   ensure PEP8 compatibility.
@@ -74,6 +74,13 @@ Serialization
 Check out the :ref:`Serialization` section to learn more about how to implement your own
 serializer/deserializer. The current driver makes use of generic types and allows for a
 higher degree of customization.

+Replication
+===========
+
+A minimal replication API is available for observability purposes, but its use is not
+recommended: these are internal endpoints that are not meant to be called by end users.
+If you need to make any changes to replication, please do so from the cluster web interface.
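+
+For read-only inspection, the wrapper is reached through ``db.replication``. Below is a
+minimal sketch of the observability calls described above, assuming a ``db`` handle
+obtained as in the other examples:
+
+.. code-block:: python
+
+    # Read-only observability calls; none of these modify the deployment.
+    server_id = await db.replication.server_id()
+    logger_state = await db.replication.logger_state()
+    applier_config = await db.replication.applier_config()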
+
 Mixing sync and async
 =====================
diff --git a/tests/test_database.py b/tests/test_database.py
index 425007b..33dcc56 100644
--- a/tests/test_database.py
+++ b/tests/test_database.py
@@ -20,6 +20,13 @@
     DatabaseSupportInfoError,
     JWTSecretListError,
     JWTSecretReloadError,
+    ReplicationApplierConfigError,
+    ReplicationApplierStateError,
+    ReplicationClusterInventoryError,
+    ReplicationDumpError,
+    ReplicationInventoryError,
+    ReplicationLoggerStateError,
+    ReplicationServerIDError,
     ServerApiCallsError,
@@ -190,6 +197,36 @@ async def test_metrics(db, bad_db):
     assert isinstance(metrics, str)

+@pytest.mark.asyncio
+async def test_replication(db, bad_db, cluster):
+    with pytest.raises(ReplicationInventoryError):
+        await bad_db.replication.inventory("id")
+    with pytest.raises(ReplicationDumpError):
+        await bad_db.replication.dump("test_collection")
+    if cluster:
+        with pytest.raises(ReplicationClusterInventoryError):
+            await bad_db.replication.cluster_inventory()
+        result = await db.replication.cluster_inventory()
+        assert isinstance(result, dict)
+    if not cluster:
+        with pytest.raises(ReplicationLoggerStateError):
+            await bad_db.replication.logger_state()
+        result = await db.replication.logger_state()
+        assert isinstance(result, dict)
+        with pytest.raises(ReplicationApplierConfigError):
+            await bad_db.replication.applier_config()
+        result = await db.replication.applier_config()
+        assert isinstance(result, dict)
+        with pytest.raises(ReplicationApplierStateError):
+            await bad_db.replication.applier_state()
+        result = await db.replication.applier_state()
+        assert isinstance(result, dict)
+        with pytest.raises(ReplicationServerIDError):
+            await bad_db.replication.server_id()
+        result = await db.replication.server_id()
+        assert isinstance(result, str)
+
+
 @pytest.mark.asyncio
 async def test_logs(sys_db, bad_db):
     with pytest.raises(ServerReadLogError):
From 12530de10b9c1244d06c5002ad5dd35d9279b60c Mon Sep 17 00:00:00 2001
From: Alex Petenchea
Date: Mon, 18 Aug 2025 06:49:52 +0000
Subject: [PATCH 31/47] Version 1.0.0

---
 CONTRIBUTING.md        | 2 +-
 README.md              | 2 +-
 arangoasync/version.py | 2 +-
 docs/index.rst         | 3 +--
 4 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 375d8b0..66044c4 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -10,7 +10,7 @@ pre-commit install  # Install git pre-commit hooks

 Run unit tests with coverage:

 ```shell
-pytest --cov=arango --cov-report=html  # Open htmlcov/index.html in your browser
+pytest --enterprise --cluster --cov=arango --cov-report=html  # Open htmlcov/index.html in your browser
 ```

 To start an ArangoDB instance locally, run:
diff --git a/README.md b/README.md
index ab24eae..b80d633 100644
--- a/README.md
+++ b/README.md
@@ -20,7 +20,7 @@ database natively supporting documents, graphs and search.
 This is the _asyncio_ alternative of the [python-arango](https://github.com/arangodb/python-arango)
 driver.

-**Note: This project is still in active development, features might be added or removed.**
+Check out a demo app at [python-arango-async-demo](https://github.com/apetenchea/python-arango-async-demo).
## Requirements diff --git a/arangoasync/version.py b/arangoasync/version.py index b1a19e3..5becc17 100644 --- a/arangoasync/version.py +++ b/arangoasync/version.py @@ -1 +1 @@ -__version__ = "0.0.5" +__version__ = "1.0.0" diff --git a/docs/index.rst b/docs/index.rst index 0fab3ac..52714c3 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -6,8 +6,7 @@ python-arango-async ------------------- Welcome to the documentation for python-arango-async_, a Python driver for ArangoDB_. - -**Note: This project is still in active development, features might be added or removed.** +You can check out a demo app at [python-arango-async-demo](https://github.com/apetenchea/python-arango-async-demo). Requirements ============= From 59c085d3087f0a1213777f2fffa373e075f59cca Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Mon, 18 Aug 2025 07:05:31 +0000 Subject: [PATCH 32/47] Updated docs --- docs/specs.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/specs.rst b/docs/specs.rst index 763af9c..e8c0a32 100644 --- a/docs/specs.rst +++ b/docs/specs.rst @@ -31,6 +31,9 @@ python-arango-async. .. automodule:: arangoasync.backup :members: +.. automodule:: arangoasync.foxx + :members: + .. automodule:: arangoasync.cluster :members: @@ -57,3 +60,6 @@ python-arango-async. .. automodule:: arangoasync.result :members: + +.. automodule:: arangoasync.replication + :members: From dc72d6cb704ff7937bbb067a5ff817eaca0574a6 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Mon, 18 Aug 2025 07:06:06 +0000 Subject: [PATCH 33/47] Bumping version number --- arangoasync/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arangoasync/version.py b/arangoasync/version.py index 5becc17..5c4105c 100644 --- a/arangoasync/version.py +++ b/arangoasync/version.py @@ -1 +1 @@ -__version__ = "1.0.0" +__version__ = "1.0.1" From caf33941d7d4151274db964ba6163680595cb4e7 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Mon, 18 Aug 2025 07:16:12 +0000 Subject: [PATCH 34/47] Updated docs --- arangoasync/version.py | 2 +- docs/index.rst | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/arangoasync/version.py b/arangoasync/version.py index 5c4105c..7863915 100644 --- a/arangoasync/version.py +++ b/arangoasync/version.py @@ -1 +1 @@ -__version__ = "1.0.1" +__version__ = "1.0.2" diff --git a/docs/index.rst b/docs/index.rst index 52714c3..b9ac826 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -6,7 +6,8 @@ python-arango-async ------------------- Welcome to the documentation for python-arango-async_, a Python driver for ArangoDB_. -You can check out a demo app at [python-arango-async-demo](https://github.com/apetenchea/python-arango-async-demo). + +You can check out a demo app at python-arango-async-demo_. Requirements ============= @@ -91,3 +92,4 @@ Contents .. _ArangoDB: https://www.arangodb.com .. _python-arango-async: https://github.com/arangodb/python-arango-async +.. 
_python-arango-async-demo: https://github.com/apetenchea/python-arango-async-demo
From b4e4bb533bb7232ebb409ee559cde7735b7403f2 Mon Sep 17 00:00:00 2001
From: Alex Petenchea
Date: Fri, 31 Oct 2025 22:21:36 +0800
Subject: [PATCH 35/47] Updating test matrix (#70)

---
 .circleci/config.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index fb1bc8e..836c418 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -20,8 +20,8 @@ workflows:
           parameters:
             python_version: ["3.10", "3.11", "3.12"]
             arangodb_config: ["single", "cluster"]
-            arangodb_license: ["community", "enterprise"]
-            arangodb_version: ["3.11", "3.12"]
+            arangodb_license: ["enterprise"]
+            arangodb_version: ["3.12"]

 jobs:
   lint:
From 414a4de7e72e832492d26017e3d3ee84bcb24ac9 Mon Sep 17 00:00:00 2001
From: Alex Petenchea
Date: Mon, 3 Nov 2025 15:47:11 +0800
Subject: [PATCH 36/47] /_admin/server/aql-queries (#71)

* Implemented /_admin/server/aql-queries
* Version bump

---
 arangoasync/aql.py        | 20 ++++++++++++++++++++
 arangoasync/exceptions.py |  4 ++++
 arangoasync/version.py    |  2 +-
 tests/test_aql.py         |  5 +++++
 4 files changed, 30 insertions(+), 1 deletion(-)

diff --git a/arangoasync/aql.py b/arangoasync/aql.py
index b81cade..1fad880 100644
--- a/arangoasync/aql.py
+++ b/arangoasync/aql.py
@@ -16,6 +16,7 @@
     AQLQueryClearError,
     AQLQueryExecuteError,
     AQLQueryExplainError,
+    AQLQueryHistoryError,
     AQLQueryKillError,
     AQLQueryListError,
     AQLQueryRulesGetError,
@@ -426,6 +427,25 @@ def response_handler(resp: Response) -> QueryTrackingConfiguration:

         return await self._executor.execute(request, response_handler)

+    async def history(self) -> Result[Json]:
+        """Return recently executed AQL queries (admin only).
+
+        Returns:
+            dict: AQL query history.
+
+        Raises:
+            AQLQueryHistoryError: If retrieval fails.
+        """
+        request = Request(method=Method.GET, endpoint="/_admin/server/aql-queries")
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise AQLQueryHistoryError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return cast(Json, result["result"])
+
+        return await self._executor.execute(request, response_handler)
+
     async def queries(self, all_queries: bool = False) -> Result[Jsons]:
         """Return a list of currently running queries.
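The new ``history()`` call sits next to the existing query-listing helpers. Below is a
minimal usage sketch, assuming a database handle with admin privileges as in the test
further down:

.. code-block:: python

    # Recently executed AQL queries, as reported by /_admin/server/aql-queries.
    # The exact shape of the returned dict depends on the server version.
    history = await db.aql.history()
    print(history)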
diff --git a/arangoasync/exceptions.py b/arangoasync/exceptions.py index 5a904ee..a940e1b 100644 --- a/arangoasync/exceptions.py +++ b/arangoasync/exceptions.py @@ -111,6 +111,10 @@ class AQLQueryExplainError(ArangoServerError): """Failed to parse and explain query.""" +class AQLQueryHistoryError(ArangoServerError): + """Failed to retrieve running AQL queries.""" + + class AQLQueryKillError(ArangoServerError): """Failed to kill the query.""" diff --git a/arangoasync/version.py b/arangoasync/version.py index 7863915..976498a 100644 --- a/arangoasync/version.py +++ b/arangoasync/version.py @@ -1 +1 @@ -__version__ = "1.0.2" +__version__ = "1.0.3" diff --git a/tests/test_aql.py b/tests/test_aql.py index ab5ba19..24f233f 100644 --- a/tests/test_aql.py +++ b/tests/test_aql.py @@ -21,6 +21,7 @@ AQLQueryClearError, AQLQueryExecuteError, AQLQueryExplainError, + AQLQueryHistoryError, AQLQueryKillError, AQLQueryListError, AQLQueryRulesGetError, @@ -96,6 +97,8 @@ async def test_list_queries(superuser, db, bad_db): _ = await superuser.aql.slow_queries(all_queries=True) await aql.clear_slow_queries() await superuser.aql.clear_slow_queries(all_queries=True) + history = await superuser.aql.history() + assert isinstance(history, dict) with pytest.raises(AQLQueryListError): _ = await bad_db.aql.queries() @@ -109,6 +112,8 @@ async def test_list_queries(superuser, db, bad_db): _ = await aql.slow_queries(all_queries=True) with pytest.raises(AQLQueryClearError): await aql.clear_slow_queries(all_queries=True) + with pytest.raises(AQLQueryHistoryError): + _ = await bad_db.aql.history() long_running_task.cancel() From 5f20e5cbf709b278eee20e0c4fec731d8253c1a2 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Tue, 9 Dec 2025 19:43:13 +0800 Subject: [PATCH 37/47] Do not add 8529 to the ports list, if another one is already specified (#72) --- tests/conftest.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 66e5a9d..295b946 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -39,7 +39,7 @@ def pytest_addoption(parser): "--host", action="store", default="127.0.0.1", help="ArangoDB host address" ) parser.addoption( - "--port", action="append", default=["8529"], help="ArangoDB coordinator ports" + "--port", action="append", default=None, help="ArangoDB coordinator ports" ) parser.addoption( "--root", action="store", default="root", help="ArangoDB root user" @@ -59,7 +59,7 @@ def pytest_addoption(parser): def pytest_configure(config): - ports = config.getoption("port") + ports = config.getoption("port") or ["8529"] hosts = [f"http://{config.getoption('host')}:{p}" for p in ports] url = hosts[0] From 219d71ab012c0dc4bc2c245209c0a12171aedfed Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Sun, 14 Dec 2025 23:32:36 +0800 Subject: [PATCH 38/47] Test-only improvements (#73) * Test-only improvements * enterprise option is obsolete * Updating circleci config * Fix smartgraph --- .circleci/config.yml | 4 ++-- pyproject.toml | 1 + tests/conftest.py | 23 +++++++++++++++++------ tests/static/cluster-3.11.conf | 14 -------------- tests/static/single-3.11.conf | 12 ------------ tests/test_analyzer.py | 4 ++-- tests/test_aql.py | 8 +++----- tests/test_backup.py | 23 +++++------------------ tests/test_client.py | 7 ++++--- tests/test_cluster.py | 4 ++-- tests/test_database.py | 4 ++-- tests/test_foxx.py | 5 ++++- tests/test_graph.py | 4 ++-- tests/test_transaction.py | 5 ++++- 14 files changed, 48 insertions(+), 70 deletions(-) delete mode 100644 
tests/static/cluster-3.11.conf delete mode 100644 tests/static/single-3.11.conf diff --git a/.circleci/config.yml b/.circleci/config.yml index 836c418..b71ba0b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -86,8 +86,8 @@ jobs: args+=("--cluster" "--port=8539" "--port=8549") fi - if [ << parameters.arangodb_license >> = "enterprise" ]; then - args+=("--enterprise") + if [ << parameters.arangodb_license >> != "enterprise" ]; then + args+=("--skip enterprise") fi echo "Running pytest with args: ${args[@]}" diff --git a/pyproject.toml b/pyproject.toml index ef00aea..b01c76f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -60,6 +60,7 @@ dev = [ "pytest-cov>=5.0", "sphinx>=7.3", "sphinx_rtd_theme>=2.0", + "allure-pytest>=2.15", "types-setuptools", ] diff --git a/tests/conftest.py b/tests/conftest.py index 295b946..c09292d 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -27,8 +27,8 @@ class GlobalData: graph_name: str = "test_graph" username: str = generate_username() cluster: bool = False - enterprise: bool = False - db_version: version = version.parse("0.0.0") + skip: list[str] = None + db_version: version.Version = version.parse("0.0.0") global_data = GlobalData() @@ -54,7 +54,18 @@ def pytest_addoption(parser): "--cluster", action="store_true", help="Run tests in a cluster setup" ) parser.addoption( - "--enterprise", action="store_true", help="Run tests in an enterprise setup" + "--skip", + action="store", + nargs="*", + choices=[ + "backup", # backup tests + "jwt-secret-keyfile", # server was not configured with a keyfile + "foxx", # foxx is not supported + "js-transactions", # javascript transactions are not supported + "enterprise", # skip what used to be "enterprise-only" before 3.12 + ], + default=[], + help="Skip specific tests", ) @@ -69,7 +80,7 @@ def pytest_configure(config): global_data.secret = config.getoption("secret") global_data.token = JwtToken.generate_token(global_data.secret) global_data.cluster = config.getoption("cluster") - global_data.enterprise = config.getoption("enterprise") + global_data.skip = config.getoption("skip") global_data.graph_name = generate_graph_name() async def get_db_version(): @@ -112,8 +123,8 @@ def cluster(): @pytest.fixture -def enterprise(): - return global_data.enterprise +def skip_tests(): + return global_data.skip @pytest.fixture diff --git a/tests/static/cluster-3.11.conf b/tests/static/cluster-3.11.conf deleted file mode 100644 index 86f7855..0000000 --- a/tests/static/cluster-3.11.conf +++ /dev/null @@ -1,14 +0,0 @@ -[starter] -mode = cluster -local = true -address = 0.0.0.0 -port = 8528 - -[auth] -jwt-secret = /tests/static/keyfile - -[args] -all.database.password = passwd -all.database.extended-names = true -all.log.api-enabled = true -all.javascript.allow-admin-execute = true diff --git a/tests/static/single-3.11.conf b/tests/static/single-3.11.conf deleted file mode 100644 index df45cb7..0000000 --- a/tests/static/single-3.11.conf +++ /dev/null @@ -1,12 +0,0 @@ -[starter] -mode = single -address = 0.0.0.0 -port = 8528 - -[auth] -jwt-secret = /tests/static/keyfile - -[args] -all.database.password = passwd -all.database.extended-names = true -all.javascript.allow-admin-execute = true diff --git a/tests/test_analyzer.py b/tests/test_analyzer.py index 856b6d7..0557f64 100644 --- a/tests/test_analyzer.py +++ b/tests/test_analyzer.py @@ -11,7 +11,7 @@ @pytest.mark.asyncio -async def test_analyzer_management(db, bad_db, enterprise, db_version): +async def test_analyzer_management(db, bad_db, skip_tests, 
db_version): analyzer_name = generate_analyzer_name() full_analyzer_name = db.name + "::" + analyzer_name bad_analyzer_name = generate_analyzer_name() @@ -68,7 +68,7 @@ async def test_analyzer_management(db, bad_db, enterprise, db_version): assert await db.delete_analyzer(analyzer_name, ignore_missing=True) is False # Test create geo_s2 analyzer - if enterprise: + if "enterprise" not in skip_tests: analyzer_name = generate_analyzer_name() result = await db.create_analyzer(analyzer_name, "geo_s2", properties={}) assert result["type"] == "geo_s2" diff --git a/tests/test_aql.py b/tests/test_aql.py index 24f233f..28fa91c 100644 --- a/tests/test_aql.py +++ b/tests/test_aql.py @@ -279,17 +279,15 @@ async def test_cache_plan_management(db, bad_db, doc_col, docs, db_version): entries = await cache.plan_entries() assert isinstance(entries, list) assert len(entries) > 0 - with pytest.raises(AQLCacheEntriesError) as err: - _ = await bad_db.aql.cache.plan_entries() - assert err.value.error_code == FORBIDDEN + with pytest.raises(AQLCacheEntriesError): + await bad_db.aql.cache.plan_entries() # Clear the cache await cache.clear_plan() entries = await cache.plan_entries() assert len(entries) == 0 - with pytest.raises(AQLCacheClearError) as err: + with pytest.raises(AQLCacheClearError): await bad_db.aql.cache.clear_plan() - assert err.value.error_code == FORBIDDEN @pytest.mark.asyncio diff --git a/tests/test_backup.py b/tests/test_backup.py index d2fb07e..3bb5492 100644 --- a/tests/test_backup.py +++ b/tests/test_backup.py @@ -2,19 +2,12 @@ from packaging import version from arangoasync.client import ArangoClient -from arangoasync.exceptions import ( - BackupCreateError, - BackupDeleteError, - BackupDownloadError, - BackupGetError, - BackupRestoreError, - BackupUploadError, -) +from arangoasync.exceptions import BackupDeleteError, BackupRestoreError @pytest.mark.asyncio -async def test_backup(url, sys_db_name, bad_db, token, enterprise, cluster, db_version): - if not enterprise: +async def test_backup(url, sys_db_name, bad_db, token, cluster, db_version, skip_tests): + if "enterprise" in skip_tests: pytest.skip("Backup API is only available in ArangoDB Enterprise Edition") if not cluster: pytest.skip("For simplicity, the backup API is only tested in cluster setups") @@ -22,19 +15,13 @@ async def test_backup(url, sys_db_name, bad_db, token, enterprise, cluster, db_v pytest.skip( "For simplicity, the backup API is only tested in the latest versions" ) + if "backup" in skip_tests: + pytest.skip("Skipping backup tests") - with pytest.raises(BackupCreateError): - await bad_db.backup.create() - with pytest.raises(BackupGetError): - await bad_db.backup.get() with pytest.raises(BackupRestoreError): await bad_db.backup.restore("foobar") with pytest.raises(BackupDeleteError): await bad_db.backup.delete("foobar") - with pytest.raises(BackupUploadError): - await bad_db.backup.upload() - with pytest.raises(BackupDownloadError): - await bad_db.backup.download() async with ArangoClient(hosts=url) as client: db = await client.db( diff --git a/tests/test_client.py b/tests/test_client.py index cb488a7..cbd96d4 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -121,16 +121,17 @@ async def test_client_jwt_auth(url, sys_db_name, basic_auth_root): @pytest.mark.asyncio async def test_client_jwt_superuser_auth( - url, sys_db_name, basic_auth_root, token, enterprise + url, sys_db_name, basic_auth_root, token, skip_tests ): # successful authentication async with ArangoClient(hosts=url) as client: db = await client.db( 
sys_db_name, auth_method="superuser", token=token, verify=True ) - if enterprise: + if "enterprise" not in skip_tests: await db.jwt_secrets() - await db.reload_jwt_secrets() + if "jwt-secret-keyfile" not in skip_tests: + await db.reload_jwt_secrets() # Get TLS data tls = await db.tls() diff --git a/tests/test_cluster.py b/tests/test_cluster.py index d5b0b75..9a68a6b 100644 --- a/tests/test_cluster.py +++ b/tests/test_cluster.py @@ -15,11 +15,11 @@ @pytest.mark.asyncio async def test_cluster( - url, sys_db_name, bad_db, token, enterprise, cluster, db_version + url, sys_db_name, bad_db, token, skip_tests, cluster, db_version ): if not cluster: pytest.skip("Cluster API is only tested in cluster setups") - if not enterprise or db_version < version.parse("3.12.0"): + if "enterprise" in skip_tests or db_version < version.parse("3.12.0"): pytest.skip( "For simplicity, the cluster API is only tested in the latest versions" ) diff --git a/tests/test_database.py b/tests/test_database.py index 33dcc56..519d0ce 100644 --- a/tests/test_database.py +++ b/tests/test_database.py @@ -59,7 +59,7 @@ @pytest.mark.asyncio async def test_database_misc_methods( - sys_db, db, bad_db, cluster, db_version, url, sys_db_name, token, enterprise + sys_db, db, bad_db, cluster, db_version, url, sys_db_name, token, skip_tests ): # Status status = await sys_db.status() @@ -181,7 +181,7 @@ async def test_database_misc_methods( response = await sys_db.request(request) assert json.loads(response.raw_body) == 1 - if enterprise and db_version >= version.parse("3.12.0"): + if "enterprise" not in skip_tests and db_version >= version.parse("3.12.0"): # API calls with pytest.raises(ServerApiCallsError): await bad_db.api_calls() diff --git a/tests/test_foxx.py b/tests/test_foxx.py index 065530d..c407215 100644 --- a/tests/test_foxx.py +++ b/tests/test_foxx.py @@ -35,7 +35,10 @@ @pytest.mark.asyncio -async def test_foxx(db, bad_db): +async def test_foxx(db, bad_db, skip_tests): + if "foxx" in skip_tests: + pytest.skip("Skipping Foxx tests") + # Test errors with pytest.raises(FoxxServiceGetError): await bad_db.foxx.service(service_name) diff --git a/tests/test_graph.py b/tests/test_graph.py index 6d5fcbe..5d70255 100644 --- a/tests/test_graph.py +++ b/tests/test_graph.py @@ -56,10 +56,10 @@ async def test_graph_basic(db, bad_db): @pytest.mark.asyncio -async def test_graph_properties(db, bad_graph, cluster, enterprise): +async def test_graph_properties(db, bad_graph, cluster, skip_tests): # Create a graph name = generate_graph_name() - is_smart = cluster and enterprise + is_smart = cluster and "enterprise" not in skip_tests options = GraphOptions(number_of_shards=3) graph = await db.create_graph(name, is_smart=is_smart, options=options) diff --git a/tests/test_transaction.py b/tests/test_transaction.py index f7d7f76..1a7363c 100644 --- a/tests/test_transaction.py +++ b/tests/test_transaction.py @@ -14,7 +14,10 @@ @pytest.mark.asyncio -async def test_transaction_execute_raw(db, doc_col, docs): +async def test_transaction_execute_raw(db, doc_col, docs, skip_tests): + if "js-transactions" in skip_tests: + pytest.skip("Skipping JS transaction tests") + # Test a valid JS transaction doc = docs[0] key = doc["_key"] From e4ade5552e3f1c758538cf322eb1aef6a3c0d3c7 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Mon, 15 Dec 2025 23:43:24 +0800 Subject: [PATCH 39/47] Option for skipping task test (#74) --- tests/conftest.py | 1 + tests/test_task.py | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/conftest.py 
b/tests/conftest.py index c09292d..f9b203f 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -62,6 +62,7 @@ def pytest_addoption(parser): "jwt-secret-keyfile", # server was not configured with a keyfile "foxx", # foxx is not supported "js-transactions", # javascript transactions are not supported + "task", # tasks API "enterprise", # skip what used to be "enterprise-only" before 3.12 ], default=[], diff --git a/tests/test_task.py b/tests/test_task.py index 4e1aee6..008e25d 100644 --- a/tests/test_task.py +++ b/tests/test_task.py @@ -10,10 +10,13 @@ @pytest.mark.asyncio -async def test_task_management(sys_db, bad_db): +async def test_task_management(sys_db, bad_db, skip_tests): # This test intentionally uses the system database because cleaning up tasks is # easier there. + if "task" in skip_tests: + pytest.skip("Skipping task tests") + test_command = 'require("@arangodb").print(params);' # Test errors From eb4922abcb9b27c652d40a9fcbc41057da43e45a Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Fri, 19 Dec 2025 18:30:25 +0800 Subject: [PATCH 40/47] Access Tokens (#75) * Adding support for access tokens * Docs fix --- arangoasync/auth.py | 4 +- arangoasync/client.py | 8 +++- arangoasync/database.py | 94 +++++++++++++++++++++++++++++++++++++++ arangoasync/exceptions.py | 12 +++++ arangoasync/typings.py | 52 ++++++++++++++++++++++ tests/helpers.py | 9 ++++ tests/test_client.py | 56 ++++++++++++++++++++++- tests/test_typings.py | 26 +++++++++++ 8 files changed, 256 insertions(+), 5 deletions(-) diff --git a/arangoasync/auth.py b/arangoasync/auth.py index 96e9b1b..a4df28f 100644 --- a/arangoasync/auth.py +++ b/arangoasync/auth.py @@ -20,8 +20,8 @@ class Auth: encoding (str): Encoding for the password (default: utf-8) """ - username: str - password: str + username: str = "" + password: str = "" encoding: str = "utf-8" diff --git a/arangoasync/client.py b/arangoasync/client.py index 235cfae..b2eed10 100644 --- a/arangoasync/client.py +++ b/arangoasync/client.py @@ -147,7 +147,7 @@ async def db( self, name: str, auth_method: str = "basic", - auth: Optional[Auth] = None, + auth: Optional[Auth | str] = None, token: Optional[JwtToken] = None, verify: bool = False, compression: Optional[CompressionManager] = None, @@ -169,7 +169,8 @@ and client are synchronized. - "superuser": Superuser JWT authentication. The `token` parameter is required. The `auth` parameter is ignored. - auth (Auth | None): Login information. + auth (Auth | str | None): Login information (username and password) or + access token. token (JwtToken | None): JWT token. verify (bool): Verify the connection by sending a test request.
compression (CompressionManager | None): If set, supersedes the @@ -188,6 +189,9 @@ """ connection: Connection + if isinstance(auth, str): + auth = Auth(password=auth) + if auth_method == "basic": if auth is None: raise ValueError("Basic authentication requires the `auth` parameter") diff --git a/arangoasync/database.py b/arangoasync/database.py index a28fa43..2cbbc68 100644 --- a/arangoasync/database.py +++ b/arangoasync/database.py @@ -17,6 +17,9 @@ from arangoasync.connection import Connection from arangoasync.errno import HTTP_FORBIDDEN, HTTP_NOT_FOUND from arangoasync.exceptions import ( + AccessTokenCreateError, + AccessTokenDeleteError, + AccessTokenListError, AnalyzerCreateError, AnalyzerDeleteError, AnalyzerGetError, @@ -107,6 +110,7 @@ from arangoasync.result import Result from arangoasync.serialization import Deserializer, Serializer from arangoasync.typings import ( + AccessToken, CollectionInfo, CollectionType, DatabaseProperties, @@ -2130,6 +2134,96 @@ def response_handler(resp: Response) -> Json: return await self._executor.execute(request, response_handler) + async def create_access_token( + self, + user: str, + name: str, + valid_until: int, + ) -> Result[AccessToken]: + """Create an access token for the given user. + + Args: + user (str): The name of the user. + name (str): A name for the access token to make identification easier, + like a short description. + valid_until (int): A Unix timestamp in seconds to set the expiration date and time. + + Returns: + AccessToken: Information about the created access token, including the token itself. + + Raises: + AccessTokenCreateError: If the operation fails. + + References: + - `create-an-access-token `__ + """ # noqa: E501 + data: Json = { + "name": name, + "valid_until": valid_until, + } + + request = Request( + method=Method.POST, + endpoint=f"/_api/token/{user}", + data=self.serializer.dumps(data), + ) + + def response_handler(resp: Response) -> AccessToken: + if not resp.is_success: + raise AccessTokenCreateError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return AccessToken(result) + + return await self._executor.execute(request, response_handler) + + async def delete_access_token(self, user: str, token_id: int) -> None: + """Delete an access token for the given user. + + Args: + user (str): The name of the user. + token_id (int): The ID of the access token to delete. + + Raises: + AccessTokenDeleteError: If the operation fails. + + References: + - `delete-an-access-token `__ + """ # noqa: E501 + request = Request( + method=Method.DELETE, endpoint=f"/_api/token/{user}/{token_id}" + ) + + def response_handler(resp: Response) -> None: + if not resp.is_success: + raise AccessTokenDeleteError(resp, request) + + await self._executor.execute(request, response_handler) + + async def list_access_tokens(self, user: str) -> Result[Jsons]: + """List all access tokens for the given user. + + Args: + user (str): The name of the user. + + Returns: + list: List of access tokens for the user. + + Raises: + AccessTokenListError: If the operation fails.
+ + References: + - `list-all-access-tokens `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint=f"/_api/token/{user}") + + def response_handler(resp: Response) -> Jsons: + if not resp.is_success: + raise AccessTokenListError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return cast(Jsons, result["tokens"]) + + return await self._executor.execute(request, response_handler) + async def tls(self) -> Result[Json]: """Return TLS data (keyfile, clientCA). diff --git a/arangoasync/exceptions.py b/arangoasync/exceptions.py index a940e1b..58a9505 100644 --- a/arangoasync/exceptions.py +++ b/arangoasync/exceptions.py @@ -139,6 +139,18 @@ class AQLQueryValidateError(ArangoServerError): """Failed to parse and validate query.""" +class AccessTokenCreateError(ArangoServerError): + """Failed to create an access token.""" + + +class AccessTokenDeleteError(ArangoServerError): + """Failed to delete an access token.""" + + +class AccessTokenListError(ArangoServerError): + """Failed to retrieve access tokens.""" + + class AnalyzerCreateError(ArangoServerError): """Failed to create analyzer.""" diff --git a/arangoasync/typings.py b/arangoasync/typings.py index d49411d..0d85035 100644 --- a/arangoasync/typings.py +++ b/arangoasync/typings.py @@ -2024,3 +2024,55 @@ def __init__( @property def satellites(self) -> Optional[List[str]]: return cast(Optional[List[str]], self._data.get("satellites")) + + +class AccessToken(JsonWrapper): + """User access token. + + Example: + .. code-block:: json + + { + "id" : 1, + "name" : "Token for Service A", + "valid_until" : 1782864000, + "created_at" : 1765543306, + "fingerprint" : "v1...71227d", + "active" : true, + "token" : "v1.7b2265223a3137471227d" + } + + References: + - `create-an-access-token `__ + """ # noqa: E501 + + def __init__(self, data: Json) -> None: + super().__init__(data) + + @property + def active(self) -> bool: + return cast(bool, self._data["active"]) + + @property + def created_at(self) -> int: + return cast(int, self._data["created_at"]) + + @property + def fingerprint(self) -> str: + return cast(str, self._data["fingerprint"]) + + @property + def id(self) -> int: + return cast(int, self._data["id"]) + + @property + def name(self) -> str: + return cast(str, self._data["name"]) + + @property + def token(self) -> str: + return cast(str, self._data["token"]) + + @property + def valid_until(self) -> int: + return cast(int, self._data["valid_until"]) diff --git a/tests/helpers.py b/tests/helpers.py index 0e6e8a8..2bc04a5 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -89,3 +89,12 @@ def generate_service_mount(): str: Random service name. """ return f"/test_{uuid4().hex}" + + +def generate_token_name(): + """Generate and return a random token name. + + Returns: + str: Random token name. 
+ """ + return f"test_token_{uuid4().hex}" diff --git a/tests/test_client.py b/tests/test_client.py index cbd96d4..2218384 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -1,12 +1,20 @@ +import time + import pytest from arangoasync.auth import JwtToken from arangoasync.client import ArangoClient from arangoasync.compression import DefaultCompressionManager -from arangoasync.exceptions import ServerEncryptionError +from arangoasync.exceptions import ( + AccessTokenCreateError, + AccessTokenDeleteError, + AccessTokenListError, + ServerEncryptionError, +) from arangoasync.http import DefaultHTTPClient from arangoasync.resolver import DefaultHostResolver, RoundRobinHostResolver from arangoasync.version import __version__ +from tests.helpers import generate_token_name @pytest.mark.asyncio @@ -152,3 +160,49 @@ async def test_client_jwt_superuser_auth( await client.db( sys_db_name, auth_method="superuser", auth=basic_auth_root, verify=True ) + + +@pytest.mark.asyncio +async def test_client_access_token(url, sys_db_name, basic_auth_root, bad_db): + username = basic_auth_root.username + + async with ArangoClient(hosts=url) as client: + # First login with basic auth + db_auth_basic = await client.db( + sys_db_name, + auth_method="basic", + auth=basic_auth_root, + verify=True, + ) + + # Create an access token + token_name = generate_token_name() + token = await db_auth_basic.create_access_token( + user=username, name=token_name, valid_until=int(time.time() + 3600) + ) + assert token.active is True + + # Cannot create a token with the same name + with pytest.raises(AccessTokenCreateError): + await db_auth_basic.create_access_token( + user=username, name=token_name, valid_until=int(time.time() + 3600) + ) + + # Authenticate with the created token + access_token_db = await client.db( + sys_db_name, + auth_method="basic", + auth=token.token, + verify=True, + ) + + # List access tokens + tokens = await access_token_db.list_access_tokens(username) + assert isinstance(tokens, list) + with pytest.raises(AccessTokenListError): + await bad_db.list_access_tokens(username) + + # Clean up - delete the created token + await access_token_db.delete_access_token(username, token.id) + with pytest.raises(AccessTokenDeleteError): + await access_token_db.delete_access_token(username, token.id) diff --git a/tests/test_typings.py b/tests/test_typings.py index 3b4e5e2..48e9eb0 100644 --- a/tests/test_typings.py +++ b/tests/test_typings.py @@ -1,6 +1,7 @@ import pytest from arangoasync.typings import ( + AccessToken, CollectionInfo, CollectionStatistics, CollectionStatus, @@ -446,3 +447,28 @@ def test_CollectionStatistics(): assert stats.key_options["type"] == "traditional" assert stats.computed_values is None assert stats.object_id == "69124" + + +def test_AccessToken(): + data = { + "active": True, + "created_at": 1720000000, + "fingerprint": "abc123fingerprint", + "id": 42, + "name": "ci-token", + "token": "v2.local.eyJhbGciOi...", + "valid_until": 1720003600, + } + + access_token = AccessToken(data) + + assert access_token.active is True + assert access_token.created_at == 1720000000 + assert access_token.fingerprint == "abc123fingerprint" + assert access_token.id == 42 + assert access_token.name == "ci-token" + assert access_token.token == "v2.local.eyJhbGciOi..." 
+ assert access_token.valid_until == 1720003600 + + # JsonWrapper behavior + assert access_token.to_dict() == data From e833767d03fd4b99771f6d249cbf20d25c789c2a Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Fri, 19 Dec 2025 18:52:05 +0800 Subject: [PATCH 41/47] Adding test parameters for foxx service path and backup path (#76) --- tests/conftest.py | 26 ++++++++++++++++++++++++++ tests/test_backup.py | 10 +++++----- tests/test_foxx.py | 15 +++++++-------- 3 files changed, 38 insertions(+), 13 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index f9b203f..5025142 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -28,6 +28,8 @@ class GlobalData: username: str = generate_username() cluster: bool = False skip: list[str] = None + foxx_path: str = None + backup_path: str = None db_version: version.Version = version.parse("0.0.0") @@ -53,6 +55,18 @@ def pytest_addoption(parser): parser.addoption( "--cluster", action="store_true", help="Run tests in a cluster setup" ) + parser.addoption( + "--foxx-path", + action="store", + default="/tests/static/service.zip", + help="Foxx tests service path", + ) + parser.addoption( + "--backup-path", + action="store", + default="local://tmp", + help="Backup tests repository path", + ) parser.addoption( "--skip", action="store", @@ -82,6 +96,8 @@ def pytest_configure(config): global_data.token = JwtToken.generate_token(global_data.secret) global_data.cluster = config.getoption("cluster") global_data.skip = config.getoption("skip") + global_data.backup_path = config.getoption("backup_path") + global_data.foxx_path = config.getoption("foxx_path") global_data.graph_name = generate_graph_name() async def get_db_version(): @@ -123,6 +139,16 @@ def cluster(): return global_data.cluster +@pytest.fixture +def backup_path(): + return global_data.backup_path + + +@pytest.fixture +def foxx_path(): + return global_data.foxx_path + + @pytest.fixture def skip_tests(): return global_data.skip diff --git a/tests/test_backup.py b/tests/test_backup.py index 3bb5492..7e6e37e 100644 --- a/tests/test_backup.py +++ b/tests/test_backup.py @@ -6,7 +6,9 @@ @pytest.mark.asyncio -async def test_backup(url, sys_db_name, bad_db, token, cluster, db_version, skip_tests): +async def test_backup( + url, sys_db_name, bad_db, token, cluster, db_version, skip_tests, backup_path +): if "enterprise" in skip_tests: pytest.skip("Backup API is only available in ArangoDB Enterprise Edition") if not cluster: @@ -35,10 +37,8 @@ async def test_backup(url, sys_db_name, bad_db, token, cluster, db_version, skip result = await backup.restore(backup_id) assert "previous" in result config = {"local": {"type": "local"}} - result = await backup.upload(backup_id, repository="local://tmp", config=config) + result = await backup.upload(backup_id, repository=backup_path, config=config) assert "uploadId" in result - result = await backup.download( - backup_id, repository="local://tmp", config=config - ) + result = await backup.download(backup_id, repository=backup_path, config=config) assert "downloadId" in result await backup.delete(backup_id) diff --git a/tests/test_foxx.py b/tests/test_foxx.py index c407215..e972dc2 100644 --- a/tests/test_foxx.py +++ b/tests/test_foxx.py @@ -30,12 +30,11 @@ ) from tests.helpers import generate_service_mount -service_file = "/tests/static/service.zip" service_name = "test" @pytest.mark.asyncio -async def test_foxx(db, bad_db, skip_tests): +async def test_foxx(db, bad_db, skip_tests, foxx_path): if "foxx" in skip_tests: pytest.skip("Skipping Foxx tests") 
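With the new knobs in place, a run against a custom bundle looks like `pytest --cluster --foxx-path /path/to/service.zip --skip backup task`. A minimal sketch of a test consuming the new fixtures (the test name is hypothetical, and an empty configuration is assumed to be acceptable for the bundle):

```python
import pytest

from tests.helpers import generate_service_mount


@pytest.mark.asyncio
async def test_install_custom_service(db, skip_tests, foxx_path):
    # Same guard pattern as the other Foxx tests.
    if "foxx" in skip_tests:
        pytest.skip("Skipping Foxx tests")

    # foxx_path is filled in from --foxx-path (default: /tests/static/service.zip).
    mount = generate_service_mount()
    service = {"source": foxx_path, "configuration": {}, "dependencies": {}}
    service_info = await db.foxx.create_service(mount=mount, service=service)
    assert service_info["mount"] == mount
```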
@@ -90,7 +89,7 @@ async def test_foxx(db, bad_db, skip_tests): # Service as a path mount1 = generate_service_mount() service1 = { - "source": service_file, + "source": foxx_path, "configuration": {"LOG_LEVEL": "info"}, "dependencies": {}, } @@ -102,7 +101,7 @@ async def test_foxx(db, bad_db, skip_tests): service2 = aiohttp.FormData() service2.add_field( "source", - open(f".{service_file}", "rb"), + open(f".{foxx_path}", "rb"), filename="service.zip", content_type="application/zip", ) @@ -115,7 +114,7 @@ async def test_foxx(db, bad_db, skip_tests): # Service as raw data mount3 = generate_service_mount() - async with aiofiles.open(f".{service_file}", mode="rb") as f: + async with aiofiles.open(f".{foxx_path}", mode="rb") as f: service3 = await f.read() service_info = await db.foxx.create_service( mount=mount3, service=service3, headers={"content-type": "application/zip"} @@ -127,14 +126,14 @@ async def test_foxx(db, bad_db, skip_tests): # Replace service service4 = { - "source": service_file, + "source": foxx_path, "configuration": {"LOG_LEVEL": "info"}, "dependencies": {}, } service_info = await db.foxx.replace_service(mount=mount2, service=service4) assert service_info["mount"] == mount2 - async with aiofiles.open(f".{service_file}", mode="rb") as f: + async with aiofiles.open(f".{foxx_path}", mode="rb") as f: service5 = await f.read() service_info = await db.foxx.replace_service( mount=mount1, service=service5, headers={"content-type": "application/zip"} @@ -143,7 +142,7 @@ async def test_foxx(db, bad_db, skip_tests): # Update service service6 = { - "source": service_file, + "source": foxx_path, "configuration": {"LOG_LEVEL": "debug"}, "dependencies": {}, } From 45cee455ad2bae8af6a8017b4d385346403c839a Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Sat, 20 Dec 2025 23:20:30 +0800 Subject: [PATCH 42/47] No longer using --enterprise option --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 66044c4..f2899c8 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -10,7 +10,7 @@ pre-commit install # Install git pre-commit hooks Run unit tests with coverage: ```shell -pytest --enterprise --cluster --cov=arango --cov-report=html # Open htmlcov/index.html in your browser +pytest --cluster --cov=arango --cov-report=html # Open htmlcov/index.html in your browser ``` To start an ArangoDB instance locally, run: From ded4e013c4a557847e92a2a97eff1704d25e331a Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Sat, 20 Dec 2025 23:21:52 +0800 Subject: [PATCH 43/47] Image update --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index b71ba0b..c7f0fdd 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -7,7 +7,7 @@ executors: resource_class: small python-vm: machine: - image: ubuntu-2204:current + image: ubuntu-2404:current resource_class: medium workflows: From 50cbb5c1f9ec06f2ffb3a4a055163a8d9b4baa15 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Sat, 20 Dec 2025 23:31:43 +0800 Subject: [PATCH 44/47] Bump driver version --- arangoasync/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arangoasync/version.py b/arangoasync/version.py index 976498a..92192ee 100644 --- a/arangoasync/version.py +++ b/arangoasync/version.py @@ -1 +1 @@ -__version__ = "1.0.3" +__version__ = "1.0.4" From 039579b40c30e706f1be075cb2fa135f132c0917 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Sat, 20 Dec 2025 23:49:00
+0800 Subject: [PATCH 45/47] Changed docs URL (#77) --- README.md | 2 +- arangoasync/aql.py | 38 ++++---- arangoasync/backup.py | 12 +-- arangoasync/cluster.py | 24 ++--- arangoasync/collection.py | 90 ++++++++--------- arangoasync/cursor.py | 6 +- arangoasync/database.py | 192 ++++++++++++++++++------------------- arangoasync/foxx.py | 42 ++++---- arangoasync/graph.py | 38 ++++---- arangoasync/job.py | 10 +- arangoasync/replication.py | 14 +-- arangoasync/typings.py | 46 ++++----- docs/analyzer.rst | 2 +- docs/aql.rst | 2 +- docs/backup.rst | 2 +- docs/certificates.rst | 2 +- docs/cluster.rst | 2 +- docs/document.rst | 2 +- docs/foxx.rst | 2 +- docs/graph.rst | 2 +- docs/indexes.rst | 2 +- docs/overview.rst | 2 +- docs/view.rst | 6 +- 23 files changed, 270 insertions(+), 270 deletions(-) diff --git a/README.md b/README.md index b80d633..1232efa 100644 --- a/README.md +++ b/README.md @@ -75,7 +75,7 @@ async def main(): student_names.append(doc["name"]) ``` -Another example with [graphs](https://docs.arangodb.com/stable/graphs/): +Another example with [graphs](https://docs.arango.ai/stable/graphs/): ```python async def main(): diff --git a/arangoasync/aql.py b/arangoasync/aql.py index 1fad880..ec8efe4 100644 --- a/arangoasync/aql.py +++ b/arangoasync/aql.py @@ -78,7 +78,7 @@ async def entries(self) -> Result[Jsons]: AQLCacheEntriesError: If retrieval fails. References: - - `list-the-entries-of-the-aql-query-results-cache `__ + - `list-the-entries-of-the-aql-query-results-cache `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/query-cache/entries") @@ -99,7 +99,7 @@ async def plan_entries(self) -> Result[Jsons]: AQLCacheEntriesError: If retrieval fails. References: - - `list-the-entries-of-the-aql-query-plan-cache `__ + - `list-the-entries-of-the-aql-query-plan-cache `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/query-plan-cache") @@ -117,7 +117,7 @@ async def clear(self) -> Result[None]: AQLCacheClearError: If clearing the cache fails. References: - - `clear-the-aql-query-results-cache `__ + - `clear-the-aql-query-results-cache `__ """ # noqa: E501 request = Request(method=Method.DELETE, endpoint="/_api/query-cache") @@ -134,7 +134,7 @@ async def clear_plan(self) -> Result[None]: AQLCacheClearError: If clearing the cache fails. References: - - `clear-the-aql-query-plan-cache `__ + - `clear-the-aql-query-plan-cache `__ """ # noqa: E501 request = Request(method=Method.DELETE, endpoint="/_api/query-plan-cache") @@ -154,7 +154,7 @@ async def properties(self) -> Result[QueryCacheProperties]: AQLCachePropertiesError: If retrieval fails. References: - - `get-the-aql-query-results-cache-configuration `__ + - `get-the-aql-query-results-cache-configuration `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/query-cache/properties") @@ -193,7 +193,7 @@ async def configure( AQLCacheConfigureError: If setting the configuration fails. References: - - `set-the-aql-query-results-cache-configuration `__ + - `set-the-aql-query-results-cache-configuration `__ """ # noqa: E501 data: Json = dict() if mode is not None: @@ -298,7 +298,7 @@ async def execute( Cursor: Result cursor. References: - - `create-a-cursor `__ + - `create-a-cursor `__ """ # noqa: E501 data: Json = dict(query=query) if count is not None: @@ -353,7 +353,7 @@ async def tracking(self) -> Result[QueryTrackingConfiguration]: AQLQueryTrackingGetError: If retrieval fails. 
References: - - `get-the-aql-query-tracking-configuration `__ + - `get-the-aql-query-tracking-configuration `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/query/properties") @@ -397,7 +397,7 @@ async def set_tracking( AQLQueryTrackingSetError: If setting the configuration fails. References: - - `update-the-aql-query-tracking-configuration `__ + - `update-the-aql-query-tracking-configuration `__ """ # noqa: E501 data: Json = dict() @@ -462,7 +462,7 @@ async def queries(self, all_queries: bool = False) -> Result[Jsons]: AQLQueryListError: If retrieval fails. References: - - `list-the-running-queries `__ + - `list-the-running-queries `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -493,7 +493,7 @@ async def slow_queries(self, all_queries: bool = False) -> Result[Jsons]: AQLQueryListError: If retrieval fails. References: - - `list-the-slow-aql-queries `__ + - `list-the-slow-aql-queries `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -523,7 +523,7 @@ async def clear_slow_queries(self, all_queries: bool = False) -> Result[None]: AQLQueryClearError: If retrieval fails. References: - - `clear-the-list-of-slow-aql-queries `__ + - `clear-the-list-of-slow-aql-queries `__ """ # noqa: E501 request = Request( method=Method.DELETE, @@ -560,7 +560,7 @@ async def kill( AQLQueryKillError: If killing the query fails. References: - - `kill-a-running-aql-query `__ + - `kill-a-running-aql-query `__ """ # noqa: E501 request = Request( method=Method.DELETE, @@ -598,7 +598,7 @@ async def explain( AQLQueryExplainError: If retrieval fails. References: - - `explain-an-aql-query `__ + - `explain-an-aql-query `__ """ # noqa: E501 data: Json = dict(query=query) if bind_vars is not None: @@ -634,7 +634,7 @@ async def validate(self, query: str) -> Result[Json]: AQLQueryValidateError: If validation fails. References: - - `parse-an-aql-query `__ + - `parse-an-aql-query `__ """ # noqa: E501 request = Request( method=Method.POST, @@ -659,7 +659,7 @@ async def query_rules(self) -> Result[Jsons]: AQLQueryRulesGetError: If retrieval fails. References: - - `list-all-aql-optimizer-rules `__ + - `list-all-aql-optimizer-rules `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/query/rules") @@ -684,7 +684,7 @@ async def functions(self, namespace: Optional[str] = None) -> Result[Jsons]: AQLFunctionListError: If retrieval fails. References: - - `list-the-registered-user-defined-aql-functions `__ + - `list-the-registered-user-defined-aql-functions `__ """ # noqa: E501 params: Json = dict() if namespace is not None: @@ -726,7 +726,7 @@ async def create_function( AQLFunctionCreateError: If registration fails. References: - - `create-a-user-defined-aql-function `__ + - `create-a-user-defined-aql-function `__ """ # noqa: E501 request = Request( method=Method.POST, @@ -765,7 +765,7 @@ async def delete_function( AQLFunctionDeleteError: If removal fails. References: - - `remove-a-user-defined-aql-function `__ + - `remove-a-user-defined-aql-function `__ """ # noqa: E501 params: Json = dict() if group is not None: diff --git a/arangoasync/backup.py b/arangoasync/backup.py index 75a26a6..e0847e0 100644 --- a/arangoasync/backup.py +++ b/arangoasync/backup.py @@ -49,7 +49,7 @@ async def get(self, backup_id: Optional[str] = None) -> Result[Json]: BackupGetError: If the operation fails. 
References: - - `list-backups `__ + - `list-backups `__ """ # noqa: E501 data: Json = {} if backup_id is not None: @@ -97,7 +97,7 @@ async def create( BackupCreateError: If the backup creation fails. References: - - `create-backup `__ + - `create-backup `__ """ # noqa: E501 data: Json = {} if label is not None: @@ -137,7 +137,7 @@ async def restore(self, backup_id: str) -> Result[Json]: BackupRestoreError: If the restore operation fails. References: - - `restore-backup `__ + - `restore-backup `__ """ # noqa: E501 data: Json = {"id": backup_id} request = Request( @@ -165,7 +165,7 @@ async def delete(self, backup_id: str) -> None: BackupDeleteError: If the delete operation fails. References: - - `delete-backup `__ + - `delete-backup `__ """ # noqa: E501 data: Json = {"id": backup_id} request = Request( @@ -209,7 +209,7 @@ async def upload( BackupUploadError: If upload operation fails. References: - - `upload-a-backup-to-a-remote-repository `__ + - `upload-a-backup-to-a-remote-repository `__ """ # noqa: E501 data: Json = {} if upload_id is not None: @@ -265,7 +265,7 @@ async def download( BackupDownloadError: If the download operation fails. References: - - `download-a-backup-from-a-remote-repository `__ + - `download-a-backup-from-a-remote-repository `__ """ # noqa: E501 data: Json = {} if download_id is not None: diff --git a/arangoasync/cluster.py b/arangoasync/cluster.py index ce33b92..39e3d56 100644 --- a/arangoasync/cluster.py +++ b/arangoasync/cluster.py @@ -45,7 +45,7 @@ async def health(self) -> Result[Json]: ClusterHealthError: If retrieval fails. References: - - `get-the-cluster-health `__ + - `get-the-cluster-health `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -74,7 +74,7 @@ async def statistics(self, db_server: str) -> Result[Json]: ClusterStatisticsError: If retrieval fails. References: - - `get-the-statistics-of-a-db-server `__ + - `get-the-statistics-of-a-db-server `__ """ # noqa: E501 params: Params = {"DBserver": db_server} @@ -103,7 +103,7 @@ async def endpoints(self) -> Result[List[str]]: ClusterEndpointsError: If retrieval fails. References: - - `list-all-coordinator-endpoints `__ + - `list-all-coordinator-endpoints `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -129,7 +129,7 @@ async def server_id(self) -> Result[str]: ClusterServerIDError: If retrieval fails. References: - - `get-the-server-id `__ + - `get-the-server-id `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -154,7 +154,7 @@ async def server_role(self) -> Result[str]: ClusterServerRoleError: If retrieval fails. References: - - `get-the-server-role `__ + - `get-the-server-role `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -182,7 +182,7 @@ async def toggle_maintenance_mode(self, mode: str) -> Result[Json]: ClusterMaintenanceModeError: If the toggle operation fails. References: - - `toggle-cluster-maintenance-mode `__ + - `toggle-cluster-maintenance-mode `__ """ # noqa: E501 request = Request( method=Method.PUT, @@ -212,7 +212,7 @@ async def server_maintenance_mode(self, server_id: str) -> Result[Json]: ClusterMaintenanceModeError: If retrieval fails. References: - - `get-the-maintenance-status-of-a-db-server `__ + - `get-the-maintenance-status-of-a-db-server `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -242,7 +242,7 @@ async def toggle_server_maintenance_mode( ClusterMaintenanceModeError: If the operation fails. 
References: - - `set-the-maintenance-status-of-a-db-server `__ + - `set-the-maintenance-status-of-a-db-server `__ """ # noqa: E501 data: Json = {"mode": mode} if timeout is not None: @@ -271,7 +271,7 @@ async def calculate_imbalance(self) -> Result[Json]: ClusterRebalanceError: If retrieval fails. References: - - `get-the-current-cluster-imbalance `__ + - `get-the-current-cluster-imbalance `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_admin/cluster/rebalance") @@ -315,7 +315,7 @@ async def calculate_rebalance_plan( ClusterRebalanceError: If retrieval fails. References: - - `compute-a-set-of-move-shard-operations-to-improve-balance `__ + - `compute-a-set-of-move-shard-operations-to-improve-balance `__ """ # noqa: E501 data: Json = dict(version=version) if databases_excluded is not None: @@ -380,7 +380,7 @@ async def rebalance( ClusterRebalanceError: If retrieval fails. References: - - `compute-and-execute-a-set-of-move-shard-operations-to-improve-balance `__ + - `compute-and-execute-a-set-of-move-shard-operations-to-improve-balance `__ """ # noqa: E501 data: Json = dict(version=version) if databases_excluded is not None: @@ -431,7 +431,7 @@ async def execute_rebalance_plan( ClusterRebalanceError: If the execution fails. References: - - `execute-a-set-of-move-shard-operations `__ + - `execute-a-set-of-move-shard-operations `__ """ # noqa: E501 data: Json = dict(version=version, moves=moves) diff --git a/arangoasync/collection.py b/arangoasync/collection.py index 52a9d9e..fae501a 100644 --- a/arangoasync/collection.py +++ b/arangoasync/collection.py @@ -333,7 +333,7 @@ async def indexes( IndexListError: If retrieval fails. References: - - `list-all-indexes-of-a-collection `__ + - `list-all-indexes-of-a-collection `__ """ # noqa: E501 params: Params = dict(collection=self._name) if with_stats is not None: @@ -368,7 +368,7 @@ async def get_index(self, id: str | int) -> Result[IndexProperties]: IndexGetError: If retrieval fails. References: - `get-an-index `__ + `get-an-index `__ """ # noqa: E501 if isinstance(id, int): full_id = f"{self._name}/{id}" @@ -408,12 +408,12 @@ async def add_index( IndexCreateError: If index creation fails. References: - - `create-an-index `__ - - `create-a-persistent-index `__ - - `create-an-inverted-index `__ - - `create-a-ttl-index `__ - - `create-a-multi-dimensional-index `__ - - `create-a-geo-spatial-index `__ + - `create-an-index `__ + - `create-a-persistent-index `__ + - `create-an-inverted-index `__ + - `create-a-ttl-index `__ + - `create-a-multi-dimensional-index `__ + - `create-a-geo-spatial-index `__ """ # noqa: E501 options = options or {} request = Request( @@ -447,7 +447,7 @@ async def delete_index( IndexDeleteError: If deletion fails. References: - - `delete-an-index `__ + - `delete-an-index `__ """ # noqa: E501 if isinstance(id, int): full_id = f"{self._name}/{id}" @@ -478,7 +478,7 @@ async def load_indexes(self) -> Result[bool]: IndexLoadError: If loading fails. References: - - `load-collection-indexes-into-memory `__ + - `load-collection-indexes-into-memory `__ """ # noqa: E501 request = Request( method=Method.PUT, @@ -499,7 +499,7 @@ async def recalculate_count(self) -> None: CollectionRecalculateCountError: If re-calculation fails. 
References: - - `recalculate-the-document-count-of-a-collection `__ + - `recalculate-the-document-count-of-a-collection `__ """ # noqa: E501 request = Request( method=Method.PUT, @@ -522,7 +522,7 @@ async def properties(self) -> Result[CollectionProperties]: CollectionPropertiesError: If retrieval fails. References: - - `get-the-properties-of-a-collection `__ + - `get-the-properties-of-a-collection `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -572,7 +572,7 @@ async def configure( CollectionConfigureError: If configuration fails. References: - - `change-the-properties-of-a-collection `__ + - `change-the-properties-of-a-collection `__ """ # noqa: E501 data: Json = {} if cache_enabled is not None: @@ -617,7 +617,7 @@ async def rename(self, new_name: str) -> None: CollectionRenameError: If rename fails. References: - - `rename-a-collection `__ + - `rename-a-collection `__ """ # noqa: E501 data: Json = {"name": new_name} request = Request( @@ -644,7 +644,7 @@ async def compact(self) -> Result[CollectionInfo]: CollectionCompactError: If compaction fails. References: - - `compact-a-collection `__ + - `compact-a-collection `__ """ # noqa: E501 request = Request( method=Method.PUT, @@ -677,7 +677,7 @@ async def truncate( CollectionTruncateError: If truncation fails. References: - - `truncate-a-collection `__ + - `truncate-a-collection `__ """ # noqa: E501 params: Params = {} if wait_for_sync is not None: @@ -707,7 +707,7 @@ async def count(self) -> Result[int]: DocumentCountError: If retrieval fails. References: - - `get-the-document-count-of-a-collection `__ + - `get-the-document-count-of-a-collection `__ """ # noqa: E501 request = Request( method=Method.GET, endpoint=f"/_api/collection/{self.name}/count" @@ -731,7 +731,7 @@ async def statistics(self) -> Result[CollectionStatistics]: CollectionStatisticsError: If retrieval fails. References: - - `get-the-collection-statistics `__ + - `get-the-collection-statistics `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -761,7 +761,7 @@ async def responsible_shard(self, document: Json) -> Result[str]: CollectionResponsibleShardError: If retrieval fails. References: - - `get-the-responsible-shard-for-a-document `__ + - `get-the-responsible-shard-for-a-document `__ """ # noqa: E501 request = Request( method=Method.PUT, @@ -793,7 +793,7 @@ async def shards(self, details: Optional[bool] = None) -> Result[Json]: CollectionShardsError: If retrieval fails. References: - - `get-the-shard-ids-of-a-collection `__ + - `get-the-shard-ids-of-a-collection `__ """ # noqa: E501 params: Params = {} if details is not None: @@ -822,7 +822,7 @@ async def revision(self) -> Result[str]: CollectionRevisionError: If retrieval fails. References: - - `get-the-collection-revision-id `__ + - `get-the-collection-revision-id `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -852,7 +852,7 @@ async def checksum( CollectionChecksumError: If retrieval fails. References: - - `get-the-collection-checksum `__ + - `get-the-collection-checksum `__ """ # noqa: E501 params: Params = {} if with_rev is not None: @@ -899,7 +899,7 @@ async def has( DocumentGetError: If retrieval fails. References: - - `get-a-document-header `__ + - `get-a-document-header `__ """ # noqa: E501 handle = self._get_doc_id(document) @@ -956,7 +956,7 @@ async def get_many( DocumentGetError: If retrieval fails. 
References: - - `get-multiple-documents `__ + - `get-multiple-documents `__ """ # noqa: E501 params: Params = {"onlyget": True} if ignore_revs is not None: @@ -1283,7 +1283,7 @@ async def insert_many( DocumentInsertError: If insertion fails. References: - - `create-multiple-documents `__ + - `create-multiple-documents `__ """ # noqa: E501 params: Params = {} if wait_for_sync is not None: @@ -1373,7 +1373,7 @@ async def replace_many( DocumentReplaceError: If replacing fails. References: - - `replace-multiple-documents `__ + - `replace-multiple-documents `__ """ # noqa: E501 params: Params = {} if wait_for_sync is not None: @@ -1466,7 +1466,7 @@ async def update_many( DocumentUpdateError: If update fails. References: - - `update-multiple-documents `__ + - `update-multiple-documents `__ """ # noqa: E501 params: Params = {} if wait_for_sync is not None: @@ -1548,7 +1548,7 @@ async def delete_many( DocumentRemoveError: If removal fails. References: - - `remove-multiple-documents `__ + - `remove-multiple-documents `__ """ # noqa: E501 params: Params = {} if wait_for_sync is not None: @@ -1640,7 +1640,7 @@ async def import_bulk( DocumentInsertError: If import fails. References: - - `import-json-data-as-documents `__ + - `import-json-data-as-documents `__ """ # noqa: E501 params: Params = dict() params["collection"] = self.name @@ -1730,7 +1730,7 @@ async def get( DocumentParseError: If the document is malformed. References: - - `get-a-document `__ + - `get-a-document `__ """ # noqa: E501 handle = self._get_doc_id(document) @@ -1818,7 +1818,7 @@ async def insert( DocumentParseError: If the document is malformed. References: - - `create-a-document `__ + - `create-a-document `__ """ # noqa: E501 if isinstance(document, dict): document = cast(T, self._ensure_key_from_id(document)) @@ -1923,7 +1923,7 @@ async def update( DocumentUpdateError: If update fails. References: - - `update-a-document `__ + - `update-a-document `__ """ # noqa: E501 params: Params = {} if ignore_revs is not None: @@ -2017,7 +2017,7 @@ async def replace( DocumentReplaceError: If replace fails. References: - - `replace-a-document `__ + - `replace-a-document `__ """ # noqa: E501 params: Params = {} if ignore_revs is not None: @@ -2105,7 +2105,7 @@ async def delete( DocumentDeleteError: If deletion fails. References: - - `remove-a-document `__ + - `remove-a-document `__ """ # noqa: E501 handle = self._get_doc_id(cast(str | Json, document)) @@ -2232,7 +2232,7 @@ async def get( DocumentParseError: If the document is malformed. References: - - `get-a-vertex `__ + - `get-a-vertex `__ """ # noqa: E501 handle = self._get_doc_id(vertex) @@ -2294,7 +2294,7 @@ async def insert( DocumentParseError: If the document is malformed. References: - - `create-a-vertex `__ + - `create-a-vertex `__ """ # noqa: E501 if isinstance(vertex, dict): vertex = cast(T, self._ensure_key_from_id(vertex)) @@ -2359,7 +2359,7 @@ async def update( DocumentUpdateError: If update fails. References: - - `update-a-vertex `__ + - `update-a-vertex `__ """ # noqa: E501 params: Params = {} if wait_for_sync is not None: @@ -2434,7 +2434,7 @@ async def replace( DocumentReplaceError: If replace fails. References: - - `replace-a-vertex `__ + - `replace-a-vertex `__ """ # noqa: E501 params: Params = {} if wait_for_sync is not None: @@ -2506,7 +2506,7 @@ async def delete( DocumentDeleteError: If deletion fails. 
References: - - `remove-a-vertex `__ + - `remove-a-vertex `__ """ # noqa: E501 params: Params = {} if wait_for_sync is not None: @@ -2631,7 +2631,7 @@ async def get( DocumentParseError: If the document is malformed. References: - - `get-an-edge `__ + - `get-an-edge `__ """ # noqa: E501 handle = self._get_doc_id(edge) @@ -2694,7 +2694,7 @@ async def insert( DocumentParseError: If the document is malformed. References: - - `create-an-edge `__ + - `create-an-edge `__ """ # noqa: E501 if isinstance(edge, dict): edge = cast(T, self._ensure_key_from_id(edge)) @@ -2763,7 +2763,7 @@ async def update( DocumentUpdateError: If update fails. References: - - `update-an-edge `__ + - `update-an-edge `__ """ # noqa: E501 params: Params = {} if wait_for_sync is not None: @@ -2842,7 +2842,7 @@ async def replace( DocumentReplaceError: If replace fails. References: - - `replace-an-edge `__ + - `replace-an-edge `__ """ # noqa: E501 params: Params = {} if wait_for_sync is not None: @@ -2917,7 +2917,7 @@ async def delete( DocumentDeleteError: If deletion fails. References: - - `remove-an-edge `__ + - `remove-an-edge `__ """ # noqa: E501 params: Params = {} if wait_for_sync is not None: @@ -2978,7 +2978,7 @@ async def edges( EdgeListError: If retrieval fails. References: - - `get-inbound-and-outbound-edges `__ + - `get-inbound-and-outbound-edges `__ """ # noqa: E501 params: Params = { "vertex": self._get_doc_id(vertex, validate=False), diff --git a/arangoasync/cursor.py b/arangoasync/cursor.py index 5339455..1e3cc6c 100644 --- a/arangoasync/cursor.py +++ b/arangoasync/cursor.py @@ -192,8 +192,8 @@ async def fetch(self, batch_id: Optional[str] = None) -> List[Any]: CursorStateError: If the cursor ID is not set. References: - - `read-the-next-batch-from-a-cursor `__ - - `read-a-batch-from-the-cursor-again `__ + - `read-the-next-batch-from-a-cursor `__ + - `read-a-batch-from-the-cursor-again `__ """ # noqa: E501 if self._id is None: raise CursorStateError("Cursor ID is not set") @@ -229,7 +229,7 @@ async def close(self, ignore_missing: bool = False) -> bool: CursorCloseError: If the cursor failed to close. References: - - `delete-a-cursor `__ + - `delete-a-cursor `__ """ # noqa: E501 if self._id is None: return False diff --git a/arangoasync/database.py b/arangoasync/database.py index 2cbbc68..8e700e5 100644 --- a/arangoasync/database.py +++ b/arangoasync/database.py @@ -258,7 +258,7 @@ async def properties(self) -> Result[DatabaseProperties]: DatabasePropertiesError: If retrieval fails. References: - - `get-information-about-the-current-database `__ + - `get-information-about-the-current-database `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/database/current") @@ -281,7 +281,7 @@ async def status(self) -> Result[ServerStatusInformation]: ServerSatusError: If retrieval fails. References: - - `get-server-status-information `__ + - `get-server-status-information `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_admin/status") @@ -305,7 +305,7 @@ async def databases(self) -> Result[List[str]]: DatabaseListError: If retrieval fails. References: - - `list-all-databases `__ + - `list-all-databases `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/database") @@ -333,7 +333,7 @@ async def databases_accessible_to_user(self) -> Result[List[str]]: DatabaseListError: If retrieval fails. 
References: - - `list-the-accessible-databases `__ + - `list-the-accessible-databases `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/database/user") @@ -417,7 +417,7 @@ async def create_database( DatabaseCreateError: If creation fails. References: - - `create-a-database `__ + - `create-a-database `__ """ # noqa: E501 data: Json = {"name": name} @@ -478,7 +478,7 @@ async def delete_database( DatabaseDeleteError: If deletion fails. References: - - `drop-a-database `__ + - `drop-a-database `__ """ # noqa: E501 request = Request(method=Method.DELETE, endpoint=f"/_api/database/{name}") @@ -533,7 +533,7 @@ async def collections( CollectionListError: If retrieval fails. References: - - `list-all-collections `__ + - `list-all-collections `__ """ # noqa: E501 params: Params = {} if exclude_system is not None: @@ -661,7 +661,7 @@ async def create_collection( CollectionCreateError: If the operation fails. References: - - `create-a-collection `__ + - `create-a-collection `__ """ # noqa: E501 data: Json = {"name": name} if col_type is not None: @@ -751,7 +751,7 @@ async def delete_collection( CollectionDeleteError: If the operation fails. References: - - `drop-a-collection `__ + - `drop-a-collection `__ """ # noqa: E501 params: Params = {} if is_system is not None: @@ -782,7 +782,7 @@ async def key_generators(self) -> Result[List[str]]: CollectionKeyGeneratorsError: If retrieval fails. References: - - `get-the-available-key-generators `__ + - `get-the-available-key-generators `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/key-generators") @@ -821,7 +821,7 @@ async def has_document( DocumentGetError: If retrieval fails. References: - - `get-a-document-header `__ + - `get-a-document-header `__ """ # noqa: E501 col = Collection.get_col_name(document) return await self.collection(col).has( @@ -858,7 +858,7 @@ async def document( DocumentParseError: If the document is malformed. References: - - `get-a-document `__ + - `get-a-document `__ """ # noqa: E501 col: StandardCollection[Json, Json, Jsons] = self.collection( Collection.get_col_name(document) @@ -927,7 +927,7 @@ async def insert_document( DocumentParseError: If the document is malformed. References: - - `create-a-document `__ + - `create-a-document `__ """ # noqa: E501 col: StandardCollection[Json, Json, Jsons] = self.collection(collection) return await col.insert( @@ -998,7 +998,7 @@ async def update_document( DocumentUpdateError: If update fails. References: - - `update-a-document `__ + - `update-a-document `__ """ # noqa: E501 col: StandardCollection[Json, Json, Jsons] = self.collection( Collection.get_col_name(document) @@ -1063,7 +1063,7 @@ async def replace_document( DocumentReplaceError: If replace fails. References: - - `replace-a-document `__ + - `replace-a-document `__ """ # noqa: E501 col: StandardCollection[Json, Json, Jsons] = self.collection( Collection.get_col_name(document) @@ -1124,7 +1124,7 @@ async def delete_document( DocumentDeleteError: If deletion fails. References: - - `remove-a-document `__ + - `remove-a-document `__ """ # noqa: E501 col: StandardCollection[Json, Json, Jsons] = self.collection( Collection.get_col_name(document) @@ -1198,7 +1198,7 @@ async def graphs(self) -> Result[List[GraphProperties]]: GraphListError: If the operation fails. References: - - `list-all-graphs `__ + - `list-all-graphs `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/gharial") @@ -1253,7 +1253,7 @@ async def create_graph( GraphCreateError: If the operation fails. 
References: - - `create-a-graph `__ + - `create-a-graph `__ """ # noqa: E501 params: Params = {} if wait_for_sync is not None: @@ -1315,7 +1315,7 @@ async def delete_graph( GraphDeleteError: If the operation fails. References: - - `drop-a-graph `__ + - `drop-a-graph `__ """ # noqa: E501 params: Params = {} if drop_collections is not None: @@ -1347,8 +1347,8 @@ async def view(self, name: str) -> Result[Json]: ViewGetError: If the operation fails. References: - - `read-properties-of-a-view `__ - - `get-the-properties-of-a-view `__ + - `read-properties-of-a-view `__ + - `get-the-properties-of-a-view `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint=f"/_api/view/{name}/properties") @@ -1372,8 +1372,8 @@ async def view_info(self, name: str) -> Result[Json]: ViewGetError: If the operation fails. References: - - `get-information-about-a-view `_ - - `get-information-about-a-view `__ + - `get-information-about-a-view `_ + - `get-information-about-a-view `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint=f"/_api/view/{name}") @@ -1394,8 +1394,8 @@ async def views(self) -> Result[Jsons]: ViewListError: If the operation fails. References: - - `list-all-views `__ - - `list-all-views `__ + - `list-all-views `__ + - `list-all-views `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/view") @@ -1427,8 +1427,8 @@ async def create_view( ViewCreateError: If the operation fails. References: - - `create-a-search-alias-view `__ - - `create-an-arangosearch-view `__ + - `create-a-search-alias-view `__ + - `create-an-arangosearch-view `__ """ # noqa: E501 data: Json = {"name": name, "type": view_type} if properties is not None: @@ -1461,8 +1461,8 @@ async def replace_view(self, name: str, properties: Json) -> Result[Json]: ViewReplaceError: If the operation fails. References: - - `replace-the-properties-of-a-search-alias-view `__ - - `replace-the-properties-of-an-arangosearch-view `__ + - `replace-the-properties-of-a-search-alias-view `__ + - `replace-the-properties-of-an-arangosearch-view `__ """ # noqa: E501 request = Request( method=Method.PUT, @@ -1491,8 +1491,8 @@ async def update_view(self, name: str, properties: Json) -> Result[Json]: ViewUpdateError: If the operation fails. References: - - `update-the-properties-of-a-search-alias-view `__ - - `update-the-properties-of-an-arangosearch-view `__ + - `update-the-properties-of-a-search-alias-view `__ + - `update-the-properties-of-an-arangosearch-view `__ """ # noqa: E501 request = Request( method=Method.PATCH, @@ -1518,8 +1518,8 @@ async def rename_view(self, name: str, new_name: str) -> None: ViewRenameError: If the operation fails. References: - - `rename-a-view `__ - - `rename-a-view `__ + - `rename-a-view `__ + - `rename-a-view `__ """ # noqa: E501 request = Request( method=Method.PUT, @@ -1551,8 +1551,8 @@ async def delete_view( ViewDeleteError: If the operation fails. References: - - `drop-a-view `__ - - `drop-a-view `__ + - `drop-a-view `__ + - `drop-a-view `__ """ # noqa: E501 request = Request(method=Method.DELETE, endpoint=f"/_api/view/{name}") @@ -1575,7 +1575,7 @@ async def analyzers(self) -> Result[Jsons]: AnalyzerListError: If the operation fails. References: - - `list-all-analyzers `__ + - `list-all-analyzers `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/analyzer") @@ -1597,7 +1597,7 @@ async def analyzer(self, name: str) -> Result[Json]: dict: Analyzer properties. 
References: - - `get-an-analyzer-definition `__ + - `get-an-analyzer-definition `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint=f"/_api/analyzer/{name}") @@ -1632,7 +1632,7 @@ async def create_analyzer( AnalyzerCreateError: If the operation fails. References: - - `create-an-analyzer `__ + - `create-an-analyzer `__ """ # noqa: E501 data: Json = {"name": name, "type": analyzer_type} if properties is not None: @@ -1671,7 +1671,7 @@ async def delete_analyzer( AnalyzerDeleteError: If the operation fails. References: - - `remove-an-analyzer `__ + - `remove-an-analyzer `__ """ # noqa: E501 params: Params = {} if force is not None: @@ -1728,7 +1728,7 @@ async def user(self, username: str) -> Result[UserInfo]: UserGetError: If the operation fails. References: - - `get-a-user` `__ + - `get-a-user `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint=f"/_api/user/{username}") @@ -1757,7 +1757,7 @@ async def users(self) -> Result[Sequence[UserInfo]]: UserListError: If the operation fails. References: - - `list-available-users `__ + - `list-available-users `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/user") @@ -1792,7 +1792,7 @@ async def create_user(self, user: UserInfo | Json) -> Result[UserInfo]: await db.create_user({"user": "john", "password": "secret"}) References: - - `create-a-user `__ + - `create-a-user `__ """ # noqa: E501 if isinstance(user, dict): user = UserInfo(**user) @@ -1832,7 +1832,7 @@ async def replace_user(self, user: UserInfo | Json) -> Result[UserInfo]: UserReplaceError: If the operation fails. References: - - `replace-a-user `__ + - `replace-a-user `__ """ # noqa: E501 if isinstance(user, dict): user = UserInfo(**user) @@ -1872,7 +1872,7 @@ async def update_user(self, user: UserInfo | Json) -> Result[UserInfo]: UserUpdateError: If the operation fails. References: - - `update-a-user `__ + - `update-a-user `__ """ # noqa: E501 if isinstance(user, dict): user = UserInfo(**user) @@ -1917,7 +1917,7 @@ async def delete_user( UserDeleteError: If the operation fails. References: - - `remove-a-user `__ + - `remove-a-user `__ """ # noqa: E501 request = Request(method=Method.DELETE, endpoint=f"/_api/user/{username}") @@ -1945,7 +1945,7 @@ async def permissions(self, username: str, full: bool = True) -> Result[Json]: PermissionListError: If the operation fails. References: - - `list-a-users-accessible-databases `__ + - `list-a-users-accessible-databases `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -1981,8 +1981,8 @@ async def permission( PermissionGetError: If the operation fails. References: - - `get-a-users-database-access-level `__ - - `get-a-users-collection-access-level `__ + - `get-a-users-database-access-level `__ + - `get-a-users-collection-access-level `__ """ # noqa: E501 endpoint = f"/_api/user/{username}/database/{database}" if collection is not None: @@ -2022,8 +2022,8 @@ async def update_permission( is `False`. References: - `set-a-users-database-access-level `__ - - `set-a-users-collection-access-level `__ + - `set-a-users-database-access-level `__ + - `set-a-users-collection-access-level `__ """ # noqa: E501 endpoint = f"/_api/user/{username}/database/{database}" if collection is not None: @@ -2067,8 +2067,8 @@ async def reset_permission( is `False`.
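+ Example: + A hedged sketch of the permission round-trip; the user ``john`` and database ``test`` are illustrative, and the keyword names are assumed to mirror the endpoint parameters: + + await db.update_permission("john", permission="rw", database="test") + await db.reset_permission("john", database="test") # back to the default access level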
References: - - `clear-a-users-database-access-level `__ - - `clear-a-users-collection-access-level `__ + - `clear-a-users-database-access-level `__ + - `clear-a-users-collection-access-level `__ """ # noqa: E501 endpoint = f"/_api/user/{username}/database/{database}" if collection is not None: @@ -2098,7 +2098,7 @@ async def jwt_secrets(self) -> Result[Json]: JWTSecretListError: If the operation fails. References: - - `get-information-about-the-loaded-jwt-secrets `__ + - `get-information-about-the-loaded-jwt-secrets `__ """ # noqa: 501 request = Request(method=Method.GET, endpoint="/_admin/server/jwt") @@ -2120,7 +2120,7 @@ async def reload_jwt_secrets(self) -> Result[Json]: JWTSecretReloadError: If the operation fails. References: - - `hot-reload-the-jwt-secrets-from-disk `__ + - `hot-reload-the-jwt-secrets-from-disk `__ """ # noqa: 501 request = Request( method=Method.POST, endpoint="/_admin/server/jwt", prefix_needed=False @@ -2238,7 +2238,7 @@ async def tls(self) -> Result[Json]: ServerTLSError: If the operation fails. References: - - `get-the-tls-data `__ + - `get-the-tls-data `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_admin/server/tls") @@ -2262,7 +2262,7 @@ async def reload_tls(self) -> Result[Json]: ServerTLSReloadError: If the operation fails. References: - - `reload-the-tls-data `__ + - `reload-the-tls-data `__ """ # noqa: E501 request = Request(method=Method.POST, endpoint="/_admin/server/tls") @@ -2287,7 +2287,7 @@ async def encryption(self) -> Result[Json]: ServerEncryptionError: If the operation fails. References: - - `rotate-the-encryption-keys `__ + - `rotate-the-encryption-keys `__ """ # noqa: E501 request = Request(method=Method.POST, endpoint="/_admin/server/encryption") @@ -2360,7 +2360,7 @@ async def execute_transaction( TransactionExecuteError: If the operation fails on the server side. References: - - `execute-a-javascript-transaction `__ + - `execute-a-javascript-transaction `__ """ # noqa: 501 m = "JavaScript Transactions are deprecated from ArangoDB v3.12.0 onward and will be removed in a future version." # noqa: E501 warn(m, DeprecationWarning, stacklevel=2) @@ -2411,7 +2411,7 @@ async def version(self, details: bool = False) -> Result[Json]: ServerVersionError: If the operation fails on the server side. References: - - `get-the-server-version `__ + - `get-the-server-version `__ """ # noqa: E501 request = Request( method=Method.GET, endpoint="/_api/version", params={"details": details} @@ -2434,7 +2434,7 @@ async def tasks(self) -> Result[Jsons]: TaskListError: If the list cannot be retrieved. References: - - `list-all-tasks `__ + - `list-all-tasks `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/tasks") @@ -2459,7 +2459,7 @@ async def task(self, task_id: str) -> Result[Json]: TaskGetError: If the task details cannot be retrieved. References: - - `get-a-task `__ + - `get-a-task `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint=f"/_api/tasks/{task_id}") @@ -2499,8 +2499,8 @@ async def create_task( TaskCreateError: If the task cannot be created. References: - - `create-a-task `__ - - `create-a-task-with-id `__ + - `create-a-task `__ + - `create-a-task-with-id `__ """ # noqa: E501 data: Json = {"command": command} if name is not None: @@ -2553,7 +2553,7 @@ async def delete_task( TaskDeleteError: If the operation fails. 
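+ Example: + A sketch of the task lifecycle; the JavaScript command is illustrative, and the ``period`` keyword and the ``id`` field of the returned dict are assumptions based on the tasks API: + + task = await db.create_task(command="console.log('hello');", period=60) + await db.delete_task(task["id"])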
References: - - `delete-a-task `__ + - `delete-a-task `__ """ # noqa: E501 request = Request(method=Method.DELETE, endpoint=f"/_api/tasks/{task_id}") @@ -2576,7 +2576,7 @@ async def engine(self) -> Result[Json]: ServerEngineError: If the operation fails. References: - - `get-the-storage-engine-type `__ + - `get-the-storage-engine-type `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/engine") @@ -2598,7 +2598,7 @@ async def time(self) -> Result[datetime]: ServerTimeError: If the operation fails. References: - - `get-the-system-time `__ + - `get-the-system-time `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_admin/time") @@ -2621,7 +2621,7 @@ async def check_availability(self) -> Result[str]: ServerCheckAvailabilityError: If the operation fails. References: - - `check-server-availability `__ + - `check-server-availability `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -2649,7 +2649,7 @@ async def support_info(self) -> Result[Json]: DatabaseSupportInfoError: If the operation fails. References: - - `get-information-about-the-deployment `__ + - `get-information-about-the-deployment `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_admin/support-info") @@ -2672,7 +2672,7 @@ async def options(self) -> Result[Json]: ServerCurrentOptionsGetError: If the operation fails. References: - - `get-the-startup-option-configuration `__ + - `get-the-startup-option-configuration `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_admin/options") @@ -2694,7 +2694,7 @@ async def options_available(self) -> Result[Json]: ServerAvailableOptionsGetError: If the operation fails. References: - - `get-the-available-startup-options `__ + - `get-the-available-startup-options `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_admin/options-description") @@ -2716,7 +2716,7 @@ async def mode(self) -> Result[str]: ServerModeError: If the operation fails. References: - - `return-whether-or-not-a-server-is-in-read-only-mode `__ + - `return-whether-or-not-a-server-is-in-read-only-mode `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_admin/server/mode") @@ -2740,7 +2740,7 @@ async def set_mode(self, mode: str) -> Result[str]: ServerModeSetError: If the operation fails. References: - - `set-the-server-mode-to-read-only-or-default `__ + - `set-the-server-mode-to-read-only-or-default `__ """ # noqa: E501 request = Request( method=Method.PUT, @@ -2766,7 +2766,7 @@ async def license(self) -> Result[Json]: ServerLicenseGetError: If the operation fails. References: - - `get-information-about-the-current-license `__ + - `get-information-about-the-current-license `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_admin/license") @@ -2790,7 +2790,7 @@ async def set_license(self, license: str, force: Optional[bool] = False) -> None ServerLicenseSetError: If the operation fails. References: - - `set-a-new-license `__ + - `set-a-new-license `__ """ # noqa: E501 params: Params = {} if force is not None: @@ -2819,7 +2819,7 @@ async def shutdown(self, soft: Optional[bool] = None) -> None: ServerShutdownError: If the operation fails. References: - - `start-the-shutdown-sequence `__ + - `start-the-shutdown-sequence `__ """ # noqa: E501 params: Params = {} if soft is not None: @@ -2847,7 +2847,7 @@ async def shutdown_progress(self) -> Result[Json]: ServerShutdownProgressError: If the operation fails. 
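+ Example: + A minimal sketch; querying progress only makes sense after a soft shutdown has been initiated: + + await db.shutdown(soft=True) + progress = await db.shutdown_progress() # e.g. remaining jobs and transactions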
References: - - `query-the-soft-shutdown-progress `__ + - `query-the-soft-shutdown-progress `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_admin/shutdown") @@ -2884,7 +2884,7 @@ async def compact( DatabaseCompactError: If the operation fails. References: - - `compact-all-databases `__ + - `compact-all-databases `__ """ # noqa: E501 data = {} if change_level is not None: @@ -2911,7 +2911,7 @@ async def reload_routing(self) -> None: ServerReloadRoutingError: If the operation fails. References: - - `reload-the-routing-table `__ + - `reload-the-routing-table `__ """ # noqa: E501 request = Request(method=Method.POST, endpoint="/_admin/routing/reload") @@ -2934,7 +2934,7 @@ async def echo(self, body: Optional[Json] = None) -> Result[Json]: ServerEchoError: If the operation fails. References: - - `echo-a-request `__ + - `echo-a-request `__ """ # noqa: E501 data = body if body is not None else {} request = Request(method=Method.POST, endpoint="/_admin/echo", data=data) @@ -2960,7 +2960,7 @@ async def execute(self, command: str) -> Result[Any]: ServerExecuteError: If the execution fails. References: - - `execute-a-script `__ + - `execute-a-script `__ """ # noqa: E501 request = Request( method=Method.POST, endpoint="/_admin/execute", data=command.encode("utf-8") @@ -3002,7 +3002,7 @@ async def metrics(self, server_id: Optional[str] = None) -> Result[str]: ServerMetricsError: If the operation fails. References: - - `metrics-api-v2 `__ + - `metrics-api-v2 `__ """ # noqa: E501 params: Params = {} if server_id is not None: @@ -3058,7 +3058,7 @@ async def read_log_entries( ServerReadLogError: If the operation fails. References: - - `get-the-global-server-logs `__ + - `get-the-global-server-logs `__ """ # noqa: E501 params: Params = {} if upto is not None: @@ -3110,7 +3110,7 @@ async def log_levels( ServerLogLevelError: If the operation fails. References: - - `get-the-server-log-levels `__ + - `get-the-server-log-levels `__ """ # noqa: E501 params: Params = {} if server_id is not None: @@ -3166,7 +3166,7 @@ async def set_log_levels( ServerLogLevelSetError: If the operation fails. References: - - `set-the-structured-log-settings `__ + - `set-the-structured-log-settings `__ """ # noqa: E501 params: Params = {} if server_id is not None: @@ -3207,7 +3207,7 @@ async def reset_log_levels(self, server_id: Optional[str] = None) -> Result[Json ServerLogLevelResetError: If the operation fails. References: - - `reset-the-server-log-levels `__ + - `reset-the-server-log-levels `__ """ # noqa: E501 params: Params = {} if server_id is not None: @@ -3238,7 +3238,7 @@ async def log_settings(self) -> Result[Json]: ServerLogSettingError: If the operation fails. References: - - `get-the-structured-log-settings `__ + - `get-the-structured-log-settings `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -3279,7 +3279,7 @@ async def set_log_settings(self, **kwargs: Dict[str, Any]) -> Result[Json]: ServerLogSettingSetError: If the operation fails. References: - - `set-the-structured-log-settings `__ + - `set-the-structured-log-settings `__ """ # noqa: E501 request = Request( method=Method.PUT, @@ -3306,7 +3306,7 @@ async def api_calls(self) -> Result[Json]: ServerApiCallsError: If the operation fails. References: - - `get-recent-api-calls `__ + - `get-recent-api-calls `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -3379,7 +3379,7 @@ async def begin_transaction( TransactionInitError: If the operation fails on the server side. 
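+ Example: + A minimal sketch of the intended flow; the collection name ``students`` is illustrative: + + txn_db = await db.begin_transaction(write="students") + try: + await txn_db.collection("students").insert({"name": "Jenny"}) + await txn_db.commit_transaction() + except Exception: + await txn_db.abort_transaction() + raise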
References: - - `begin-a-stream-transaction `__ + - `begin-a-stream-transaction `__ """ # noqa: E501 collections = dict() if read is not None: @@ -3463,7 +3463,7 @@ async def async_jobs( AsyncJobListError: If retrieval fails. References: - - `list-async-jobs-by-status-or-get-the-status-of-specific-job `__ + - `list-async-jobs-by-status-or-get-the-status-of-specific-job `__ """ # noqa: E501 params: Params = {} if count is not None: @@ -3496,7 +3496,7 @@ async def clear_async_jobs(self, threshold: Optional[float] = None) -> None: AsyncJobClearError: If the operation fails. References: - - `delete-async-job-results `__ + - `delete-async-job-results `__ """ # noqa: E501 if threshold is None: request = Request(method=Method.DELETE, endpoint="/_api/job/all") @@ -3516,7 +3516,7 @@ def response_handler(resp: Response) -> None: class TransactionDatabase(Database): """Database API tailored specifically for - `Stream Transactions `__. + `Stream Transactions `__. It allows you to start a transaction, run multiple operations (e.g. AQL queries) over a short period of time, and then commit or abort the transaction. @@ -3551,7 +3551,7 @@ async def transaction_status(self) -> str: TransactionStatusError: If the transaction is not found. References: - - `get-the-status-of-a-stream-transaction `__ + - `get-the-status-of-a-stream-transaction `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -3573,7 +3573,7 @@ async def commit_transaction(self) -> None: TransactionCommitError: If the operation fails on the server side. References: - - `commit-a-stream-transaction `__ + - `commit-a-stream-transaction `__ """ # noqa: E501 request = Request( method=Method.PUT, @@ -3593,7 +3593,7 @@ async def abort_transaction(self) -> None: TransactionAbortError: If the operation fails on the server side. References: - - `abort-a-stream-transaction `__ + - `abort-a-stream-transaction `__ """ # noqa: E501 request = Request( method=Method.DELETE, @@ -3620,7 +3620,7 @@ class AsyncDatabase(Database): and no results are stored on server. References: - - `jobs `__ + - `jobs `__ """ # noqa: E501 def __init__(self, connection: Connection, return_result: bool) -> None: diff --git a/arangoasync/foxx.py b/arangoasync/foxx.py index b74d933..fe02b41 100644 --- a/arangoasync/foxx.py +++ b/arangoasync/foxx.py @@ -65,7 +65,7 @@ async def services(self, exclude_system: Optional[bool] = False) -> Result[Jsons FoxxServiceListError: If retrieval fails. References: - - `list-the-installed-services `__ + - `list-the-installed-services `__ """ # noqa: E501 params: Params = {} if exclude_system is not None: @@ -98,7 +98,7 @@ async def service(self, mount: str) -> Result[Json]: FoxxServiceGetError: If retrieval fails. References: - - `get-the-service-description `__ + - `get-the-service-description `__ """ # noqa: E501 params: Params = {"mount": mount} request = Request( @@ -142,7 +142,7 @@ async def create_service( FoxxServiceCreateError: If installation fails. References: - - `install-a-new-service-mode `__ + - `install-a-new-service-mode `__ """ # noqa: E501 params: Params = dict() params["mount"] = mount @@ -189,7 +189,7 @@ async def delete_service( FoxxServiceDeleteError: If the operation fails. References: - - `uninstall-a-service `__ + - `uninstall-a-service `__ """ # noqa: E501 params: Params = dict() params["mount"] = mount @@ -236,7 +236,7 @@ async def replace_service( FoxxServiceReplaceError: If replacement fails.
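+ Example: + A hedged sketch (assumes the Foxx API is exposed as ``db.foxx`` and that ``replace_service`` accepts a mount point plus a ``source`` for the new bundle; ``/myapp`` and the URL are illustrative): + + await db.foxx.replace_service("/myapp", source="https://example.com/myapp-2.0.0.zip")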
References: - - `replace-a-service `__ + - `replace-a-service `__ """ # noqa: E501 params: Params = dict() params["mount"] = mount @@ -298,7 +298,7 @@ async def update_service( FoxxServiceUpdateError: If upgrade fails. References: - - `upgrade-a-service `__ + - `upgrade-a-service `__ """ # noqa: E501 params: Params = dict() params["mount"] = mount @@ -345,7 +345,7 @@ async def config(self, mount: str) -> Result[Json]: FoxxConfigGetError: If retrieval fails. References: - - `get-the-configuration-options `__ + - `get-the-configuration-options `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -375,7 +375,7 @@ async def update_config(self, mount: str, options: Json) -> Result[Json]: FoxxConfigUpdateError: If update fails. References: - - `update-the-configuration-options `__ + - `update-the-configuration-options `__ """ # noqa: E501 request = Request( method=Method.PATCH, @@ -407,7 +407,7 @@ async def replace_config(self, mount: str, options: Json) -> Result[Json]: FoxxConfigReplaceError: If replace fails. References: - - `replace-the-configuration-options `__ + - `replace-the-configuration-options `__ """ # noqa: E501 request = Request( method=Method.PUT, @@ -437,7 +437,7 @@ async def dependencies(self, mount: str) -> Result[Json]: FoxxDependencyGetError: If retrieval fails. References: - - `get-the-dependency-options `__ + - `get-the-dependency-options `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -467,7 +467,7 @@ async def update_dependencies(self, mount: str, options: Json) -> Result[Json]: FoxxDependencyUpdateError: If update fails. References: - - `update-the-dependency-options `__ + - `update-the-dependency-options `__ """ # noqa: E501 request = Request( method=Method.PATCH, @@ -498,7 +498,7 @@ async def replace_dependencies(self, mount: str, options: Json) -> Result[Json]: FoxxDependencyReplaceError: If replace fails. References: - - `replace-the-dependency-options `__ + - `replace-the-dependency-options `__ """ # noqa: E501 request = Request( method=Method.PUT, @@ -528,7 +528,7 @@ async def scripts(self, mount: str) -> Result[Json]: FoxxScriptListError: If retrieval fails. References: - - `list-the-service-scripts `__ + - `list-the-service-scripts `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -561,7 +561,7 @@ async def run_script( FoxxScriptRunError: If script fails. References: - - `run-a-service-script `__ + - `run-a-service-script `__ """ # noqa: E501 request = Request( method=Method.POST, @@ -612,7 +612,7 @@ async def run_tests( FoxxTestRunError: If test fails. References: - - `run-the-service-tests `__ + - `run-the-service-tests `__ """ # noqa: E501 params: Params = dict() params["mount"] = mount @@ -665,7 +665,7 @@ async def enable_development(self, mount: str) -> Result[Json]: FoxxDevModeEnableError: If the operation fails. References: - - `enable-the-development-mode `__ + - `enable-the-development-mode `__ """ # noqa: E501 request = Request( method=Method.POST, @@ -697,7 +697,7 @@ async def disable_development(self, mount: str) -> Result[Json]: FoxxDevModeDisableError: If the operation fails. References: - - `disable-the-development-mode `__ + - `disable-the-development-mode `__ """ # noqa: E501 request = Request( method=Method.DELETE, @@ -726,7 +726,7 @@ async def readme(self, mount: str) -> Result[str]: FoxxReadmeGetError: If retrieval fails. 
References: - - `get-the-service-readme `__ + - `get-the-service-readme `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -754,7 +754,7 @@ async def swagger(self, mount: str) -> Result[Json]: FoxxSwaggerGetError: If retrieval fails. References: - - `get-the-swagger-description `__ + - `get-the-swagger-description `__ """ # noqa: E501 request = Request( method=Method.GET, endpoint="/_api/foxx/swagger", params={"mount": mount} @@ -785,7 +785,7 @@ async def download(self, mount: str) -> Result[bytes]: FoxxDownloadError: If download fails. References: - - `download-a-service-bundle `__ + - `download-a-service-bundle `__ """ # noqa: E501 request = Request( method=Method.POST, endpoint="/_api/foxx/download", params={"mount": mount} @@ -812,7 +812,7 @@ async def commit(self, replace: Optional[bool] = None) -> None: FoxxCommitError: If commit fails. References: - - `commit-the-local-service-state `__ + - `commit-the-local-service-state `__ """ # noqa: E501 params: Params = {} if replace is not None: diff --git a/arangoasync/graph.py b/arangoasync/graph.py index 059a53e..dbb9732 100644 --- a/arangoasync/graph.py +++ b/arangoasync/graph.py @@ -93,7 +93,7 @@ async def properties(self) -> Result[GraphProperties]: GraphPropertiesError: If the operation fails. References: - - `get-a-graph `__ + - `get-a-graph `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint=f"/_api/gharial/{self._name}") @@ -132,7 +132,7 @@ async def vertex_collections(self) -> Result[List[str]]: VertexCollectionListError: If the operation fails. References: - - `list-vertex-collections `__ + - `list-vertex-collections `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -191,7 +191,7 @@ async def create_vertex_collection( VertexCollectionCreateError: If the operation fails. References: - - `add-a-vertex-collection `__ + - `add-a-vertex-collection `__ """ # noqa: E501 data: Json = {"collection": name} @@ -228,7 +228,7 @@ async def delete_vertex_collection(self, name: str, purge: bool = False) -> None VertexCollectionDeleteError: If the operation fails. References: - - `remove-a-vertex-collection `__ + - `remove-a-vertex-collection `__ """ # noqa: E501 request = Request( method=Method.DELETE, @@ -300,7 +300,7 @@ async def vertex( DocumentParseError: If the document is malformed. References: - - `get-a-vertex `__ + - `get-a-vertex `__ """ # noqa: E501 col = Collection.get_col_name(vertex) return await self.vertex_collection(col).get( @@ -337,7 +337,7 @@ async def insert_vertex( DocumentParseError: If the document is malformed. References: - - `create-a-vertex `__ + - `create-a-vertex `__ """ # noqa: E501 return await self.vertex_collection(collection).insert( vertex, @@ -379,7 +379,7 @@ async def update_vertex( DocumentUpdateError: If update fails. References: - - `update-a-vertex `__ + - `update-a-vertex `__ """ # noqa: E501 col = Collection.get_col_name(cast(Json | str, vertex)) return await self.vertex_collection(col).update( @@ -425,7 +425,7 @@ async def replace_vertex( DocumentReplaceError: If replace fails. References: - - `replace-a-vertex `__ + - `replace-a-vertex `__ """ # noqa: E501 col = Collection.get_col_name(cast(Json | str, vertex)) return await self.vertex_collection(col).replace( @@ -468,7 +468,7 @@ async def delete_vertex( DocumentDeleteError: If deletion fails.
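+ Example: + A short sketch of a vertex round-trip (assumes ``graph = db.graph("school")`` and an existing vertex collection ``students``; the key is illustrative): + + await graph.insert_vertex("students", {"_key": "jenny", "name": "Jenny"}) + await graph.delete_vertex("students/jenny")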
References: - - `remove-a-vertex `__ + - `remove-a-vertex `__ """ # noqa: E501 col = Collection.get_col_name(cast(Json | str, vertex)) return await self.vertex_collection(col).delete( @@ -551,7 +551,7 @@ async def edge_collections(self) -> Result[List[str]]: EdgeCollectionListError: If the operation fails. References: - - `list-edge-collections `__ + - `list-edge-collections `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -602,7 +602,7 @@ async def create_edge_definition( EdgeDefinitionCreateError: If the operation fails. References: - - `add-an-edge-definition `__ + - `add-an-edge-definition `__ """ # noqa: E501 data: Json = { "collection": edge_collection, @@ -659,7 +659,7 @@ async def replace_edge_definition( EdgeDefinitionReplaceError: If the operation fails. References: - - `replace-an-edge-definition `__ + - `replace-an-edge-definition `__ """ # noqa: E501 data: Json = { "collection": edge_collection, @@ -712,7 +712,7 @@ async def delete_edge_definition( EdgeDefinitionDeleteError: If the operation fails. References: - - `remove-an-edge-definition `__ + - `remove-an-edge-definition `__ """ # noqa: E501 params: Params = {} if drop_collections is not None: @@ -793,7 +793,7 @@ async def edge( DocumentParseError: If the document is malformed. References: - - `get-an-edge `__ + - `get-an-edge `__ """ # noqa: E501 col = Collection.get_col_name(edge) return await self.edge_collection(col).get( @@ -832,7 +832,7 @@ async def insert_edge( DocumentParseError: If the document is malformed. References: - - `create-an-edge `__ + - `create-an-edge `__ """ # noqa: E501 return await self.edge_collection(collection).insert( edge, @@ -875,7 +875,7 @@ async def update_edge( DocumentUpdateError: If update fails. References: - - `update-an-edge `__ + - `update-an-edge `__ """ # noqa: E501 col = Collection.get_col_name(cast(Json | str, edge)) return await self.edge_collection(col).update( @@ -923,7 +923,7 @@ async def replace_edge( DocumentReplaceError: If replace fails. References: - - `replace-an-edge `__ + - `replace-an-edge `__ """ # noqa: E501 col = Collection.get_col_name(cast(Json | str, edge)) return await self.edge_collection(col).replace( @@ -967,7 +967,7 @@ async def delete_edge( DocumentDeleteError: If deletion fails. References: - - `remove-an-edge `__ + - `remove-an-edge `__ """ # noqa: E501 col = Collection.get_col_name(cast(Json | str, edge)) return await self.edge_collection(col).delete( @@ -1001,7 +1001,7 @@ async def edges( EdgeListError: If retrieval fails. References: - - `get-inbound-and-outbound-edges `__ + - `get-inbound-and-outbound-edges `__ """ # noqa: E501 return await self.edge_collection(collection).edges( vertex, diff --git a/arangoasync/job.py b/arangoasync/job.py index 13794fe..9f64764 100644 --- a/arangoasync/job.py +++ b/arangoasync/job.py @@ -27,7 +27,7 @@ class AsyncJob(Generic[T]): response_handler: HTTP response handler References: - - `jobs `__ + - `jobs `__ """ # noqa: E501 def __init__( @@ -68,7 +68,7 @@ async def status(self) -> str: AsyncJobStatusError: If retrieval fails or the job is not found. References: - - `list-async-jobs-by-status-or-get-the-status-of-specific-job `__ + - `list-async-jobs-by-status-or-get-the-status-of-specific-job `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint=f"/_api/job/{self._id}") response = await self._conn.send_request(request) @@ -101,7 +101,7 @@ async def result(self) -> T: is still pending. 
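+ Example: + A polling sketch (assumes ``job`` was returned by an API call on an ``AsyncDatabase`` created with ``return_result=True``, and that ``import asyncio`` is in scope): + + while await job.status() != "done": + await asyncio.sleep(0.1) # back off between status checks + value = await job.result()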
References: - - `get-the-results-of-an-async-job `__ + - `get-the-results-of-an-async-job `__ """ # noqa: E501 request = Request(method=Method.PUT, endpoint=f"/_api/job/{self._id}") response = await self._conn.send_request(request) @@ -142,7 +142,7 @@ async def cancel(self, ignore_missing: bool = False) -> bool: AsyncJobCancelError: If cancellation fails. References: - - `cancel-an-async-job `__ + - `cancel-an-async-job `__ """ # noqa: E501 request = Request(method=Method.PUT, endpoint=f"/_api/job/{self._id}/cancel") response = await self._conn.send_request(request) @@ -173,7 +173,7 @@ async def clear( AsyncJobClearError: If deletion fails. References: - - `delete-async-job-results `__ + - `delete-async-job-results `__ """ # noqa: E501 request = Request(method=Method.DELETE, endpoint=f"/_api/job/{self._id}") resp = await self._conn.send_request(request) diff --git a/arangoasync/replication.py b/arangoasync/replication.py index 9d96709..436dc94 100644 --- a/arangoasync/replication.py +++ b/arangoasync/replication.py @@ -64,7 +64,7 @@ async def inventory( ReplicationInventoryError: If retrieval fails. References: - - `get-a-replication-inventory `__ + - `get-a-replication-inventory `__ """ # noqa: E501 params: Params = dict() params["batchId"] = batch_id @@ -112,7 +112,7 @@ async def dump( ReplicationDumpError: If retrieval fails. References: - - `get-a-replication-dump `__ + - `get-a-replication-dump `__ """ # noqa: E501 params: Params = dict() params["collection"] = collection @@ -149,7 +149,7 @@ async def cluster_inventory( ReplicationClusterInventoryError: If retrieval fails. References: - - `get-the-cluster-collections-and-indexes `__ + - `get-the-cluster-collections-and-indexes `__ """ # noqa: E501 params: Params = {} if include_system is not None: @@ -179,7 +179,7 @@ async def logger_state(self) -> Result[Json]: ReplicationLoggerStateError: If retrieval fails. References: - - `get-the-replication-logger-state `__ + - `get-the-replication-logger-state `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -204,7 +204,7 @@ async def applier_config(self) -> Result[Json]: ReplicationApplierConfigError: If retrieval fails. References: - - `get-the-replication-applier-configuration `__ + - `get-the-replication-applier-configuration `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -229,7 +229,7 @@ async def applier_state(self) -> Result[Json]: ReplicationApplierStateError: If retrieval fails. References: - - `get-the-replication-applier-state `__ + - `get-the-replication-applier-state `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -254,7 +254,7 @@ async def server_id(self) -> Result[str]: ReplicationServerIDError: If retrieval fails. 
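+ Example: + A minimal sketch (the ``db.replication`` accessor name is an assumption): + + state = await db.replication.logger_state() + server_id = await db.replication.server_id()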
References: - - `get-the-replication-server-id `__ + - `get-the-replication-server-id `__ """ # noqa: E501 request = Request( method=Method.GET, diff --git a/arangoasync/typings.py b/arangoasync/typings.py index 0d85035..d6adb4d 100644 --- a/arangoasync/typings.py +++ b/arangoasync/typings.py @@ -223,7 +223,7 @@ class KeyOptions(JsonWrapper): } References: - - `create-a-collection `__ + - `create-a-collection `__ """ # noqa: E501 def __init__( @@ -310,7 +310,7 @@ class CollectionInfo(JsonWrapper): } References: - - `get-the-collection-information `__ + - `get-the-collection-information `__ """ # noqa: E501 def __init__(self, data: Json) -> None: @@ -384,7 +384,7 @@ class UserInfo(JsonWrapper): } References: - - `create-a-user `__ + - `create-a-user `__ """ # noqa: E501 def __init__( @@ -484,7 +484,7 @@ class ServerStatusInformation(JsonWrapper): } References: - - `get-server-status-information `__ + - `get-server-status-information `__ """ # noqa: E501 def __init__(self, data: Json) -> None: @@ -543,7 +543,7 @@ class DatabaseProperties(JsonWrapper): """Properties of the database. References: - - `get-information-about-the-current-database `__ + - `get-information-about-the-current-database `__ """ # noqa: E501 def __init__(self, data: Json, strip_result: bool = False) -> None: @@ -650,7 +650,7 @@ class CollectionProperties(JsonWrapper): } References: - - `get-the-properties-of-a-collection `__ + - `get-the-properties-of-a-collection `__ """ # noqa: E501 def __init__(self, data: Json) -> None: @@ -870,7 +870,7 @@ class CollectionStatistics(JsonWrapper): } References: - - `get-the-collection-statistics `__ + - `get-the-collection-statistics `__ """ # noqa: E501 def __init__(self, data: Json) -> None: @@ -976,7 +976,7 @@ class IndexProperties(JsonWrapper): } References: - - `get-an-index `__ + - `get-an-index `__ """ # noqa: E501 def __init__(self, data: Json) -> None: @@ -1253,7 +1253,7 @@ class QueryProperties(JsonWrapper): } References: - - `create-a-cursor `__ + - `create-a-cursor `__ """ # noqa: E501 def __init__( @@ -1414,7 +1414,7 @@ class QueryExecutionPlan(JsonWrapper): """The execution plan of an AQL query. References: - - `plan `__ + - `plan `__ """ # noqa: E501 def __init__(self, data: Json) -> None: @@ -1468,7 +1468,7 @@ class QueryExecutionProfile(JsonWrapper): } References: - - `profile `__ + - `profile `__ """ # noqa: E501 def __init__(self, data: Json) -> None: @@ -1536,7 +1536,7 @@ class QueryExecutionStats(JsonWrapper): } References: - - `stats `__ + - `stats `__ """ # noqa: E501 def __init__(self, data: Json) -> None: @@ -1615,7 +1615,7 @@ class QueryExecutionExtra(JsonWrapper): """Extra information about the query result. 
References: - - `extra `__ + - `extra `__ """ # noqa: E501 def __init__(self, data: Json) -> None: @@ -1659,7 +1659,7 @@ class QueryTrackingConfiguration(JsonWrapper): } References: - - `get-the-aql-query-tracking-configuration `__ + - `get-the-aql-query-tracking-configuration `__ """ # noqa: E501 def __init__(self, data: Json) -> None: @@ -1718,7 +1718,7 @@ class QueryExplainOptions(JsonWrapper): } References: - - `explain-an-aql-query `__ + - `explain-an-aql-query `__ """ # noqa: E501 def __init__( @@ -1764,8 +1764,8 @@ class QueryCacheProperties(JsonWrapper): } References: - - `get-the-aql-query-results-cache-configuration `__ - - `set-the-aql-query-results-cache-configuration `__ + - `get-the-aql-query-results-cache-configuration `__ + - `set-the-aql-query-results-cache-configuration `__ """ # noqa: E501 def __init__(self, data: Json) -> None: @@ -1818,9 +1818,9 @@ class GraphProperties(JsonWrapper): } References: - - `get-a-graph `__ - - `list-all-graphs `__ - - `create-a-graph `__ + - `get-a-graph `__ + - `list-all-graphs `__ + - `create-a-graph `__ """ # noqa: E501 def __init__(self, data: Json) -> None: @@ -1927,7 +1927,7 @@ class GraphOptions(JsonWrapper): graph. References: - - `create-a-graph `__ + - `create-a-graph `__ """ # noqa: E501 def __init__( @@ -1982,7 +1982,7 @@ class VertexCollectionOptions(JsonWrapper): be a string and a valid collection name. References: - - `add-a-vertex-collection `__ + - `add-a-vertex-collection `__ """ # noqa: E501 def __init__( @@ -2009,7 +2009,7 @@ class EdgeDefinitionOptions(JsonWrapper): be a string and a valid collection name. References: - - `add-an-edge-definition `__ + - `add-an-edge-definition `__ """ # noqa: E501 def __init__( diff --git a/docs/analyzer.rst b/docs/analyzer.rst index cd92018..851ab02 100644 --- a/docs/analyzer.rst +++ b/docs/analyzer.rst @@ -3,7 +3,7 @@ Analyzers For more information on analyzers, refer to `ArangoDB Manual`_. -.. _ArangoDB Manual: https://docs.arangodb.com +.. _ArangoDB Manual: https://docs.arango.ai **Example:** diff --git a/docs/aql.rst b/docs/aql.rst index 69a9bf6..97d4f6c 100644 --- a/docs/aql.rst +++ b/docs/aql.rst @@ -7,7 +7,7 @@ operations such as creating or deleting :doc:`databases `, :doc:`collections ` or :doc:`indexes `. For more information, refer to `ArangoDB Manual`_. -.. _ArangoDB Manual: https://docs.arangodb.com +.. _ArangoDB Manual: https://docs.arango.ai AQL Queries =========== diff --git a/docs/backup.rst b/docs/backup.rst index de36041..93085f0 100644 --- a/docs/backup.rst +++ b/docs/backup.rst @@ -5,7 +5,7 @@ Hot Backups are near instantaneous consistent snapshots of an entire ArangoDB de This includes all databases, collections, indexes, Views, graphs, and users at any given time. For more information, refer to `ArangoDB Manual`_. -.. _ArangoDB Manual: https://docs.arangodb.com +.. _ArangoDB Manual: https://docs.arango.ai **Example:** diff --git a/docs/certificates.rst b/docs/certificates.rst index ee49e13..c0ba7af 100644 --- a/docs/certificates.rst +++ b/docs/certificates.rst @@ -129,4 +129,4 @@ See the `ArangoDB Manual`_ for more information on security features. # Reload TLS data tls = await db.reload_tls() -.. _ArangoDB Manual: https://docs.arangodb.com/stable/develop/http-api/security/ +.. 
_ArangoDB Manual: https://docs.arango.ai/stable/develop/http-api/security/ diff --git a/docs/cluster.rst b/docs/cluster.rst index c5e58aa..d5c4908 100644 --- a/docs/cluster.rst +++ b/docs/cluster.rst @@ -6,7 +6,7 @@ cluster nodes and the cluster as a whole, as well as monitor and administrate cluster deployments. For more information on the design and architecture, refer to `ArangoDB Manual`_. -.. _ArangoDB Manual: https://docs.arangodb.com +.. _ArangoDB Manual: https://docs.arango.ai .. code-block:: python diff --git a/docs/document.rst b/docs/document.rst index da6434b..09b87e0 100644 --- a/docs/document.rst +++ b/docs/document.rst @@ -23,7 +23,7 @@ For more information on documents and associated terminologies, refer to `ArangoDB Manual`_. Here is an example of a valid document in "students" collection: -.. _ArangoDB Manual: https://docs.arangodb.com +.. _ArangoDB Manual: https://docs.arango.ai .. code-block:: json diff --git a/docs/foxx.rst b/docs/foxx.rst index 818c80e..91e3423 100644 --- a/docs/foxx.rst +++ b/docs/foxx.rst @@ -4,7 +4,7 @@ Foxx **Foxx** is a microservice framework which lets you define custom HTTP endpoints that extend ArangoDB's REST API. For more information, refer to `ArangoDB Manual`_. -.. _ArangoDB Manual: https://docs.arangodb.com +.. _ArangoDB Manual: https://docs.arango.ai **Example:** diff --git a/docs/graph.rst b/docs/graph.rst index 0f0bbbf..b2c2467 100644 --- a/docs/graph.rst +++ b/docs/graph.rst @@ -7,7 +7,7 @@ A **graph** consists of vertices and edges. Vertices are stored as documents in their relations are specified with :ref:`edge definitions `. For more information, refer to `ArangoDB Manual`_. -.. _ArangoDB Manual: https://docs.arangodb.com +.. _ArangoDB Manual: https://docs.arango.ai **Example:** diff --git a/docs/indexes.rst b/docs/indexes.rst index 911efaa..63e2359 100644 --- a/docs/indexes.rst +++ b/docs/indexes.rst @@ -7,7 +7,7 @@ cannot be deleted or modified. Every edge collection has additional indexes on fields ``_from`` and ``_to``. For more information on indexes, refer to `ArangoDB Manual`_. -.. _ArangoDB Manual: https://docs.arangodb.com +.. _ArangoDB Manual: https://docs.arango.ai **Example:** diff --git a/docs/overview.rst b/docs/overview.rst index f723234..77c0fc7 100644 --- a/docs/overview.rst +++ b/docs/overview.rst @@ -64,7 +64,7 @@ You may also use the client without a context manager, but you must ensure to cl Another example with `graphs`_: -.. _graphs: https://docs.arangodb.com/stable/graphs/ +.. _graphs: https://docs.arango.ai/stable/graphs/ .. code-block:: python diff --git a/docs/view.rst b/docs/view.rst index f680b54..3a1ef06 100644 --- a/docs/view.rst +++ b/docs/view.rst @@ -4,7 +4,7 @@ Views All types of views are supported. For more information on **view** management, refer to `ArangoDB Manual`_. -.. _ArangoDB Manual: https://docs.arangodb.com +.. _ArangoDB Manual: https://docs.arango.ai **Example:** @@ -63,7 +63,7 @@ management, refer to `ArangoDB Manual`_. For more information on the content of view **properties**, see `Search Alias Views`_ and `Arangosearch Views`_. -.. _Search Alias Views: https://docs.arangodb.com/stable/develop/http-api/views/search-alias-views/ -.. _Arangosearch Views: https://docs.arangodb.com/stable/develop/http-api/views/arangosearch-views/ +.. _Search Alias Views: https://docs.arango.ai/stable/develop/http-api/views/search-alias-views/ +..
_Arangosearch Views: https://docs.arango.ai/stable/develop/http-api/views/arangosearch-views/ Refer to :class:`arangoasync.database.StandardDatabase` class for API specification. From 4f7bafb05d82d68b97e39b2b22e6b55add835d43 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Sat, 20 Dec 2025 23:55:58 +0800 Subject: [PATCH 46/47] Fixing job parameters --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index c7f0fdd..cb02c17 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -87,7 +87,7 @@ jobs: fi if [ << parameters.arangodb_license >> != "enterprise" ]; then - args+=("--skip enterprise") + args+=("--skip" "enterprise") fi echo "Running pytest with args: ${args[@]}" From 6e9b9d587aaacfb0ea39cdd66518d3f4bb725439 Mon Sep 17 00:00:00 2001 From: Alex Petenchea Date: Sun, 21 Dec 2025 00:15:31 +0800 Subject: [PATCH 47/47] URL fix --- README.md | 2 +- arangoasync/aql.py | 38 +++---- arangoasync/backup.py | 12 +-- arangoasync/cluster.py | 24 ++--- arangoasync/collection.py | 90 ++++++++--------- arangoasync/cursor.py | 6 +- arangoasync/database.py | 198 ++++++++++++++++++------------------- arangoasync/foxx.py | 42 ++++---- arangoasync/graph.py | 38 +++---- arangoasync/job.py | 10 +- arangoasync/replication.py | 14 +-- arangoasync/typings.py | 48 ++++----- arangoasync/version.py | 2 +- docs/certificates.rst | 2 +- docs/overview.rst | 2 +- docs/view.rst | 4 +- 16 files changed, 266 insertions(+), 266 deletions(-) diff --git a/README.md b/README.md index 1232efa..e35c413 100644 --- a/README.md +++ b/README.md @@ -75,7 +75,7 @@ async def main(): student_names.append(doc["name"]) ``` -Another example with [graphs](https://docs.arango.ai/stable/graphs/): +Another example with [graphs](https://docs.arango.ai/arangodb/stable/graphs/): ```python async def main(): diff --git a/arangoasync/aql.py b/arangoasync/aql.py index ec8efe4..ea57b75 100644 --- a/arangoasync/aql.py +++ b/arangoasync/aql.py @@ -78,7 +78,7 @@ async def entries(self) -> Result[Jsons]: AQLCacheEntriesError: If retrieval fails. References: - - `list-the-entries-of-the-aql-query-results-cache `__ + - `list-the-entries-of-the-aql-query-results-cache `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/query-cache/entries") @@ -99,7 +99,7 @@ async def plan_entries(self) -> Result[Jsons]: AQLCacheEntriesError: If retrieval fails. References: - - `list-the-entries-of-the-aql-query-plan-cache `__ + - `list-the-entries-of-the-aql-query-plan-cache `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/query-plan-cache") @@ -117,7 +117,7 @@ async def clear(self) -> Result[None]: AQLCacheClearError: If clearing the cache fails. References: - - `clear-the-aql-query-results-cache `__ + - `clear-the-aql-query-results-cache `__ """ # noqa: E501 request = Request(method=Method.DELETE, endpoint="/_api/query-cache") @@ -134,7 +134,7 @@ async def clear_plan(self) -> Result[None]: AQLCacheClearError: If clearing the cache fails. References: - - `clear-the-aql-query-plan-cache `__ + - `clear-the-aql-query-plan-cache `__ """ # noqa: E501 request = Request(method=Method.DELETE, endpoint="/_api/query-plan-cache") @@ -154,7 +154,7 @@ async def properties(self) -> Result[QueryCacheProperties]: AQLCachePropertiesError: If retrieval fails. 
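+ Example: + A minimal sketch (the ``db.aql.cache`` accessor name is an assumption): + + await db.aql.cache.configure(mode="on") # enable result caching + props = await db.aql.cache.properties() + entries = await db.aql.cache.entries()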
References: - - `get-the-aql-query-results-cache-configuration `__ + - `get-the-aql-query-results-cache-configuration `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/query-cache/properties") @@ -193,7 +193,7 @@ async def configure( AQLCacheConfigureError: If setting the configuration fails. References: - - `set-the-aql-query-results-cache-configuration `__ + - `set-the-aql-query-results-cache-configuration `__ """ # noqa: E501 data: Json = dict() if mode is not None: @@ -298,7 +298,7 @@ async def execute( Cursor: Result cursor. References: - - `create-a-cursor `__ + - `create-a-cursor `__ """ # noqa: E501 data: Json = dict(query=query) if count is not None: @@ -353,7 +353,7 @@ async def tracking(self) -> Result[QueryTrackingConfiguration]: AQLQueryTrackingGetError: If retrieval fails. References: - - `get-the-aql-query-tracking-configuration `__ + - `get-the-aql-query-tracking-configuration `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/query/properties") @@ -397,7 +397,7 @@ async def set_tracking( AQLQueryTrackingSetError: If setting the configuration fails. References: - - `update-the-aql-query-tracking-configuration `__ + - `update-the-aql-query-tracking-configuration `__ """ # noqa: E501 data: Json = dict() @@ -462,7 +462,7 @@ async def queries(self, all_queries: bool = False) -> Result[Jsons]: AQLQueryListError: If retrieval fails. References: - - `list-the-running-queries `__ + - `list-the-running-queries `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -493,7 +493,7 @@ async def slow_queries(self, all_queries: bool = False) -> Result[Jsons]: AQLQueryListError: If retrieval fails. References: - - `list-the-slow-aql-queries `__ + - `list-the-slow-aql-queries `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -523,7 +523,7 @@ async def clear_slow_queries(self, all_queries: bool = False) -> Result[None]: AQLQueryClearError: If retrieval fails. References: - - `clear-the-list-of-slow-aql-queries `__ + - `clear-the-list-of-slow-aql-queries `__ """ # noqa: E501 request = Request( method=Method.DELETE, @@ -560,7 +560,7 @@ async def kill( AQLQueryKillError: If killing the query fails. References: - - `kill-a-running-aql-query `__ + - `kill-a-running-aql-query `__ """ # noqa: E501 request = Request( method=Method.DELETE, @@ -598,7 +598,7 @@ async def explain( AQLQueryExplainError: If retrieval fails. References: - - `explain-an-aql-query `__ + - `explain-an-aql-query `__ """ # noqa: E501 data: Json = dict(query=query) if bind_vars is not None: @@ -634,7 +634,7 @@ async def validate(self, query: str) -> Result[Json]: AQLQueryValidateError: If validation fails. References: - - `parse-an-aql-query `__ + - `parse-an-aql-query `__ """ # noqa: E501 request = Request( method=Method.POST, @@ -659,7 +659,7 @@ async def query_rules(self) -> Result[Jsons]: AQLQueryRulesGetError: If retrieval fails. References: - - `list-all-aql-optimizer-rules `__ + - `list-all-aql-optimizer-rules `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/query/rules") @@ -684,7 +684,7 @@ async def functions(self, namespace: Optional[str] = None) -> Result[Jsons]: AQLFunctionListError: If retrieval fails. References: - - `list-the-registered-user-defined-aql-functions `__ + - `list-the-registered-user-defined-aql-functions `__ """ # noqa: E501 params: Json = dict() if namespace is not None: @@ -726,7 +726,7 @@ async def create_function( AQLFunctionCreateError: If registration fails. 
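+ Example: + A sketch of registering and removing a user-defined function (the ``myfuncs`` namespace is illustrative, and the ``create_function(name, code)`` signature is an assumption): + + await db.aql.create_function("myfuncs::double", "function (x) { return 2 * x; }") + await db.aql.delete_function("myfuncs::double")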
References: - - `create-a-user-defined-aql-function `__ + - `create-a-user-defined-aql-function `__ """ # noqa: E501 request = Request( method=Method.POST, @@ -765,7 +765,7 @@ async def delete_function( AQLFunctionDeleteError: If removal fails. References: - - `remove-a-user-defined-aql-function `__ + - `remove-a-user-defined-aql-function `__ """ # noqa: E501 params: Json = dict() if group is not None: diff --git a/arangoasync/backup.py b/arangoasync/backup.py index e0847e0..7be69cd 100644 --- a/arangoasync/backup.py +++ b/arangoasync/backup.py @@ -49,7 +49,7 @@ async def get(self, backup_id: Optional[str] = None) -> Result[Json]: BackupGetError: If the operation fails. References: - - `list-backups `__ + - `list-backups `__ """ # noqa: E501 data: Json = {} if backup_id is not None: @@ -97,7 +97,7 @@ async def create( BackupCreateError: If the backup creation fails. References: - - `create-backup `__ + - `create-backup `__ """ # noqa: E501 data: Json = {} if label is not None: @@ -137,7 +137,7 @@ async def restore(self, backup_id: str) -> Result[Json]: BackupRestoreError: If the restore operation fails. References: - - `restore-backup `__ + - `restore-backup `__ """ # noqa: E501 data: Json = {"id": backup_id} request = Request( @@ -165,7 +165,7 @@ async def delete(self, backup_id: str) -> None: BackupDeleteError: If the delete operation fails. References: - - `delete-backup `__ + - `delete-backup `__ """ # noqa: E501 data: Json = {"id": backup_id} request = Request( @@ -209,7 +209,7 @@ async def upload( BackupUploadError: If upload operation fails. References: - - `upload-a-backup-to-a-remote-repository `__ + - `upload-a-backup-to-a-remote-repository `__ """ # noqa: E501 data: Json = {} if upload_id is not None: @@ -265,7 +265,7 @@ async def download( BackupDownloadError: If the download operation fails. References: - - `download-a-backup-from-a-remote-repository `__ + - `download-a-backup-from-a-remote-repository `__ """ # noqa: E501 data: Json = {} if download_id is not None: diff --git a/arangoasync/cluster.py b/arangoasync/cluster.py index 39e3d56..fa42ea3 100644 --- a/arangoasync/cluster.py +++ b/arangoasync/cluster.py @@ -45,7 +45,7 @@ async def health(self) -> Result[Json]: ClusterHealthError: If retrieval fails. References: - - `get-the-cluster-health `__ + - `get-the-cluster-health `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -74,7 +74,7 @@ async def statistics(self, db_server: str) -> Result[Json]: ClusterStatisticsError: If retrieval fails. References: - - `get-the-statistics-of-a-db-server `__ + - `get-the-statistics-of-a-db-server `__ """ # noqa: E501 params: Params = {"DBserver": db_server} @@ -103,7 +103,7 @@ async def endpoints(self) -> Result[List[str]]: ClusterEndpointsError: If retrieval fails. References: - - `list-all-coordinator-endpoints `__ + - `list-all-coordinator-endpoints `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -129,7 +129,7 @@ async def server_id(self) -> Result[str]: ClusterServerIDError: If retrieval fails. References: - - `get-the-server-id `__ + - `get-the-server-id `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -154,7 +154,7 @@ async def server_role(self) -> Result[str]: ClusterServerRoleError: If retrieval fails. References: - - `get-the-server-role `__ + - `get-the-server-role `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -182,7 +182,7 @@ async def toggle_maintenance_mode(self, mode: str) -> Result[Json]: ClusterMaintenanceModeError: If the toggle operation fails. 
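+ Example: + A minimal sketch against a cluster deployment (the ``db.cluster`` accessor name is an assumption; the ``"on"``/``"off"`` mode strings follow the HTTP API): + + health = await db.cluster.health() + await db.cluster.toggle_maintenance_mode("on") # supervision off + await db.cluster.toggle_maintenance_mode("off") # supervision back on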
References: - - `toggle-cluster-maintenance-mode `__ + - `toggle-cluster-maintenance-mode `__ """ # noqa: E501 request = Request( method=Method.PUT, @@ -212,7 +212,7 @@ async def server_maintenance_mode(self, server_id: str) -> Result[Json]: ClusterMaintenanceModeError: If retrieval fails. References: - - `get-the-maintenance-status-of-a-db-server `__ + - `get-the-maintenance-status-of-a-db-server `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -242,7 +242,7 @@ async def toggle_server_maintenance_mode( ClusterMaintenanceModeError: If the operation fails. References: - - `set-the-maintenance-status-of-a-db-server `__ + - `set-the-maintenance-status-of-a-db-server `__ """ # noqa: E501 data: Json = {"mode": mode} if timeout is not None: @@ -271,7 +271,7 @@ async def calculate_imbalance(self) -> Result[Json]: ClusterRebalanceError: If retrieval fails. References: - - `get-the-current-cluster-imbalance `__ + - `get-the-current-cluster-imbalance `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_admin/cluster/rebalance") @@ -315,7 +315,7 @@ async def calculate_rebalance_plan( ClusterRebalanceError: If retrieval fails. References: - - `compute-a-set-of-move-shard-operations-to-improve-balance `__ + - `compute-a-set-of-move-shard-operations-to-improve-balance `__ """ # noqa: E501 data: Json = dict(version=version) if databases_excluded is not None: @@ -380,7 +380,7 @@ async def rebalance( ClusterRebalanceError: If retrieval fails. References: - - `compute-and-execute-a-set-of-move-shard-operations-to-improve-balance `__ + - `compute-and-execute-a-set-of-move-shard-operations-to-improve-balance `__ """ # noqa: E501 data: Json = dict(version=version) if databases_excluded is not None: @@ -431,7 +431,7 @@ async def execute_rebalance_plan( ClusterRebalanceError: If the execution fails. References: - - `execute-a-set-of-move-shard-operations `__ + - `execute-a-set-of-move-shard-operations `__ """ # noqa: E501 data: Json = dict(version=version, moves=moves) diff --git a/arangoasync/collection.py b/arangoasync/collection.py index fae501a..cc372bf 100644 --- a/arangoasync/collection.py +++ b/arangoasync/collection.py @@ -333,7 +333,7 @@ async def indexes( IndexListError: If retrieval fails. References: - - `list-all-indexes-of-a-collection `__ + - `list-all-indexes-of-a-collection `__ """ # noqa: E501 params: Params = dict(collection=self._name) if with_stats is not None: @@ -368,7 +368,7 @@ async def get_index(self, id: str | int) -> Result[IndexProperties]: IndexGetError: If retrieval fails. References: - `get-an-index `__ + `get-an-index `__ """ # noqa: E501 if isinstance(id, int): full_id = f"{self._name}/{id}" @@ -408,12 +408,12 @@ async def add_index( IndexCreateError: If index creation fails. References: - - `create-an-index `__ - - `create-a-persistent-index `__ - - `create-an-inverted-index `__ - - `create-a-ttl-index `__ - - `create-a-multi-dimensional-index `__ - - `create-a-geo-spatial-index `__ + - `create-an-index `__ + - `create-a-persistent-index `__ + - `create-an-inverted-index `__ + - `create-a-ttl-index `__ + - `create-a-multi-dimensional-index `__ + - `create-a-geo-spatial-index `__ """ # noqa: E501 options = options or {} request = Request( @@ -447,7 +447,7 @@ async def delete_index( IndexDeleteError: If deletion fails. References: - - `delete-an-index `__ + - `delete-an-index `__ """ # noqa: E501 if isinstance(id, int): full_id = f"{self._name}/{id}" @@ -478,7 +478,7 @@ async def load_indexes(self) -> Result[bool]: IndexLoadError: If loading fails. 
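+ Example: + A sketch of the index workflow (assumes ``col = db.collection("students")`` and the ``add_index(type, fields, options)`` keyword names): + + idx = await col.add_index(type="persistent", fields=["email"], options={"unique": True}) + await col.load_indexes() # hint all indexes into memory + await col.delete_index(idx.id) # assumes IndexProperties exposes the full id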
References: - - `load-collection-indexes-into-memory `__ + - `load-collection-indexes-into-memory `__ """ # noqa: E501 request = Request( method=Method.PUT, @@ -499,7 +499,7 @@ async def recalculate_count(self) -> None: CollectionRecalculateCountError: If re-calculation fails. References: - - `recalculate-the-document-count-of-a-collection `__ + - `recalculate-the-document-count-of-a-collection `__ """ # noqa: E501 request = Request( method=Method.PUT, @@ -522,7 +522,7 @@ async def properties(self) -> Result[CollectionProperties]: CollectionPropertiesError: If retrieval fails. References: - - `get-the-properties-of-a-collection `__ + - `get-the-properties-of-a-collection `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -572,7 +572,7 @@ async def configure( CollectionConfigureError: If configuration fails. References: - - `change-the-properties-of-a-collection `__ + - `change-the-properties-of-a-collection `__ """ # noqa: E501 data: Json = {} if cache_enabled is not None: @@ -617,7 +617,7 @@ async def rename(self, new_name: str) -> None: CollectionRenameError: If rename fails. References: - - `rename-a-collection `__ + - `rename-a-collection `__ """ # noqa: E501 data: Json = {"name": new_name} request = Request( @@ -644,7 +644,7 @@ async def compact(self) -> Result[CollectionInfo]: CollectionCompactError: If compaction fails. References: - - `compact-a-collection `__ + - `compact-a-collection `__ """ # noqa: E501 request = Request( method=Method.PUT, @@ -677,7 +677,7 @@ async def truncate( CollectionTruncateError: If truncation fails. References: - - `truncate-a-collection `__ + - `truncate-a-collection `__ """ # noqa: E501 params: Params = {} if wait_for_sync is not None: @@ -707,7 +707,7 @@ async def count(self) -> Result[int]: DocumentCountError: If retrieval fails. References: - - `get-the-document-count-of-a-collection `__ + - `get-the-document-count-of-a-collection `__ """ # noqa: E501 request = Request( method=Method.GET, endpoint=f"/_api/collection/{self.name}/count" @@ -731,7 +731,7 @@ async def statistics(self) -> Result[CollectionStatistics]: CollectionStatisticsError: If retrieval fails. References: - - `get-the-collection-statistics `__ + - `get-the-collection-statistics `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -761,7 +761,7 @@ async def responsible_shard(self, document: Json) -> Result[str]: CollectionResponsibleShardError: If retrieval fails. References: - - `get-the-responsible-shard-for-a-document `__ + - `get-the-responsible-shard-for-a-document `__ """ # noqa: E501 request = Request( method=Method.PUT, @@ -793,7 +793,7 @@ async def shards(self, details: Optional[bool] = None) -> Result[Json]: CollectionShardsError: If retrieval fails. References: - - `get-the-shard-ids-of-a-collection `__ + - `get-the-shard-ids-of-a-collection `__ """ # noqa: E501 params: Params = {} if details is not None: @@ -822,7 +822,7 @@ async def revision(self) -> Result[str]: CollectionRevisionError: If retrieval fails. References: - - `get-the-collection-revision-id `__ + - `get-the-collection-revision-id `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -852,7 +852,7 @@ async def checksum( CollectionChecksumError: If retrieval fails. References: - - `get-the-collection-checksum `__ + - `get-the-collection-checksum `__ """ # noqa: E501 params: Params = {} if with_rev is not None: @@ -899,7 +899,7 @@ async def has( DocumentGetError: If retrieval fails. 
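+ Example: + A small sketch (assumes ``col`` is a ``StandardCollection``; the document handle is illustrative): + + if await col.has("students/jenny"): + doc = await col.get("students/jenny")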
References: - - `get-a-document-header `__ + - `get-a-document-header `__ """ # noqa: E501 handle = self._get_doc_id(document) @@ -956,7 +956,7 @@ async def get_many( DocumentGetError: If retrieval fails. References: - - `get-multiple-documents `__ + - `get-multiple-documents `__ """ # noqa: E501 params: Params = {"onlyget": True} if ignore_revs is not None: @@ -1283,7 +1283,7 @@ async def insert_many( DocumentInsertError: If insertion fails. References: - - `create-multiple-documents `__ + - `create-multiple-documents `__ """ # noqa: E501 params: Params = {} if wait_for_sync is not None: @@ -1373,7 +1373,7 @@ async def replace_many( DocumentReplaceError: If replacing fails. References: - - `replace-multiple-documents `__ + - `replace-multiple-documents `__ """ # noqa: E501 params: Params = {} if wait_for_sync is not None: @@ -1466,7 +1466,7 @@ async def update_many( DocumentUpdateError: If update fails. References: - - `update-multiple-documents `__ + - `update-multiple-documents `__ """ # noqa: E501 params: Params = {} if wait_for_sync is not None: @@ -1548,7 +1548,7 @@ async def delete_many( DocumentRemoveError: If removal fails. References: - - `remove-multiple-documents `__ + - `remove-multiple-documents `__ """ # noqa: E501 params: Params = {} if wait_for_sync is not None: @@ -1640,7 +1640,7 @@ async def import_bulk( DocumentInsertError: If import fails. References: - - `import-json-data-as-documents `__ + - `import-json-data-as-documents `__ """ # noqa: E501 params: Params = dict() params["collection"] = self.name @@ -1730,7 +1730,7 @@ async def get( DocumentParseError: If the document is malformed. References: - - `get-a-document `__ + - `get-a-document `__ """ # noqa: E501 handle = self._get_doc_id(document) @@ -1818,7 +1818,7 @@ async def insert( DocumentParseError: If the document is malformed. References: - - `create-a-document `__ + - `create-a-document `__ """ # noqa: E501 if isinstance(document, dict): document = cast(T, self._ensure_key_from_id(document)) @@ -1923,7 +1923,7 @@ async def update( DocumentUpdateError: If update fails. References: - - `update-a-document `__ + - `update-a-document `__ """ # noqa: E501 params: Params = {} if ignore_revs is not None: @@ -2017,7 +2017,7 @@ async def replace( DocumentReplaceError: If replace fails. References: - - `replace-a-document `__ + - `replace-a-document `__ """ # noqa: E501 params: Params = {} if ignore_revs is not None: @@ -2105,7 +2105,7 @@ async def delete( DocumentDeleteError: If deletion fails. References: - - `remove-a-document `__ + - `remove-a-document `__ """ # noqa: E501 handle = self._get_doc_id(cast(str | Json, document)) @@ -2232,7 +2232,7 @@ async def get( DocumentParseError: If the document is malformed. References: - - `get-a-vertex `__ + - `get-a-vertex `__ """ # noqa: E501 handle = self._get_doc_id(vertex) @@ -2294,7 +2294,7 @@ async def insert( DocumentParseError: If the document is malformed. References: - - `create-a-vertex `__ + - `create-a-vertex `__ """ # noqa: E501 if isinstance(vertex, dict): vertex = cast(T, self._ensure_key_from_id(vertex)) @@ -2359,7 +2359,7 @@ async def update( DocumentUpdateError: If update fails. References: - - `update-a-vertex `__ + - `update-a-vertex `__ """ # noqa: E501 params: Params = {} if wait_for_sync is not None: @@ -2434,7 +2434,7 @@ async def replace( DocumentReplaceError: If replace fails. 
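        Example:
            A minimal usage sketch; the graph handle, vertex collection
            name, and document body are illustrative assumptions:

            .. code-block:: python

                persons = graph.vertex_collection("persons")
                # The new body fully replaces the stored vertex.
                await persons.replace({"_key": "alice", "age": 30})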
References: - - `replace-a-vertex `__ + - `replace-a-vertex `__ """ # noqa: E501 params: Params = {} if wait_for_sync is not None: @@ -2506,7 +2506,7 @@ async def delete( DocumentDeleteError: If deletion fails. References: - - `remove-a-vertex `__ + - `remove-a-vertex `__ """ # noqa: E501 params: Params = {} if wait_for_sync is not None: @@ -2631,7 +2631,7 @@ async def get( DocumentParseError: If the document is malformed. References: - - `get-an-edge `__ + - `get-an-edge `__ """ # noqa: E501 handle = self._get_doc_id(edge) @@ -2694,7 +2694,7 @@ async def insert( DocumentParseError: If the document is malformed. References: - - `create-an-edge `__ + - `create-an-edge `__ """ # noqa: E501 if isinstance(edge, dict): edge = cast(T, self._ensure_key_from_id(edge)) @@ -2763,7 +2763,7 @@ async def update( DocumentUpdateError: If update fails. References: - - `update-an-edge `__ + - `update-an-edge `__ """ # noqa: E501 params: Params = {} if wait_for_sync is not None: @@ -2842,7 +2842,7 @@ async def replace( DocumentReplaceError: If replace fails. References: - - `replace-an-edge `__ + - `replace-an-edge `__ """ # noqa: E501 params: Params = {} if wait_for_sync is not None: @@ -2917,7 +2917,7 @@ async def delete( DocumentDeleteError: If deletion fails. References: - - `remove-an-edge `__ + - `remove-an-edge `__ """ # noqa: E501 params: Params = {} if wait_for_sync is not None: @@ -2978,7 +2978,7 @@ async def edges( EdgeListError: If retrieval fails. References: - - `get-inbound-and-outbound-edges `__ + - `get-inbound-and-outbound-edges `__ """ # noqa: E501 params: Params = { "vertex": self._get_doc_id(vertex, validate=False), diff --git a/arangoasync/cursor.py b/arangoasync/cursor.py index 1e3cc6c..68ecdad 100644 --- a/arangoasync/cursor.py +++ b/arangoasync/cursor.py @@ -192,8 +192,8 @@ async def fetch(self, batch_id: Optional[str] = None) -> List[Any]: CursorStateError: If the cursor ID is not set. References: - - `read-the-next-batch-from-a-cursor `__ - - `read-a-batch-from-the-cursor-again `__ + - `read-the-next-batch-from-a-cursor `__ + - `read-a-batch-from-the-cursor-again `__ """ # noqa: E501 if self._id is None: raise CursorStateError("Cursor ID is not set") @@ -229,7 +229,7 @@ async def close(self, ignore_missing: bool = False) -> bool: CursorCloseError: If the cursor failed to close. References: - - `delete-a-cursor `__ + - `delete-a-cursor `__ """ # noqa: E501 if self._id is None: return False diff --git a/arangoasync/database.py b/arangoasync/database.py index 8e700e5..c1dc1b9 100644 --- a/arangoasync/database.py +++ b/arangoasync/database.py @@ -258,7 +258,7 @@ async def properties(self) -> Result[DatabaseProperties]: DatabasePropertiesError: If retrieval fails. References: - - `get-information-about-the-current-database `__ + - `get-information-about-the-current-database `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/database/current") @@ -281,7 +281,7 @@ async def status(self) -> Result[ServerStatusInformation]: ServerSatusError: If retrieval fails. References: - - `get-server-status-information `__ + - `get-server-status-information `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_admin/status") @@ -305,7 +305,7 @@ async def databases(self) -> Result[List[str]]: DatabaseListError: If retrieval fails. 
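        Example:
            A minimal usage sketch; note that listing all databases
            typically requires access to the `_system` database:

            .. code-block:: python

                names = await db.databases()
                assert "_system" in names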
References: - - `list-all-databases `__ + - `list-all-databases `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/database") @@ -333,7 +333,7 @@ async def databases_accessible_to_user(self) -> Result[List[str]]: DatabaseListError: If retrieval fails. References: - - `list-the-accessible-databases `__ + - `list-the-accessible-databases `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/database/user") @@ -417,7 +417,7 @@ async def create_database( DatabaseCreateError: If creation fails. References: - - `create-a-database `__ + - `create-a-database `__ """ # noqa: E501 data: Json = {"name": name} @@ -478,7 +478,7 @@ async def delete_database( DatabaseDeleteError: If deletion fails. References: - - `drop-a-database `__ + - `drop-a-database `__ """ # noqa: E501 request = Request(method=Method.DELETE, endpoint=f"/_api/database/{name}") @@ -533,7 +533,7 @@ async def collections( CollectionListError: If retrieval fails. References: - - `list-all-collections `__ + - `list-all-collections `__ """ # noqa: E501 params: Params = {} if exclude_system is not None: @@ -661,7 +661,7 @@ async def create_collection( CollectionCreateError: If the operation fails. References: - - `create-a-collection `__ + - `create-a-collection `__ """ # noqa: E501 data: Json = {"name": name} if col_type is not None: @@ -751,7 +751,7 @@ async def delete_collection( CollectionDeleteError: If the operation fails. References: - - `drop-a-collection `__ + - `drop-a-collection `__ """ # noqa: E501 params: Params = {} if is_system is not None: @@ -782,7 +782,7 @@ async def key_generators(self) -> Result[List[str]]: CollectionKeyGeneratorsError: If retrieval fails. References: - - `get-the-available-key-generators `__ + - `get-the-available-key-generators `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/key-generators") @@ -821,7 +821,7 @@ async def has_document( DocumentGetError: If retrieval fails. References: - - `get-a-document-header `__ + - `get-a-document-header `__ """ # noqa: E501 col = Collection.get_col_name(document) return await self.collection(col).has( @@ -858,7 +858,7 @@ async def document( DocumentParseError: If the document is malformed. References: - - `get-a-document `__ + - `get-a-document `__ """ # noqa: E501 col: StandardCollection[Json, Json, Jsons] = self.collection( Collection.get_col_name(document) @@ -927,7 +927,7 @@ async def insert_document( DocumentParseError: If the document is malformed. References: - - `create-a-document `__ + - `create-a-document `__ """ # noqa: E501 col: StandardCollection[Json, Json, Jsons] = self.collection(collection) return await col.insert( @@ -998,7 +998,7 @@ async def update_document( DocumentUpdateError: If update fails. References: - - `update-a-document `__ + - `update-a-document `__ """ # noqa: E501 col: StandardCollection[Json, Json, Jsons] = self.collection( Collection.get_col_name(document) @@ -1063,7 +1063,7 @@ async def replace_document( DocumentReplaceError: If replace fails. References: - - `replace-a-document `__ + - `replace-a-document `__ """ # noqa: E501 col: StandardCollection[Json, Json, Jsons] = self.collection( Collection.get_col_name(document) @@ -1124,7 +1124,7 @@ async def delete_document( DocumentDeleteError: If deletion fails. 
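        Example:
            A minimal usage sketch; the document ID is an illustrative
            assumption:

            .. code-block:: python

                # The collection name is inferred from the document ID.
                await db.delete_document("students/001")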
References: - - `remove-a-document `__ + - `remove-a-document `__ """ # noqa: E501 col: StandardCollection[Json, Json, Jsons] = self.collection( Collection.get_col_name(document) @@ -1198,7 +1198,7 @@ async def graphs(self) -> Result[List[GraphProperties]]: GraphListError: If the operation fails. References: - - `list-all-graphs `__ + - `list-all-graphs `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/gharial") @@ -1253,7 +1253,7 @@ async def create_graph( GraphCreateError: If the operation fails. References: - - `create-a-graph `__ + - `create-a-graph `__ """ # noqa: E501 params: Params = {} if wait_for_sync is not None: @@ -1315,7 +1315,7 @@ async def delete_graph( GraphDeleteError: If the operation fails. References: - - `drop-a-graph `__ + - `drop-a-graph `__ """ # noqa: E501 params: Params = {} if drop_collections is not None: @@ -1347,8 +1347,8 @@ async def view(self, name: str) -> Result[Json]: ViewGetError: If the operation fails. References: - - `read-properties-of-a-view `__ - - `get-the-properties-of-a-view `__ + - `read-properties-of-a-view `__ + - `get-the-properties-of-a-view `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint=f"/_api/view/{name}/properties") @@ -1372,8 +1372,8 @@ async def view_info(self, name: str) -> Result[Json]: ViewGetError: If the operation fails. References: - - `get-information-about-a-view `_ - - `get-information-about-a-view `__ + - `get-information-about-a-view `_ + - `get-information-about-a-view `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint=f"/_api/view/{name}") @@ -1394,8 +1394,8 @@ async def views(self) -> Result[Jsons]: ViewListError: If the operation fails. References: - - `list-all-views `__ - - `list-all-views `__ + - `list-all-views `__ + - `list-all-views `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/view") @@ -1427,8 +1427,8 @@ async def create_view( ViewCreateError: If the operation fails. References: - - `create-a-search-alias-view `__ - - `create-an-arangosearch-view `__ + - `create-a-search-alias-view `__ + - `create-an-arangosearch-view `__ """ # noqa: E501 data: Json = {"name": name, "type": view_type} if properties is not None: @@ -1461,8 +1461,8 @@ async def replace_view(self, name: str, properties: Json) -> Result[Json]: ViewReplaceError: If the operation fails. References: - - `replace-the-properties-of-a-search-alias-view `__ - - `replace-the-properties-of-an-arangosearch-view `__ + - `replace-the-properties-of-a-search-alias-view `__ + - `replace-the-properties-of-an-arangosearch-view `__ """ # noqa: E501 request = Request( method=Method.PUT, @@ -1491,8 +1491,8 @@ async def update_view(self, name: str, properties: Json) -> Result[Json]: ViewUpdateError: If the operation fails. References: - - `update-the-properties-of-a-search-alias-view `__ - - `update-the-properties-of-an-arangosearch-view `__ + - `update-the-properties-of-a-search-alias-view `__ + - `update-the-properties-of-an-arangosearch-view `__ """ # noqa: E501 request = Request( method=Method.PATCH, @@ -1518,8 +1518,8 @@ async def rename_view(self, name: str, new_name: str) -> None: ViewRenameError: If the operation fails. References: - - `rename-a-view `__ - - `rename-a-view `__ + - `rename-a-view `__ + - `rename-a-view `__ """ # noqa: E501 request = Request( method=Method.PUT, @@ -1551,8 +1551,8 @@ async def delete_view( ViewDeleteError: If the operation fails. 
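        Example:
            A minimal usage sketch; the view name is an illustrative
            assumption:

            .. code-block:: python

                await db.delete_view("students_view")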
        References:
-            - `drop-a-view `__
-            - `drop-a-view `__
+            - `drop-a-view `__
+            - `drop-a-view `__
        """ # noqa: E501
        request = Request(method=Method.DELETE, endpoint=f"/_api/view/{name}")
@@ -1575,7 +1575,7 @@ async def analyzers(self) -> Result[Jsons]:
            AnalyzerListError: If the operation fails.

        References:
-            - `list-all-analyzers `__
+            - `list-all-analyzers `__
        """ # noqa: E501
        request = Request(method=Method.GET, endpoint="/_api/analyzer")
@@ -1597,7 +1597,7 @@ async def analyzer(self, name: str) -> Result[Json]:
            dict: Analyzer properties.

        References:
-            - `get-an-analyzer-definition `__
+            - `get-an-analyzer-definition `__
        """ # noqa: E501
        request = Request(method=Method.GET, endpoint=f"/_api/analyzer/{name}")
@@ -1632,7 +1632,7 @@ async def create_analyzer(
            AnalyzerCreateError: If the operation fails.

        References:
-            - `create-an-analyzer `__
+            - `create-an-analyzer `__
        """ # noqa: E501
        data: Json = {"name": name, "type": analyzer_type}
        if properties is not None:
@@ -1671,7 +1671,7 @@ async def delete_analyzer(
            AnalyzerDeleteError: If the operation fails.

        References:
-            - `remove-an-analyzer `__
+            - `remove-an-analyzer `__
        """ # noqa: E501
        params: Params = {}
        if force is not None:
@@ -1728,7 +1728,7 @@ async def user(self, username: str) -> Result[UserInfo]:
            UserGetError: If the operation fails.

        References:
-            - `get-a-user `__
+            - `get-a-user `__
        """ # noqa: E501
        request = Request(method=Method.GET, endpoint=f"/_api/user/{username}")
@@ -1757,7 +1757,7 @@ async def users(self) -> Result[Sequence[UserInfo]]:
            UserListError: If the operation fails.

        References:
-            - `list-available-users `__
+            - `list-available-users `__
        """ # noqa: E501
        request = Request(method=Method.GET, endpoint="/_api/user")
@@ -1792,7 +1792,7 @@ async def create_user(self, user: UserInfo | Json) -> Result[UserInfo]:
            await db.create_user({"user": "john", "password": "secret"})

        References:
-            - `create-a-user `__
+            - `create-a-user `__
        """ # noqa: E501
        if isinstance(user, dict):
            user = UserInfo(**user)
@@ -1832,7 +1832,7 @@ async def replace_user(self, user: UserInfo | Json) -> Result[UserInfo]:
            UserReplaceError: If the operation fails.

        References:
-            - `replace-a-user `__
+            - `replace-a-user `__
        """ # noqa: E501
        if isinstance(user, dict):
            user = UserInfo(**user)
@@ -1872,7 +1872,7 @@ async def update_user(self, user: UserInfo | Json) -> Result[UserInfo]:
            UserUpdateError: If the operation fails.

        References:
-            - `update-a-user `__
+            - `update-a-user `__
        """ # noqa: E501
        if isinstance(user, dict):
            user = UserInfo(**user)
@@ -1917,7 +1917,7 @@ async def delete_user(
            UserDeleteError: If the operation fails.

        References:
-            - `remove-a-user `__
+            - `remove-a-user `__
        """ # noqa: E501
        request = Request(method=Method.DELETE, endpoint=f"/_api/user/{username}")
@@ -1945,7 +1945,7 @@ async def permissions(self, username: str, full: bool = True) -> Result[Json]:
            PermissionListError: If the operation fails.

        References:
-            - `list-a-users-accessible-databases `__
+            - `list-a-users-accessible-databases `__
        """ # noqa: 501
        request = Request(
            method=Method.GET,
@@ -1981,8 +1981,8 @@ async def permission(
            PermissionGetError: If the operation fails.

        References:
-            - `get-a-users-database-access-level `__
-            - `get-a-users-collection-access-level `__
+            - `get-a-users-database-access-level `__
+            - `get-a-users-collection-access-level `__
        """ # noqa: 501
        endpoint = f"/_api/user/{username}/database/{database}"
        if collection is not None:
@@ -2022,8 +2022,8 @@ async def update_permission(
            is `False`.
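        Example:
            A hedged sketch; the argument names are assumptions based on
            the endpoint described above:

            .. code-block:: python

                # Grant read/write access to the "test" database.
                await db.update_permission("john", "rw", database="test")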
References: - - `set-a-users-database-access-level `__ - - `set-a-users-collection-access-level `__ + - `set-a-users-database-access-level `__ + - `set-a-users-collection-access-level `__ """ # noqa: E501 endpoint = f"/_api/user/{username}/database/{database}" if collection is not None: @@ -2067,8 +2067,8 @@ async def reset_permission( is `False`. References: - - `clear-a-users-database-access-level `__ - - `clear-a-users-collection-access-level `__ + - `clear-a-users-database-access-level `__ + - `clear-a-users-collection-access-level `__ """ # noqa: E501 endpoint = f"/_api/user/{username}/database/{database}" if collection is not None: @@ -2098,7 +2098,7 @@ async def jwt_secrets(self) -> Result[Json]: JWTSecretListError: If the operation fails. References: - - `get-information-about-the-loaded-jwt-secrets `__ + - `get-information-about-the-loaded-jwt-secrets `__ """ # noqa: 501 request = Request(method=Method.GET, endpoint="/_admin/server/jwt") @@ -2120,7 +2120,7 @@ async def reload_jwt_secrets(self) -> Result[Json]: JWTSecretReloadError: If the operation fails. References: - - `hot-reload-the-jwt-secrets-from-disk `__ + - `hot-reload-the-jwt-secrets-from-disk `__ """ # noqa: 501 request = Request( method=Method.POST, endpoint="/_admin/server/jwt", prefix_needed=False @@ -2155,7 +2155,7 @@ async def create_access_token( AccessTokenCreateError: If the operation fails. References: - - `create-an-access-token `__ + - `create-an-access-token `__ """ # noqa: E501 data: Json = { "name": name, @@ -2187,7 +2187,7 @@ async def delete_access_token(self, user: str, token_id: int) -> None: AccessTokenDeleteError: If the operation fails. References: - - `delete-an-access-token `__ + - `delete-an-access-token `__ """ # noqa: E501 request = Request( method=Method.DELETE, endpoint=f"/_api/token/{user}/{token_id}" @@ -2212,7 +2212,7 @@ async def list_access_tokens(self, user: str) -> Result[Jsons]: AccessTokenListError: If the operation fails. References: - - `list-all-access-tokens `__ + - `list-all-access-tokens `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint=f"/_api/token/{user}") @@ -2238,7 +2238,7 @@ async def tls(self) -> Result[Json]: ServerTLSError: If the operation fails. References: - - `get-the-tls-data `__ + - `get-the-tls-data `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_admin/server/tls") @@ -2262,7 +2262,7 @@ async def reload_tls(self) -> Result[Json]: ServerTLSReloadError: If the operation fails. References: - - `reload-the-tls-data `__ + - `reload-the-tls-data `__ """ # noqa: E501 request = Request(method=Method.POST, endpoint="/_admin/server/tls") @@ -2287,7 +2287,7 @@ async def encryption(self) -> Result[Json]: ServerEncryptionError: If the operation fails. References: - - `rotate-the-encryption-keys `__ + - `rotate-the-encryption-keys `__ """ # noqa: E501 request = Request(method=Method.POST, endpoint="/_admin/server/encryption") @@ -2360,7 +2360,7 @@ async def execute_transaction( TransactionExecuteError: If the operation fails on the server side. References: - - `execute-a-javascript-transaction `__ + - `execute-a-javascript-transaction `__ """ # noqa: 501 m = "JavaScript Transactions are deprecated from ArangoDB v3.12.0 onward and will be removed in a future version." # noqa: E501 warn(m, DeprecationWarning, stacklevel=2) @@ -2411,7 +2411,7 @@ async def version(self, details: bool = False) -> Result[Json]: ServerVersionError: If the operation fails on the server side. 
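        Example:
            A minimal usage sketch:

            .. code-block:: python

                info = await db.version(details=True)
                print(info["version"])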
References: - - `get-the-server-version `__ + - `get-the-server-version `__ """ # noqa: E501 request = Request( method=Method.GET, endpoint="/_api/version", params={"details": details} @@ -2434,7 +2434,7 @@ async def tasks(self) -> Result[Jsons]: TaskListError: If the list cannot be retrieved. References: - - `list-all-tasks `__ + - `list-all-tasks `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/tasks") @@ -2459,7 +2459,7 @@ async def task(self, task_id: str) -> Result[Json]: TaskGetError: If the task details cannot be retrieved. References: - - `get-a-task `__ + - `get-a-task `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint=f"/_api/tasks/{task_id}") @@ -2499,8 +2499,8 @@ async def create_task( TaskCreateError: If the task cannot be created. References: - - `create-a-task `__ - - `create-a-task-with-id `__ + - `create-a-task `__ + - `create-a-task-with-id `__ """ # noqa: E501 data: Json = {"command": command} if name is not None: @@ -2553,7 +2553,7 @@ async def delete_task( TaskDeleteError: If the operation fails. References: - - `delete-a-task `__ + - `delete-a-task `__ """ # noqa: E501 request = Request(method=Method.DELETE, endpoint=f"/_api/tasks/{task_id}") @@ -2576,7 +2576,7 @@ async def engine(self) -> Result[Json]: ServerEngineError: If the operation fails. References: - - `get-the-storage-engine-type `__ + - `get-the-storage-engine-type `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/engine") @@ -2598,7 +2598,7 @@ async def time(self) -> Result[datetime]: ServerTimeError: If the operation fails. References: - - `get-the-system-time `__ + - `get-the-system-time `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_admin/time") @@ -2621,7 +2621,7 @@ async def check_availability(self) -> Result[str]: ServerCheckAvailabilityError: If the operation fails. References: - - `check-server-availability `__ + - `check-server-availability `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -2649,7 +2649,7 @@ async def support_info(self) -> Result[Json]: DatabaseSupportInfoError: If the operation fails. References: - - `get-information-about-the-deployment `__ + - `get-information-about-the-deployment `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_admin/support-info") @@ -2672,7 +2672,7 @@ async def options(self) -> Result[Json]: ServerCurrentOptionsGetError: If the operation fails. References: - - `get-the-startup-option-configuration `__ + - `get-the-startup-option-configuration `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_admin/options") @@ -2694,7 +2694,7 @@ async def options_available(self) -> Result[Json]: ServerAvailableOptionsGetError: If the operation fails. References: - - `get-the-available-startup-options `__ + - `get-the-available-startup-options `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_admin/options-description") @@ -2716,7 +2716,7 @@ async def mode(self) -> Result[str]: ServerModeError: If the operation fails. References: - - `return-whether-or-not-a-server-is-in-read-only-mode `__ + - `return-whether-or-not-a-server-is-in-read-only-mode `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_admin/server/mode") @@ -2740,7 +2740,7 @@ async def set_mode(self, mode: str) -> Result[str]: ServerModeSetError: If the operation fails. 
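        Example:
            A minimal usage sketch; switching modes typically requires
            admin access to the `_system` database:

            .. code-block:: python

                await db.set_mode("readonly")
                # ... perform maintenance ...
                await db.set_mode("default")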
References: - - `set-the-server-mode-to-read-only-or-default `__ + - `set-the-server-mode-to-read-only-or-default `__ """ # noqa: E501 request = Request( method=Method.PUT, @@ -2766,7 +2766,7 @@ async def license(self) -> Result[Json]: ServerLicenseGetError: If the operation fails. References: - - `get-information-about-the-current-license `__ + - `get-information-about-the-current-license `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_admin/license") @@ -2790,7 +2790,7 @@ async def set_license(self, license: str, force: Optional[bool] = False) -> None ServerLicenseSetError: If the operation fails. References: - - `set-a-new-license `__ + - `set-a-new-license `__ """ # noqa: E501 params: Params = {} if force is not None: @@ -2819,7 +2819,7 @@ async def shutdown(self, soft: Optional[bool] = None) -> None: ServerShutdownError: If the operation fails. References: - - `start-the-shutdown-sequence `__ + - `start-the-shutdown-sequence `__ """ # noqa: E501 params: Params = {} if soft is not None: @@ -2847,7 +2847,7 @@ async def shutdown_progress(self) -> Result[Json]: ServerShutdownProgressError: If the operation fails. References: - - `query-the-soft-shutdown-progress `__ + - `query-the-soft-shutdown-progress `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_admin/shutdown") @@ -2884,7 +2884,7 @@ async def compact( DatabaseCompactError: If the operation fails. References: - - `compact-all-databases `__ + - `compact-all-databases `__ """ # noqa: E501 data = {} if change_level is not None: @@ -2911,7 +2911,7 @@ async def reload_routing(self) -> None: ServerReloadRoutingError: If the operation fails. References: - - `reload-the-routing-table `__ + - `reload-the-routing-table `__ """ # noqa: E501 request = Request(method=Method.POST, endpoint="/_admin/routing/reload") @@ -2934,7 +2934,7 @@ async def echo(self, body: Optional[Json] = None) -> Result[Json]: ServerEchoError: If the operation fails. References: - - `echo-a-request `__ + - `echo-a-request `__ """ # noqa: E501 data = body if body is not None else {} request = Request(method=Method.POST, endpoint="/_admin/echo", data=data) @@ -2960,7 +2960,7 @@ async def execute(self, command: str) -> Result[Any]: ServerExecuteError: If the execution fails. References: - - `execute-a-script `__ + - `execute-a-script `__ """ # noqa: E501 request = Request( method=Method.POST, endpoint="/_admin/execute", data=command.encode("utf-8") @@ -3002,7 +3002,7 @@ async def metrics(self, server_id: Optional[str] = None) -> Result[str]: ServerMetricsError: If the operation fails. References: - - `metrics-api-v2 `__ + - `metrics-api-v2 `__ """ # noqa: E501 params: Params = {} if server_id is not None: @@ -3058,7 +3058,7 @@ async def read_log_entries( ServerReadLogError: If the operation fails. References: - - `get-the-global-server-logs `__ + - `get-the-global-server-logs `__ """ # noqa: E501 params: Params = {} if upto is not None: @@ -3110,7 +3110,7 @@ async def log_levels( ServerLogLevelError: If the operation fails. References: - - `get-the-server-log-levels `__ + - `get-the-server-log-levels `__ """ # noqa: E501 params: Params = {} if server_id is not None: @@ -3166,7 +3166,7 @@ async def set_log_levels( ServerLogLevelSetError: If the operation fails. 
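        Example:
            A hedged sketch; the keyword arguments are assumed to map to
            standard ArangoDB log topics:

            .. code-block:: python

                new_levels = await db.set_log_levels(general="debug", queries="info")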
        References:
-            - `set-the-structured-log-settings `__
+            - `set-the-structured-log-settings `__
        """ # noqa: E501
        params: Params = {}
        if server_id is not None:
@@ -3207,7 +3207,7 @@ async def reset_log_levels(self, server_id: Optional[str] = None) -> Result[Json
            ServerLogLevelResetError: If the operation fails.

        References:
-            - `reset-the-server-log-levels `__
+            - `reset-the-server-log-levels `__
        """ # noqa: E501
        params: Params = {}
        if server_id is not None:
@@ -3238,7 +3238,7 @@ async def log_settings(self) -> Result[Json]:
            ServerLogSettingError: If the operation fails.

        References:
-            - `get-the-structured-log-settings `__
+            - `get-the-structured-log-settings `__
        """ # noqa: E501
        request = Request(
            method=Method.GET,
@@ -3279,7 +3279,7 @@ async def set_log_settings(self, **kwargs: Dict[str, Any]) -> Result[Json]:
            ServerLogSettingSetError: If the operation fails.

        References:
-            - `set-the-structured-log-settings `__
+            - `set-the-structured-log-settings `__
        """ # noqa: E501
        request = Request(
            method=Method.PUT,
@@ -3306,7 +3306,7 @@ async def api_calls(self) -> Result[Json]:
            ServerApiCallsError: If the operation fails.

        References:
-            - `get-recent-api-calls `__
+            - `get-recent-api-calls `__
        """ # noqa: E501
        request = Request(
            method=Method.GET,
@@ -3379,7 +3379,7 @@ async def begin_transaction(
            TransactionInitError: If the operation fails on the server side.

        References:
-            - `begin-a-stream-transaction `__
+            - `begin-a-stream-transaction `__
        """ # noqa: E501
        collections = dict()
        if read is not None:
@@ -3463,7 +3463,7 @@ async def async_jobs(
            AsyncJobListError: If retrieval fails.

        References:
-            - `list-async-jobs-by-status-or-get-the-status-of-specific-job `__
+            - `list-async-jobs-by-status-or-get-the-status-of-specific-job `__
        """ # noqa: E501
        params: Params = {}
        if count is not None:
@@ -3496,7 +3496,7 @@ async def clear_async_jobs(self, threshold: Optional[float] = None) -> None:
            AsyncJobClearError: If the operation fails.

        References:
-            - `delete-async-job-results `__
+            - `delete-async-job-results `__
        """ # noqa: E501
        if threshold is None:
            request = Request(method=Method.DELETE, endpoint="/_api/job/all")
@@ -3516,7 +3516,7 @@ def response_handler(resp: Response) -> None:
 class TransactionDatabase(Database):
    """Database API tailored specifically for
-    `Stream Transactions `__.
+    `Stream Transactions `__.

    It allows you to start a transaction, run multiple operations (e.g. AQL queries)
    over a short period of time, and then commit or abort the transaction.
@@ -3551,7 +3551,7 @@ async def transaction_status(self) -> str:
            TransactionStatusError: If the transaction is not found.

        References:
-            - `get-the-status-of-a-stream-transaction `__
+            - `get-the-status-of-a-stream-transaction `__
        """ # noqa: E501
        request = Request(
            method=Method.GET,
@@ -3573,7 +3573,7 @@ async def commit_transaction(self) -> None:
            TransactionCommitError: If the operation fails on the server side.

        References:
-            - `commit-a-stream-transaction `__
+            - `commit-a-stream-transaction `__
        """ # noqa: E501
        request = Request(
            method=Method.PUT,
@@ -3593,7 +3593,7 @@ async def abort_transaction(self) -> None:
            TransactionAbortError: If the operation fails on the server side.

        References:
-            - `abort-a-stream-transaction `__
+            - `abort-a-stream-transaction `__
        """ # noqa: E501
        request = Request(
            method=Method.DELETE,
@@ -3620,7 +3620,7 @@ class AsyncDatabase(Database):
    and no results are stored on the server.
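    Example:
        A hedged sketch; the `begin_async_execution` accessor name is an
        assumption used for illustration:

        .. code-block:: python

            async_db = db.begin_async_execution(return_result=True)
            job = await async_db.collection("students").count()
            if await job.status() == "done":
                print(await job.result())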
        References:
-            - `jobs `__
+            - `jobs `__
        """ # noqa: E501

    def __init__(self, connection: Connection, return_result: bool) -> None:
diff --git a/arangoasync/foxx.py b/arangoasync/foxx.py
index fe02b41..0b068da 100644
--- a/arangoasync/foxx.py
+++ b/arangoasync/foxx.py
@@ -65,7 +65,7 @@ async def services(self, exclude_system: Optional[bool] = False) -> Result[Jsons
            FoxxServiceListError: If retrieval fails.

        References:
-            - `list-the-installed-services `__
+            - `list-the-installed-services `__
        """ # noqa: E501
        params: Params = {}
        if exclude_system is not None:
@@ -98,7 +98,7 @@ async def service(self, mount: str) -> Result[Json]:
            FoxxServiceGetError: If retrieval fails.

        References:
-            - `get-the-service-description `__
+            - `get-the-service-description `__
        """ # noqa: E501
        params: Params = {"mount": mount}
        request = Request(
@@ -142,7 +142,7 @@ async def create_service(
            FoxxServiceCreateError: If installation fails.

        References:
-            - `install-a-new-service-mode `__
+            - `install-a-new-service-mode `__
        """ # noqa: E501
        params: Params = dict()
        params["mount"] = mount
@@ -189,7 +189,7 @@ async def delete_service(
            FoxxServiceDeleteError: If the operation fails.

        References:
-            - `uninstall-a-service `__
+            - `uninstall-a-service `__
        """ # noqa: E501
        params: Params = dict()
        params["mount"] = mount
@@ -236,7 +236,7 @@ async def replace_service(
            FoxxServiceReplaceError: If replacement fails.

        References:
-            - `replace-a-service `__
+            - `replace-a-service `__
        """ # noqa: E501
        params: Params = dict()
        params["mount"] = mount
@@ -298,7 +298,7 @@ async def update_service(
            FoxxServiceUpdateError: If upgrade fails.

        References:
-            - `upgrade-a-service `__
+            - `upgrade-a-service `__
        """ # noqa: E501
        params: Params = dict()
        params["mount"] = mount
@@ -345,7 +345,7 @@ async def config(self, mount: str) -> Result[Json]:
            FoxxConfigGetError: If retrieval fails.

        References:
-            - `get-the-configuration-options `__
+            - `get-the-configuration-options `__
        """ # noqa: E501
        request = Request(
            method=Method.GET,
@@ -375,7 +375,7 @@ async def update_config(self, mount: str, options: Json) -> Result[Json]:
            FoxxConfigUpdateError: If update fails.

        References:
-            - `update-the-configuration-options `__
+            - `update-the-configuration-options `__
        """ # noqa: E501
        request = Request(
            method=Method.PATCH,
@@ -407,7 +407,7 @@ async def replace_config(self, mount: str, options: Json) -> Result[Json]:
            FoxxConfigReplaceError: If replace fails.

        References:
-            - `replace-the-configuration-options `__
+            - `replace-the-configuration-options `__
        """ # noqa: E501
        request = Request(
            method=Method.PUT,
@@ -437,7 +437,7 @@ async def dependencies(self, mount: str) -> Result[Json]:
            FoxxDependencyGetError: If retrieval fails.

        References:
-            - `get-the-dependency-options `__
+            - `get-the-dependency-options `__
        """ # noqa: E501
        request = Request(
            method=Method.GET,
@@ -467,7 +467,7 @@ async def update_dependencies(self, mount: str, options: Json) -> Result[Json]:
            FoxxDependencyUpdateError: If update fails.

        References:
-            - `update-the-dependency-options `__
+            - `update-the-dependency-options `__
        """ # noqa: E501
        request = Request(
            method=Method.PATCH,
@@ -498,7 +498,7 @@ async def replace_dependencies(self, mount: str, options: Json) -> Result[Json]:
            FoxxDependencyReplaceError: If replace fails.

        References:
-            - `replace-the-dependency-options `__
+            - `replace-the-dependency-options `__
        """ # noqa: E501
        request = Request(
            method=Method.PUT,
@@ -528,7 +528,7 @@ async def scripts(self, mount: str) -> Result[Json]:
            FoxxScriptListError: If retrieval fails.
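        Example:
            A hedged sketch; the `db.foxx` accessor name and the mount
            path are assumptions used for illustration:

            .. code-block:: python

                scripts = await db.foxx.scripts("/my-service")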
References: - - `list-the-service-scripts `__ + - `list-the-service-scripts `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -561,7 +561,7 @@ async def run_script( FoxxScriptRunError: If script fails. References: - - `run-a-service-script `__ + - `run-a-service-script `__ """ # noqa: E501 request = Request( method=Method.POST, @@ -612,7 +612,7 @@ async def run_tests( FoxxTestRunError: If test fails. References: - - `run-the-service-tests `__ + - `run-the-service-tests `__ """ # noqa: E501 params: Params = dict() params["mount"] = mount @@ -665,7 +665,7 @@ async def enable_development(self, mount: str) -> Result[Json]: FoxxDevModeEnableError: If the operation fails. References: - - `enable-the-development-mode `__ + - `enable-the-development-mode `__ """ # noqa: E501 request = Request( method=Method.POST, @@ -697,7 +697,7 @@ async def disable_development(self, mount: str) -> Result[Json]: FoxxDevModeDisableError: If the operation fails. References: - - `disable-the-development-mode `__ + - `disable-the-development-mode `__ """ # noqa: E501 request = Request( method=Method.DELETE, @@ -726,7 +726,7 @@ async def readme(self, mount: str) -> Result[str]: FoxxReadmeGetError: If retrieval fails. References: - - `get-the-service-readme `__ + - `get-the-service-readme `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -754,7 +754,7 @@ async def swagger(self, mount: str) -> Result[Json]: FoxxSwaggerGetError: If retrieval fails. References: - - `get-the-swagger-description `__ + - `get-the-swagger-description `__ """ # noqa: E501 request = Request( method=Method.GET, endpoint="/_api/foxx/swagger", params={"mount": mount} @@ -785,7 +785,7 @@ async def download(self, mount: str) -> Result[bytes]: FoxxDownloadError: If download fails. References: - - `download-a-service-bundle `__ + - `download-a-service-bundle `__ """ # noqa: E501 request = Request( method=Method.POST, endpoint="/_api/foxx/download", params={"mount": mount} @@ -812,7 +812,7 @@ async def commit(self, replace: Optional[bool] = None) -> None: FoxxCommitError: If commit fails. References: - - `commit-the-local-service-state `__ + - `commit-the-local-service-state `__ """ # noqa: E501 params: Params = {} if replace is not None: diff --git a/arangoasync/graph.py b/arangoasync/graph.py index dbb9732..1fba982 100644 --- a/arangoasync/graph.py +++ b/arangoasync/graph.py @@ -93,7 +93,7 @@ async def properties(self) -> Result[GraphProperties]: GraphProperties: If the operation fails. References: - - `get-a-graph `__ + - `get-a-graph `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint=f"/_api/gharial/{self._name}") @@ -132,7 +132,7 @@ async def vertex_collections(self) -> Result[List[str]]: VertexCollectionListError: If the operation fails. References: - - `list-vertex-collections `__ + - `list-vertex-collections `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -191,7 +191,7 @@ async def create_vertex_collection( VertexCollectionCreateError: If the operation fails. References: - - `add-a-vertex-collection `__ + - `add-a-vertex-collection `__ """ # noqa: E501 data: Json = {"collection": name} @@ -228,7 +228,7 @@ async def delete_vertex_collection(self, name: str, purge: bool = False) -> None VertexCollectionDeleteError: If the operation fails. References: - - `remove-a-vertex-collection `__ + - `remove-a-vertex-collection `__ """ # noqa: E501 request = Request( method=Method.DELETE, @@ -300,7 +300,7 @@ async def vertex( DocumentParseError: If the document is malformed. 
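        Example:
            A hedged sketch; the `db.graph` accessor name, graph name, and
            vertex ID are assumptions used for illustration:

            .. code-block:: python

                graph = db.graph("school")
                alice = await graph.vertex("persons/alice")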
References: - - `get-a-vertex `__ + - `get-a-vertex `__ """ # noqa: E501 col = Collection.get_col_name(vertex) return await self.vertex_collection(col).get( @@ -337,7 +337,7 @@ async def insert_vertex( DocumentParseError: If the document is malformed. References: - - `create-a-vertex `__ + - `create-a-vertex `__ """ # noqa: E501 return await self.vertex_collection(collection).insert( vertex, @@ -379,7 +379,7 @@ async def update_vertex( DocumentUpdateError: If update fails. References: - - `update-a-vertex `__ + - `update-a-vertex `__ """ # noqa: E501 col = Collection.get_col_name(cast(Json | str, vertex)) return await self.vertex_collection(col).update( @@ -425,7 +425,7 @@ async def replace_vertex( DocumentReplaceError: If replace fails. References: - - `replace-a-vertex `__ + - `replace-a-vertex `__ """ # noqa: E501 col = Collection.get_col_name(cast(Json | str, vertex)) return await self.vertex_collection(col).replace( @@ -468,7 +468,7 @@ async def delete_vertex( DocumentDeleteError: If deletion fails. References: - - `remove-a-vertex `__ + - `remove-a-vertex `__ """ # noqa: E501 col = Collection.get_col_name(cast(Json | str, vertex)) return await self.vertex_collection(col).delete( @@ -551,7 +551,7 @@ async def edge_collections(self) -> Result[List[str]]: EdgeCollectionListError: If the operation fails. References: - - `list-edge-collections `__ + - `list-edge-collections `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -602,7 +602,7 @@ async def create_edge_definition( EdgeDefinitionCreateError: If the operation fails. References: - - `add-an-edge-definition `__ + - `add-an-edge-definition `__ """ # noqa: E501 data: Json = { "collection": edge_collection, @@ -659,7 +659,7 @@ async def replace_edge_definition( EdgeDefinitionReplaceError: If the operation fails. References: - - `replace-an-edge-definition `__ + - `replace-an-edge-definition `__ """ # noqa: E501 data: Json = { "collection": edge_collection, @@ -712,7 +712,7 @@ async def delete_edge_definition( EdgeDefinitionDeleteError: If the operation fails. References: - - `remove-an-edge-definition `__ + - `remove-an-edge-definition `__ """ # noqa: E501 params: Params = {} if drop_collections is not None: @@ -793,7 +793,7 @@ async def edge( DocumentParseError: If the document is malformed. References: - - `get-an-edge `__ + - `get-an-edge `__ """ # noqa: E501 col = Collection.get_col_name(edge) return await self.edge_collection(col).get( @@ -832,7 +832,7 @@ async def insert_edge( DocumentParseError: If the document is malformed. References: - - `create-an-edge `__ + - `create-an-edge `__ """ # noqa: E501 return await self.edge_collection(collection).insert( edge, @@ -875,7 +875,7 @@ async def update_edge( DocumentUpdateError: If update fails. References: - - `update-an-edge `__ + - `update-an-edge `__ """ # noqa: E501 col = Collection.get_col_name(cast(Json | str, edge)) return await self.edge_collection(col).update( @@ -923,7 +923,7 @@ async def replace_edge( DocumentReplaceError: If replace fails. References: - - `replace-an-edge `__ + - `replace-an-edge `__ """ # noqa: E501 col = Collection.get_col_name(cast(Json | str, edge)) return await self.edge_collection(col).replace( @@ -967,7 +967,7 @@ async def delete_edge( DocumentDeleteError: If deletion fails. References: - - `remove-an-edge `__ + - `remove-an-edge `__ """ # noqa: E501 col = Collection.get_col_name(cast(Json | str, edge)) return await self.edge_collection(col).delete( @@ -1001,7 +1001,7 @@ async def edges( EdgeListError: If retrieval fails. 
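        Example:
            A hedged sketch; the graph handle, edge collection name, and
            vertex ID are assumptions used for illustration:

            .. code-block:: python

                connections = await graph.edges("friends", "persons/alice")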
References: - - `get-inbound-and-outbound-edges `__ + - `get-inbound-and-outbound-edges `__ """ # noqa: E501 return await self.edge_collection(collection).edges( vertex, diff --git a/arangoasync/job.py b/arangoasync/job.py index 9f64764..bec3c6a 100644 --- a/arangoasync/job.py +++ b/arangoasync/job.py @@ -27,7 +27,7 @@ class AsyncJob(Generic[T]): response_handler: HTTP response handler References: - - `jobs `__ + - `jobs `__ """ # noqa: E501 def __init__( @@ -68,7 +68,7 @@ async def status(self) -> str: AsyncJobStatusError: If retrieval fails or the job is not found. References: - - `list-async-jobs-by-status-or-get-the-status-of-specific-job `__ + - `list-async-jobs-by-status-or-get-the-status-of-specific-job `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint=f"/_api/job/{self._id}") response = await self._conn.send_request(request) @@ -101,7 +101,7 @@ async def result(self) -> T: is still pending. References: - - `get-the-results-of-an-async-job `__ + - `get-the-results-of-an-async-job `__ """ # noqa: E501 request = Request(method=Method.PUT, endpoint=f"/_api/job/{self._id}") response = await self._conn.send_request(request) @@ -142,7 +142,7 @@ async def cancel(self, ignore_missing: bool = False) -> bool: AsyncJobCancelError: If cancellation fails. References: - - `cancel-an-async-job `__ + - `cancel-an-async-job `__ """ # noqa: E501 request = Request(method=Method.PUT, endpoint=f"/_api/job/{self._id}/cancel") response = await self._conn.send_request(request) @@ -173,7 +173,7 @@ async def clear( AsyncJobClearError: If deletion fails. References: - - `delete-async-job-results `__ + - `delete-async-job-results `__ """ # noqa: E501 request = Request(method=Method.DELETE, endpoint=f"/_api/job/{self._id}") resp = await self._conn.send_request(request) diff --git a/arangoasync/replication.py b/arangoasync/replication.py index 436dc94..e495e89 100644 --- a/arangoasync/replication.py +++ b/arangoasync/replication.py @@ -64,7 +64,7 @@ async def inventory( ReplicationInventoryError: If retrieval fails. References: - - `get-a-replication-inventory `__ + - `get-a-replication-inventory `__ """ # noqa: E501 params: Params = dict() params["batchId"] = batch_id @@ -112,7 +112,7 @@ async def dump( ReplicationDumpError: If retrieval fails. References: - - `get-a-replication-dump `__ + - `get-a-replication-dump `__ """ # noqa: E501 params: Params = dict() params["collection"] = collection @@ -149,7 +149,7 @@ async def cluster_inventory( ReplicationClusterInventoryError: If retrieval fails. References: - - `get-the-cluster-collections-and-indexes `__ + - `get-the-cluster-collections-and-indexes `__ """ # noqa: E501 params: Params = {} if include_system is not None: @@ -179,7 +179,7 @@ async def logger_state(self) -> Result[Json]: ReplicationLoggerStateError: If retrieval fails. References: - - `get-the-replication-logger-state `__ + - `get-the-replication-logger-state `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -204,7 +204,7 @@ async def applier_config(self) -> Result[Json]: ReplicationApplierConfigError: If retrieval fails. References: - - `get-the-replication-applier-configuration `__ + - `get-the-replication-applier-configuration `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -229,7 +229,7 @@ async def applier_state(self) -> Result[Json]: ReplicationApplierStateError: If retrieval fails. 
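        Example:
            A hedged sketch; the `db.replication` accessor name is an
            assumption used for illustration:

            .. code-block:: python

                state = await db.replication.applier_state()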
References: - - `get-the-replication-applier-state `__ + - `get-the-replication-applier-state `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -254,7 +254,7 @@ async def server_id(self) -> Result[str]: ReplicationServerIDError: If retrieval fails. References: - - `get-the-replication-server-id `__ + - `get-the-replication-server-id `__ """ # noqa: E501 request = Request( method=Method.GET, diff --git a/arangoasync/typings.py b/arangoasync/typings.py index d6adb4d..cd1c472 100644 --- a/arangoasync/typings.py +++ b/arangoasync/typings.py @@ -223,7 +223,7 @@ class KeyOptions(JsonWrapper): } References: - - `create-a-collection `__ + - `create-a-collection `__ """ # noqa: E501 def __init__( @@ -310,7 +310,7 @@ class CollectionInfo(JsonWrapper): } References: - - `get-the-collection-information `__ + - `get-the-collection-information `__ """ # noqa: E501 def __init__(self, data: Json) -> None: @@ -384,7 +384,7 @@ class UserInfo(JsonWrapper): } References: - - `create-a-user `__ + - `create-a-user `__ """ # noqa: E501 def __init__( @@ -484,7 +484,7 @@ class ServerStatusInformation(JsonWrapper): } References: - - `get-server-status-information `__ + - `get-server-status-information `__ """ # noqa: E501 def __init__(self, data: Json) -> None: @@ -543,7 +543,7 @@ class DatabaseProperties(JsonWrapper): """Properties of the database. References: - - `get-information-about-the-current-database `__ + - `get-information-about-the-current-database `__ """ # noqa: E501 def __init__(self, data: Json, strip_result: bool = False) -> None: @@ -650,7 +650,7 @@ class CollectionProperties(JsonWrapper): } References: - - `get-the-properties-of-a-collection `__ + - `get-the-properties-of-a-collection `__ """ # noqa: E501 def __init__(self, data: Json) -> None: @@ -870,7 +870,7 @@ class CollectionStatistics(JsonWrapper): } References: - - `get-the-collection-statistics `__ + - `get-the-collection-statistics `__ """ # noqa: E501 def __init__(self, data: Json) -> None: @@ -976,7 +976,7 @@ class IndexProperties(JsonWrapper): } References: - - `get-an-index `__ + - `get-an-index `__ """ # noqa: E501 def __init__(self, data: Json) -> None: @@ -1253,7 +1253,7 @@ class QueryProperties(JsonWrapper): } References: - - `create-a-cursor `__ + - `create-a-cursor `__ """ # noqa: E501 def __init__( @@ -1414,7 +1414,7 @@ class QueryExecutionPlan(JsonWrapper): """The execution plan of an AQL query. References: - - `plan `__ + - `plan `__ """ # noqa: E501 def __init__(self, data: Json) -> None: @@ -1468,7 +1468,7 @@ class QueryExecutionProfile(JsonWrapper): } References: - - `profile `__ + - `profile `__ """ # noqa: E501 def __init__(self, data: Json) -> None: @@ -1536,7 +1536,7 @@ class QueryExecutionStats(JsonWrapper): } References: - - `stats `__ + - `stats `__ """ # noqa: E501 def __init__(self, data: Json) -> None: @@ -1615,7 +1615,7 @@ class QueryExecutionExtra(JsonWrapper): """Extra information about the query result. 
    References:
-        - `extra `__
+        - `extra `__
    """ # noqa: E501

    def __init__(self, data: Json) -> None:
@@ -1659,7 +1659,7 @@ class QueryTrackingConfiguration(JsonWrapper):
    }

    References:
-        - `get-the-aql-query-tracking-configuration `__
+        - `get-the-aql-query-tracking-configuration `__
    """ # noqa: E501

    def __init__(self, data: Json) -> None:
@@ -1718,7 +1718,7 @@ class QueryExplainOptions(JsonWrapper):
    }

    References:
-        - `explain-an-aql-query `__
+        - `explain-an-aql-query `__
    """ # noqa: E501

    def __init__(
@@ -1764,8 +1764,8 @@ class QueryCacheProperties(JsonWrapper):
    }

    References:
-        - `get-the-aql-query-results-cache-configuration `__
-        - `set-the-aql-query-results-cache-configuration `__
+        - `get-the-aql-query-results-cache-configuration `__
+        - `set-the-aql-query-results-cache-configuration `__
    """ # noqa: E501

    def __init__(self, data: Json) -> None:
@@ -1818,9 +1818,9 @@ class GraphProperties(JsonWrapper):
    }

    References:
-        - `get-a-graph `__
-        - `list-all-graphs `__
-        - `create-a-graph `__
+        - `get-a-graph `__
+        - `list-all-graphs `__
+        - `create-a-graph `__
    """ # noqa: E501

    def __init__(self, data: Json) -> None:
@@ -1927,7 +1927,7 @@ class GraphOptions(JsonWrapper):
        graph.

    References:
-        - `create-a-graph `__
+        - `create-a-graph `__
    """ # noqa: E501

    def __init__(
@@ -1982,7 +1982,7 @@ class VertexCollectionOptions(JsonWrapper):
        be a string and a valid collection name.

    References:
-        - `add-a-vertex-collection `__
+        - `add-a-vertex-collection `__
    """ # noqa: E501

    def __init__(
@@ -2009,7 +2009,7 @@ class EdgeDefinitionOptions(JsonWrapper):
        be a string and a valid collection name.

    References:
-        - `add-an-edge-definition `__
+        - `add-an-edge-definition `__
    """ # noqa: E501

    def __init__(
@@ -2043,7 +2043,7 @@ class AccessToken(JsonWrapper):
    }

    References:
-        - `create-an-access-token `__
+        - `create-an-access-token `__
    """ # noqa: E501

    def __init__(self, data: Json) -> None:
diff --git a/arangoasync/version.py b/arangoasync/version.py
index 92192ee..68cdeee 100644
--- a/arangoasync/version.py
+++ b/arangoasync/version.py
@@ -1 +1 @@
-__version__ = "1.0.4"
+__version__ = "1.0.5"
diff --git a/docs/certificates.rst b/docs/certificates.rst
index c0ba7af..f8fa1e5 100644
--- a/docs/certificates.rst
+++ b/docs/certificates.rst
@@ -129,4 +129,4 @@ See the `ArangoDB Manual`_ for more information on security features.
    # Reload TLS data
    tls = await db.reload_tls()

-.. _ArangoDB Manual: https://docs.arango.ai/stable/develop/http-api/security/
+.. _ArangoDB Manual: https://docs.arango.ai/arangodb/stable/develop/http-api/security/
diff --git a/docs/overview.rst b/docs/overview.rst
index 77c0fc7..38ecfd7 100644
--- a/docs/overview.rst
+++ b/docs/overview.rst
@@ -64,7 +64,7 @@ You may also use the client without a context manager, but you must ensure to cl
 Another example with `graphs`_:

-.. _graphs: https://docs.arango.ai/stable/graphs/
+.. _graphs: https://docs.arango.ai/arangodb/stable/graphs/

 .. code-block:: python
diff --git a/docs/view.rst b/docs/view.rst
index 3a1ef06..5ab61e9 100644
--- a/docs/view.rst
+++ b/docs/view.rst
@@ -63,7 +63,7 @@ management, refer to `ArangoDB Manual`_.
 For more information on the content of view **properties**, see `Search Alias Views`_ and `Arangosearch Views`_.

-.. _Search Alias Views: https://docs.arango.ai/stable/develop/http-api/views/search-alias-views/
-.. _Arangosearch Views: https://docs.arango.ai/stable/develop/http-api/views/arangosearch-views/
+.. _Search Alias Views: https://docs.arango.ai/arangodb/stable/develop/http-api/views/search-alias-views/
+.. _Arangosearch Views: https://docs.arango.ai/arangodb/stable/develop/http-api/views/arangosearch-views/

 Refer to the :class:`arangoasync.database.StandardDatabase` class for the API specification.
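A minimal end-to-end sketch (the view name and empty properties are illustrative):

.. code-block:: python

    # Create an arangosearch view, inspect it, then drop it.
    await db.create_view("students_view", view_type="arangosearch", properties={})
    info = await db.view_info("students_view")
    await db.delete_view("students_view")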