diff --git a/.circleci/config.yml b/.circleci/config.yml
index fb1bc8e..cb02c17 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -7,7 +7,7 @@ executors:
     resource_class: small
   python-vm:
     machine:
-      image: ubuntu-2204:current
+      image: ubuntu-2404:current
     resource_class: medium
 
 workflows:
@@ -20,8 +20,8 @@ workflows:
           parameters:
             python_version: ["3.10", "3.11", "3.12"]
             arangodb_config: ["single", "cluster"]
-            arangodb_license: ["community", "enterprise"]
-            arangodb_version: ["3.11", "3.12"]
+            arangodb_license: ["enterprise"]
+            arangodb_version: ["3.12"]
 
 jobs:
   lint:
@@ -86,8 +86,8 @@ jobs:
             args+=("--cluster" "--port=8539" "--port=8549")
           fi
 
-          if [ << parameters.arangodb_license >> = "enterprise" ]; then
-            args+=("--enterprise")
+          if [ << parameters.arangodb_license >> != "enterprise" ]; then
+            args+=("--skip" "enterprise")
           fi
 
           echo "Running pytest with args: ${args[@]}"
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 375d8b0..f2899c8 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -10,7 +10,7 @@ pre-commit install  # Install git pre-commit hooks
 
 Run unit tests with coverage:
 
 ```shell
-pytest --cov=arango --cov-report=html  # Open htmlcov/index.html in your browser
+pytest --cluster --cov=arango --cov-report=html  # Open htmlcov/index.html in your browser
 ```
 
 To start an ArangoDB instance locally, run:
diff --git a/README.md b/README.md
index 507c3e9..e35c413 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-![Logo](docs/static/logo.png)
+![Logo](https://raw.githubusercontent.com/arangodb/python-arango-async/refs/heads/main/docs/static/logo.png)
 
 [![CircleCI](https://dl.circleci.com/status-badge/img/gh/arangodb/python-arango-async/tree/main.svg?style=svg)](https://dl.circleci.com/status-badge/redirect/gh/arangodb/python-arango-async/tree/main)
 [![CodeQL](https://github.com/arangodb/python-arango-async/actions/workflows/codeql.yaml/badge.svg)](https://github.com/arangodb/python-arango-async/actions/workflows/codeql.yaml)
@@ -20,7 +20,7 @@ database natively supporting documents, graphs and search.
 This is the _asyncio_ alternative of the [python-arango](https://github.com/arangodb/python-arango)
 driver.
 
-**Note: This project is still in active development, features might be added or removed.**
+Check out a demo app at [python-arango-async-demo](https://github.com/apetenchea/python-arango-async-demo).
 
 ## Requirements
@@ -75,7 +75,7 @@ async def main():
         student_names.append(doc["name"])
 ```
 
-Another example with [graphs](https://docs.arangodb.com/stable/graphs/):
+Another example with [graphs](https://docs.arango.ai/arangodb/stable/graphs/):
 
 ```python
 async def main():
diff --git a/arangoasync/aql.py b/arangoasync/aql.py
index 57d57e1..ea57b75 100644
--- a/arangoasync/aql.py
+++ b/arangoasync/aql.py
@@ -16,6 +16,7 @@
     AQLQueryClearError,
     AQLQueryExecuteError,
     AQLQueryExplainError,
+    AQLQueryHistoryError,
     AQLQueryKillError,
     AQLQueryListError,
     AQLQueryRulesGetError,
@@ -77,7 +78,7 @@ async def entries(self) -> Result[Jsons]:
             AQLCacheEntriesError: If retrieval fails.
 
         References:
-            - `list-the-entries-of-the-aql-query-results-cache `__
+            - `list-the-entries-of-the-aql-query-results-cache `__
         """  # noqa: E501
         request = Request(method=Method.GET, endpoint="/_api/query-cache/entries")
@@ -98,7 +99,7 @@ async def plan_entries(self) -> Result[Jsons]:
             AQLCacheEntriesError: If retrieval fails.
 
         References:
-            - `list-the-entries-of-the-aql-query-plan-cache `__
+            - `list-the-entries-of-the-aql-query-plan-cache `__
         """  # noqa: E501
         request = Request(method=Method.GET, endpoint="/_api/query-plan-cache")
@@ -116,7 +117,7 @@ async def clear(self) -> Result[None]:
             AQLCacheClearError: If clearing the cache fails.
 
         References:
-            - `clear-the-aql-query-results-cache `__
+            - `clear-the-aql-query-results-cache `__
         """  # noqa: E501
         request = Request(method=Method.DELETE, endpoint="/_api/query-cache")
@@ -133,7 +134,7 @@ async def clear_plan(self) -> Result[None]:
             AQLCacheClearError: If clearing the cache fails.
 
         References:
-            - `clear-the-aql-query-plan-cache `__
+            - `clear-the-aql-query-plan-cache `__
         """  # noqa: E501
         request = Request(method=Method.DELETE, endpoint="/_api/query-plan-cache")
@@ -153,7 +154,7 @@ async def properties(self) -> Result[QueryCacheProperties]:
             AQLCachePropertiesError: If retrieval fails.
 
         References:
-            - `get-the-aql-query-results-cache-configuration `__
+            - `get-the-aql-query-results-cache-configuration `__
         """  # noqa: E501
         request = Request(method=Method.GET, endpoint="/_api/query-cache/properties")
@@ -192,7 +193,7 @@ async def configure(
             AQLCacheConfigureError: If setting the configuration fails.
 
         References:
-            - `set-the-aql-query-results-cache-configuration `__
+            - `set-the-aql-query-results-cache-configuration `__
         """  # noqa: E501
         data: Json = dict()
         if mode is not None:
@@ -297,7 +298,7 @@ async def execute(
             Cursor: Result cursor.
 
         References:
-            - `create-a-cursor `__
+            - `create-a-cursor `__
         """  # noqa: E501
         data: Json = dict(query=query)
         if count is not None:
@@ -352,7 +353,7 @@ async def tracking(self) -> Result[QueryTrackingConfiguration]:
             AQLQueryTrackingGetError: If retrieval fails.
 
         References:
-            - `get-the-aql-query-tracking-configuration `__
+            - `get-the-aql-query-tracking-configuration `__
         """  # noqa: E501
         request = Request(method=Method.GET, endpoint="/_api/query/properties")
@@ -396,7 +397,7 @@ async def set_tracking(
             AQLQueryTrackingSetError: If setting the configuration fails.
 
         References:
-            - `update-the-aql-query-tracking-configuration `__
+            - `update-the-aql-query-tracking-configuration `__
         """  # noqa: E501
 
         data: Json = dict()
@@ -426,6 +427,25 @@ def response_handler(resp: Response) -> QueryTrackingConfiguration:
 
         return await self._executor.execute(request, response_handler)
 
+    async def history(self) -> Result[Json]:
+        """Return recently executed AQL queries (admin only).
+
+        Returns:
+            dict: AQL query history.
+
+        Raises:
+            AQLQueryHistoryError: If retrieval fails.
+        """
+        request = Request(method=Method.GET, endpoint="/_admin/server/aql-queries")
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise AQLQueryHistoryError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return cast(Json, result["result"])
+
+        return await self._executor.execute(request, response_handler)
+
     async def queries(self, all_queries: bool = False) -> Result[Jsons]:
         """Return a list of currently running queries.
 
@@ -442,7 +462,7 @@ async def queries(self, all_queries: bool = False) -> Result[Jsons]:
             AQLQueryListError: If retrieval fails.
 
         References:
-            - `list-the-running-queries `__
+            - `list-the-running-queries `__
         """  # noqa: E501
         request = Request(
             method=Method.GET,
@@ -473,7 +493,7 @@ async def slow_queries(self, all_queries: bool = False) -> Result[Jsons]:
             AQLQueryListError: If retrieval fails.
 
         References:
-            - `list-the-slow-aql-queries `__
+            - `list-the-slow-aql-queries `__
         """  # noqa: E501
         request = Request(
             method=Method.GET,
@@ -503,7 +523,7 @@ async def clear_slow_queries(self, all_queries: bool = False) -> Result[None]:
             AQLQueryClearError: If retrieval fails.
 
         References:
-            - `clear-the-list-of-slow-aql-queries `__
+            - `clear-the-list-of-slow-aql-queries `__
         """  # noqa: E501
         request = Request(
             method=Method.DELETE,
@@ -540,7 +560,7 @@ async def kill(
             AQLQueryKillError: If killing the query fails.
 
         References:
-            - `kill-a-running-aql-query `__
+            - `kill-a-running-aql-query `__
         """  # noqa: E501
         request = Request(
             method=Method.DELETE,
@@ -578,7 +598,7 @@ async def explain(
             AQLQueryExplainError: If retrieval fails.
 
         References:
-            - `explain-an-aql-query `__
+            - `explain-an-aql-query `__
         """  # noqa: E501
         data: Json = dict(query=query)
         if bind_vars is not None:
@@ -597,7 +617,7 @@ async def explain(
         def response_handler(resp: Response) -> Json:
             if not resp.is_success:
                 raise AQLQueryExplainError(resp, request)
-            return self.deserializer.loads(resp.raw_body)
+            return Response.format_body(self.deserializer.loads(resp.raw_body))
 
         return await self._executor.execute(request, response_handler)
 
@@ -614,7 +634,7 @@ async def validate(self, query: str) -> Result[Json]:
             AQLQueryValidateError: If validation fails.
 
         References:
-            - `parse-an-aql-query `__
+            - `parse-an-aql-query `__
         """  # noqa: E501
         request = Request(
             method=Method.POST,
@@ -625,7 +645,7 @@ async def validate(self, query: str) -> Result[Json]:
         def response_handler(resp: Response) -> Json:
             if not resp.is_success:
                 raise AQLQueryValidateError(resp, request)
-            return self.deserializer.loads(resp.raw_body)
+            return Response.format_body(self.deserializer.loads(resp.raw_body))
 
         return await self._executor.execute(request, response_handler)
 
@@ -639,7 +659,7 @@ async def query_rules(self) -> Result[Jsons]:
             AQLQueryRulesGetError: If retrieval fails.
 
         References:
-            - `list-all-aql-optimizer-rules `__
+            - `list-all-aql-optimizer-rules `__
         """  # noqa: E501
         request = Request(method=Method.GET, endpoint="/_api/query/rules")
@@ -664,7 +684,7 @@ async def functions(self, namespace: Optional[str] = None) -> Result[Jsons]:
             AQLFunctionListError: If retrieval fails.
 
         References:
-            - `list-the-registered-user-defined-aql-functions `__
+            - `list-the-registered-user-defined-aql-functions `__
         """  # noqa: E501
         params: Json = dict()
         if namespace is not None:
@@ -706,7 +726,7 @@ async def create_function(
             AQLFunctionCreateError: If registration fails.
 
         References:
-            - `create-a-user-defined-aql-function `__
+            - `create-a-user-defined-aql-function `__
         """  # noqa: E501
         request = Request(
             method=Method.POST,
@@ -719,7 +739,7 @@ async def create_function(
         def response_handler(resp: Response) -> Json:
             if not resp.is_success:
                 raise AQLFunctionCreateError(resp, request)
-            return self.deserializer.loads(resp.raw_body)
+            return Response.format_body(self.deserializer.loads(resp.raw_body))
 
         return await self._executor.execute(request, response_handler)
 
@@ -745,7 +765,7 @@ async def delete_function(
             AQLFunctionDeleteError: If removal fails.
 
         References:
-            - `remove-a-user-defined-aql-function `__
+            - `remove-a-user-defined-aql-function `__
         """  # noqa: E501
         params: Json = dict()
         if group is not None:
@@ -760,6 +780,6 @@ def response_handler(resp: Response) -> Json:
             if not resp.is_success:
                 if not (resp.status_code == HTTP_NOT_FOUND and ignore_missing):
                     raise AQLFunctionDeleteError(resp, request)
-            return self.deserializer.loads(resp.raw_body)
+            return Response.format_body(self.deserializer.loads(resp.raw_body))
 
         return await self._executor.execute(request, response_handler)
diff --git a/arangoasync/auth.py b/arangoasync/auth.py
index 96e9b1b..a4df28f 100644
--- a/arangoasync/auth.py
+++ b/arangoasync/auth.py
@@ -20,8 +20,8 @@ class Auth:
         encoding (str): Encoding for the password (default: utf-8)
     """
 
-    username: str
-    password: str
+    username: str = ""
+    password: str = ""
     encoding: str = "utf-8"
 
diff --git a/arangoasync/backup.py b/arangoasync/backup.py
new file mode 100644
index 0000000..7be69cd
--- /dev/null
+++ b/arangoasync/backup.py
@@ -0,0 +1,295 @@
+__all__ = ["Backup"]
+
+from numbers import Number
+from typing import Optional, cast
+
+from arangoasync.exceptions import (
+    BackupCreateError,
+    BackupDeleteError,
+    BackupDownloadError,
+    BackupGetError,
+    BackupRestoreError,
+    BackupUploadError,
+)
+from arangoasync.executor import ApiExecutor
+from arangoasync.request import Method, Request
+from arangoasync.response import Response
+from arangoasync.result import Result
+from arangoasync.serialization import Deserializer, Serializer
+from arangoasync.typings import Json, Jsons
+
+
+class Backup:
+    """Backup API wrapper."""
+
+    def __init__(self, executor: ApiExecutor) -> None:
+        self._executor = executor
+
+    @property
+    def serializer(self) -> Serializer[Json]:
+        """Return the serializer."""
+        return self._executor.serializer
+
+    @property
+    def deserializer(self) -> Deserializer[Json, Jsons]:
+        """Return the deserializer."""
+        return self._executor.deserializer
+
+    async def get(self, backup_id: Optional[str] = None) -> Result[Json]:
+        """Return backup details.
+
+        Args:
+            backup_id (str | None): If set, the returned list is restricted to the
+                backup with the given id.
+
+        Returns:
+            dict: Backup details.
+
+        Raises:
+            BackupGetError: If the operation fails.
+
+        References:
+            - `list-backups `__
+        """  # noqa: E501
+        data: Json = {}
+        if backup_id is not None:
+            data["id"] = backup_id
+
+        request = Request(
+            method=Method.POST,
+            endpoint="/_admin/backup/list",
+            data=self.serializer.dumps(data) if data else None,
+            prefix_needed=False,
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise BackupGetError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return cast(Json, result["result"])
+
+        return await self._executor.execute(request, response_handler)
+
+    async def create(
+        self,
+        label: Optional[str] = None,
+        allow_inconsistent: Optional[bool] = None,
+        force: Optional[bool] = None,
+        timeout: Optional[Number] = None,
+    ) -> Result[Json]:
+        """Create a backup when the global write lock can be obtained.
+
+        Args:
+            label (str | None): Label for this backup. If not specified, a UUID is used.
+            allow_inconsistent (bool | None): Allow inconsistent backup when the global
+                transaction lock cannot be acquired before timeout.
+            force (bool | None): Forcefully abort all running transactions to ensure a
+                consistent backup when the global transaction lock cannot be
+                acquired before timeout. Default (and highly recommended) value
+                is `False`.
+            timeout (float | None): The time in seconds that the operation tries to
+                get a consistent snapshot.
+
+        Returns:
+            dict: Backup information.
+
+        Raises:
+            BackupCreateError: If the backup creation fails.
+
+        References:
+            - `create-backup `__
+        """  # noqa: E501
+        data: Json = {}
+        if label is not None:
+            data["label"] = label
+        if allow_inconsistent is not None:
+            data["allowInconsistent"] = allow_inconsistent
+        if force is not None:
+            data["force"] = force
+        if timeout is not None:
+            data["timeout"] = timeout
+
+        request = Request(
+            method=Method.POST,
+            endpoint="/_admin/backup/create",
+            data=self.serializer.dumps(data),
+            prefix_needed=False,
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise BackupCreateError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return cast(Json, result["result"])
+
+        return await self._executor.execute(request, response_handler)
+
+    async def restore(self, backup_id: str) -> Result[Json]:
+        """Restore a local backup.
+
+        Args:
+            backup_id (str): Backup ID.
+
+        Returns:
+            dict: Result of the restore operation.
+
+        Raises:
+            BackupRestoreError: If the restore operation fails.
+
+        References:
+            - `restore-backup `__
+        """  # noqa: E501
+        data: Json = {"id": backup_id}
+        request = Request(
+            method=Method.POST,
+            endpoint="/_admin/backup/restore",
+            data=self.serializer.dumps(data),
+            prefix_needed=False,
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise BackupRestoreError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return cast(Json, result["result"])
+
+        return await self._executor.execute(request, response_handler)
+
+    async def delete(self, backup_id: str) -> None:
+        """Delete a backup.
+
+        Args:
+            backup_id (str): Backup ID.
+
+        Raises:
+            BackupDeleteError: If the delete operation fails.
+
+        References:
+            - `delete-backup `__
+        """  # noqa: E501
+        data: Json = {"id": backup_id}
+        request = Request(
+            method=Method.POST,
+            endpoint="/_admin/backup/delete",
+            data=self.serializer.dumps(data),
+            prefix_needed=False,
+        )
+
+        def response_handler(resp: Response) -> None:
+            if not resp.is_success:
+                raise BackupDeleteError(resp, request)
+
+        await self._executor.execute(request, response_handler)
+
+    async def upload(
+        self,
+        backup_id: Optional[str] = None,
+        repository: Optional[str] = None,
+        abort: Optional[bool] = None,
+        config: Optional[Json] = None,
+        upload_id: Optional[str] = None,
+    ) -> Result[Json]:
+        """Manage backup uploads.
+
+        Args:
+            backup_id (str | None): Backup ID used for scheduling an upload. Mutually
+                exclusive with parameter **upload_id**.
+            repository (str | None): Remote repository URL (e.g. "local://tmp/backups").
+            abort (bool | None): If set to `True`, running upload is aborted. Used with
+                parameter **upload_id**.
+            config (dict | None): Remote repository configuration. Required for scheduling
+                an upload and mutually exclusive with parameter **upload_id**.
+            upload_id (str | None): Upload ID. Mutually exclusive with parameters
+                **backup_id**, **repository**, and **config**.
+
+        Returns:
+            dict: Upload details.
+
+        Raises:
+            BackupUploadError: If upload operation fails.
+
+        References:
+            - `upload-a-backup-to-a-remote-repository `__
+        """  # noqa: E501
+        data: Json = {}
+        if upload_id is not None:
+            data["uploadId"] = upload_id
+        if backup_id is not None:
+            data["id"] = backup_id
+        if repository is not None:
+            data["remoteRepository"] = repository
+        if abort is not None:
+            data["abort"] = abort
+        if config is not None:
+            data["config"] = config
+
+        request = Request(
+            method=Method.POST,
+            endpoint="/_admin/backup/upload",
+            data=self.serializer.dumps(data),
+            prefix_needed=False,
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise BackupUploadError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return cast(Json, result["result"])
+
+        return await self._executor.execute(request, response_handler)
+
+    async def download(
+        self,
+        backup_id: Optional[str] = None,
+        repository: Optional[str] = None,
+        abort: Optional[bool] = None,
+        config: Optional[Json] = None,
+        download_id: Optional[str] = None,
+    ) -> Result[Json]:
+        """Manage backup downloads.
+
+        Args:
+            backup_id (str | None): Backup ID used for scheduling a download. Mutually
+                exclusive with parameter **download_id**.
+            repository (str | None): Remote repository URL (e.g. "local://tmp/backups").
+            abort (bool | None): If set to `True`, running download is aborted.
+            config (dict | None): Remote repository configuration. Required for scheduling
+                a download and mutually exclusive with parameter **download_id**.
+            download_id (str | None): Download ID. Mutually exclusive with parameters
+                **backup_id**, **repository**, and **config**.
+
+        Returns:
+            dict: Download details.
+
+        Raises:
+            BackupDownloadError: If the download operation fails.
+
+        References:
+            - `download-a-backup-from-a-remote-repository `__
+        """  # noqa: E501
+        data: Json = {}
+        if download_id is not None:
+            data["downloadId"] = download_id
+        if backup_id is not None:
+            data["id"] = backup_id
+        if repository is not None:
+            data["remoteRepository"] = repository
+        if abort is not None:
+            data["abort"] = abort
+        if config is not None:
+            data["config"] = config
+
+        request = Request(
+            method=Method.POST,
+            endpoint="/_admin/backup/download",
+            data=self.serializer.dumps(data),
+            prefix_needed=False,
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise BackupDownloadError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return cast(Json, result["result"])
+
+        return await self._executor.execute(request, response_handler)
diff --git a/arangoasync/client.py b/arangoasync/client.py
index 235cfae..b2eed10 100644
--- a/arangoasync/client.py
+++ b/arangoasync/client.py
@@ -147,7 +147,7 @@ async def db(
         self,
         name: str,
         auth_method: str = "basic",
-        auth: Optional[Auth] = None,
+        auth: Optional[Auth | str] = None,
         token: Optional[JwtToken] = None,
         verify: bool = False,
         compression: Optional[CompressionManager] = None,
@@ -169,7 +169,8 @@ async def db(
                 and client are synchronized.
                 - "superuser": Superuser JWT authentication. The `token` parameter is
                     required. The `auth` parameter is ignored.
-            auth (Auth | None): Login information.
+            auth (Auth | str | None): Login information (username and password) or
+                access token.
             token (JwtToken | None): JWT token.
             verify (bool): Verify the connection by sending a test request.
             compression (CompressionManager | None): If set, supersedes the
@@ -188,6 +189,9 @@
         """
         connection: Connection
 
+        if isinstance(auth, str):
+            auth = Auth(password=auth)
+
         if auth_method == "basic":
             if auth is None:
                 raise ValueError("Basic authentication requires the `auth` parameter")
diff --git a/arangoasync/cluster.py b/arangoasync/cluster.py
new file mode 100644
index 0000000..fa42ea3
--- /dev/null
+++ b/arangoasync/cluster.py
@@ -0,0 +1,451 @@
+__all__ = ["Cluster"]
+
+from typing import List, Optional, cast
+
+from arangoasync.exceptions import (
+    ClusterEndpointsError,
+    ClusterHealthError,
+    ClusterMaintenanceModeError,
+    ClusterRebalanceError,
+    ClusterServerIDError,
+    ClusterServerRoleError,
+    ClusterStatisticsError,
+)
+from arangoasync.executor import ApiExecutor
+from arangoasync.request import Method, Request
+from arangoasync.response import Response
+from arangoasync.result import Result
+from arangoasync.serialization import Deserializer, Serializer
+from arangoasync.typings import Json, Jsons, Params
+
+
+class Cluster:
+    """Cluster-specific endpoints."""
+
+    def __init__(self, executor: ApiExecutor) -> None:
+        self._executor = executor
+
+    @property
+    def serializer(self) -> Serializer[Json]:
+        """Return the serializer."""
+        return self._executor.serializer
+
+    @property
+    def deserializer(self) -> Deserializer[Json, Jsons]:
+        """Return the deserializer."""
+        return self._executor.deserializer
+
+    async def health(self) -> Result[Json]:
+        """Queries the health of the cluster.
+
+        Returns:
+            dict: Health status of the cluster.
+
+        Raises:
+            ClusterHealthError: If retrieval fails.
+
+        References:
+            - `get-the-cluster-health `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.GET,
+            endpoint="/_admin/cluster/health",
+            prefix_needed=False,
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ClusterHealthError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return Response.format_body(result)
+
+        return await self._executor.execute(request, response_handler)
+
+    async def statistics(self, db_server: str) -> Result[Json]:
+        """Queries the statistics of the given DB-Server.
+
+        Args:
+            db_server (str): The ID of the DB-Server.
+
+        Returns:
+            dict: Statistics of the DB-Server.
+
+        Raises:
+            ClusterStatisticsError: If retrieval fails.
+
+        References:
+            - `get-the-statistics-of-a-db-server `__
+        """  # noqa: E501
+        params: Params = {"DBserver": db_server}
+
+        request = Request(
+            method=Method.GET,
+            endpoint="/_admin/cluster/statistics",
+            prefix_needed=False,
+            params=params,
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ClusterStatisticsError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return Response.format_body(result)
+
+        return await self._executor.execute(request, response_handler)
+
+    async def endpoints(self) -> Result[List[str]]:
+        """Fetch all coordinator endpoints.
+
+        Returns:
+            list: List of coordinator endpoints.
+
+        Raises:
+            ClusterEndpointsError: If retrieval fails.
+
+        References:
+            - `list-all-coordinator-endpoints `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.GET,
+            endpoint="/_api/cluster/endpoints",
+            prefix_needed=False,
+        )
+
+        def response_handler(resp: Response) -> List[str]:
+            if not resp.is_success:
+                raise ClusterEndpointsError(resp, request)
+            body: Json = self.deserializer.loads(resp.raw_body)
+            return [item["endpoint"] for item in body["endpoints"]]
+
+        return await self._executor.execute(request, response_handler)
+
+    async def server_id(self) -> Result[str]:
+        """Get the ID of the current server.
+
+        Returns:
+            str: Server ID.
+
+        Raises:
+            ClusterServerIDError: If retrieval fails.
+
+        References:
+            - `get-the-server-id `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.GET,
+            endpoint="/_admin/server/id",
+            prefix_needed=False,
+        )
+
+        def response_handler(resp: Response) -> str:
+            if not resp.is_success:
+                raise ClusterServerIDError(resp, request)
+            return str(self.deserializer.loads(resp.raw_body)["id"])
+
+        return await self._executor.execute(request, response_handler)
+
+    async def server_role(self) -> Result[str]:
+        """Get the role of the current server.
+
+        Returns:
+            str: Server role. Possible values: "SINGLE", "COORDINATOR", "PRIMARY", "SECONDARY", "AGENT", "UNDEFINED".
+
+        Raises:
+            ClusterServerRoleError: If retrieval fails.
+
+        References:
+            - `get-the-server-role `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.GET,
+            endpoint="/_admin/server/role",
+            prefix_needed=False,
+        )
+
+        def response_handler(resp: Response) -> str:
+            if not resp.is_success:
+                raise ClusterServerRoleError(resp, request)
+            return str(self.deserializer.loads(resp.raw_body)["role"])
+
+        return await self._executor.execute(request, response_handler)
+
+    async def toggle_maintenance_mode(self, mode: str) -> Result[Json]:
+        """Enable or disable the cluster supervision (agency) maintenance mode.
+
+        Args:
+            mode (str): Maintenance mode. Allowed values are "on" or "off".
+
+        Returns:
+            dict: Result of the operation.
+
+        Raises:
+            ClusterMaintenanceModeError: If the toggle operation fails.
+
+        References:
+            - `toggle-cluster-maintenance-mode `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.PUT,
+            endpoint="/_admin/cluster/maintenance",
+            prefix_needed=False,
+            data=f'"{mode}"',
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ClusterMaintenanceModeError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return Response.format_body(result)
+
+        return await self._executor.execute(request, response_handler)
+
+    async def server_maintenance_mode(self, server_id: str) -> Result[Json]:
+        """Check whether the specified DB-Server is in maintenance mode and until when.
+
+        Args:
+            server_id (str): Server ID.
+
+        Returns:
+            dict: Maintenance status for the given server.
+
+        Raises:
+            ClusterMaintenanceModeError: If retrieval fails.
+
+        References:
+            - `get-the-maintenance-status-of-a-db-server `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.GET,
+            endpoint=f"/_admin/cluster/maintenance/{server_id}",
+            prefix_needed=False,
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ClusterMaintenanceModeError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return Response.format_body(result)
+
+        return await self._executor.execute(request, response_handler)
+
+    async def toggle_server_maintenance_mode(
+        self, server_id: str, mode: str, timeout: Optional[int] = None
+    ) -> None:
+        """Enable or disable the maintenance mode for the given server.
+
+        Args:
+            server_id (str): Server ID.
+            mode (str): Maintenance mode. Allowed values are "normal" and "maintenance".
+            timeout (int | None): After how many seconds the maintenance mode shall automatically end.
+
+        Raises:
+            ClusterMaintenanceModeError: If the operation fails.
+
+        References:
+            - `set-the-maintenance-status-of-a-db-server `__
+        """  # noqa: E501
+        data: Json = {"mode": mode}
+        if timeout is not None:
+            data["timeout"] = timeout
+
+        request = Request(
+            method=Method.PUT,
+            endpoint=f"/_admin/cluster/maintenance/{server_id}",
+            prefix_needed=False,
+            data=self.serializer.dumps(data),
+        )
+
+        def response_handler(resp: Response) -> None:
+            if not resp.is_success:
+                raise ClusterMaintenanceModeError(resp, request)
+
+        await self._executor.execute(request, response_handler)
+
+    async def calculate_imbalance(self) -> Result[Json]:
+        """Computes the current cluster imbalance and returns the result.
+
+        Returns:
+            dict: Cluster imbalance information.
+
+        Raises:
+            ClusterRebalanceError: If retrieval fails.
+
+        References:
+            - `get-the-current-cluster-imbalance `__
+        """  # noqa: E501
+        request = Request(method=Method.GET, endpoint="/_admin/cluster/rebalance")
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ClusterRebalanceError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return Response.format_body(result)
+
+        return await self._executor.execute(request, response_handler)
+
+    async def calculate_rebalance_plan(
+        self,
+        databases_excluded: Optional[List[str]] = None,
+        exclude_system_collections: Optional[bool] = None,
+        leader_changes: Optional[bool] = None,
+        maximum_number_of_moves: Optional[int] = None,
+        move_followers: Optional[bool] = None,
+        move_leaders: Optional[bool] = None,
+        pi_factor: Optional[float] = None,
+        version: int = 1,
+    ) -> Result[Json]:
+        """Compute a set of move shard operations to improve balance.
+
+        Args:
+            databases_excluded (list | None): List of database names to be excluded from
+                the analysis.
+            exclude_system_collections (bool | None): Ignore system collections in the
+                rebalance plan.
+            leader_changes (bool | None): Allow leader changes without moving data.
+            maximum_number_of_moves (int | None): Maximum number of moves to be computed.
+            move_followers (bool | None): Allow moving shard followers.
+            move_leaders (bool | None): Allow moving shard leaders.
+            pi_factor (float | None): A weighting factor that should remain untouched.
+            version (int): Must be set to 1.
+
+        Returns:
+            dict: Cluster rebalance plan.
+
+        Raises:
+            ClusterRebalanceError: If retrieval fails.
+
+        References:
+            - `compute-a-set-of-move-shard-operations-to-improve-balance `__
+        """  # noqa: E501
+        data: Json = dict(version=version)
+        if databases_excluded is not None:
+            data["databasesExcluded"] = databases_excluded
+        if exclude_system_collections is not None:
+            data["excludeSystemCollections"] = exclude_system_collections
+        if leader_changes is not None:
+            data["leaderChanges"] = leader_changes
+        if maximum_number_of_moves is not None:
+            data["maximumNumberOfMoves"] = maximum_number_of_moves
+        if move_followers is not None:
+            data["moveFollowers"] = move_followers
+        if move_leaders is not None:
+            data["moveLeaders"] = move_leaders
+        if pi_factor is not None:
+            data["piFactor"] = pi_factor
+
+        request = Request(
+            method=Method.POST,
+            endpoint="/_admin/cluster/rebalance",
+            prefix_needed=False,
+            data=self.serializer.dumps(data),
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ClusterRebalanceError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return cast(Json, result["result"])
+
+        return await self._executor.execute(request, response_handler)
+
+    async def rebalance(
+        self,
+        databases_excluded: Optional[List[str]] = None,
+        exclude_system_collections: Optional[bool] = None,
+        leader_changes: Optional[bool] = None,
+        maximum_number_of_moves: Optional[int] = None,
+        move_followers: Optional[bool] = None,
+        move_leaders: Optional[bool] = None,
+        pi_factor: Optional[float] = None,
+        version: int = 1,
+    ) -> Result[Json]:
+        """Compute and execute a set of move shard operations to improve balance.
+
+        Args:
+            databases_excluded (list | None): List of database names to be excluded from
+                the analysis.
+            exclude_system_collections (bool | None): Ignore system collections in the
+                rebalance plan.
+            leader_changes (bool | None): Allow leader changes without moving data.
+            maximum_number_of_moves (int | None): Maximum number of moves to be computed.
+            move_followers (bool | None): Allow moving shard followers.
+            move_leaders (bool | None): Allow moving shard leaders.
+            pi_factor (float | None): A weighting factor that should remain untouched.
+            version (int): Must be set to 1.
+
+        Returns:
+            dict: Cluster rebalance plan.
+
+        Raises:
+            ClusterRebalanceError: If retrieval fails.
+
+        References:
+            - `compute-and-execute-a-set-of-move-shard-operations-to-improve-balance `__
+        """  # noqa: E501
+        data: Json = dict(version=version)
+        if databases_excluded is not None:
+            data["databasesExcluded"] = databases_excluded
+        if exclude_system_collections is not None:
+            data["excludeSystemCollections"] = exclude_system_collections
+        if leader_changes is not None:
+            data["leaderChanges"] = leader_changes
+        if maximum_number_of_moves is not None:
+            data["maximumNumberOfMoves"] = maximum_number_of_moves
+        if move_followers is not None:
+            data["moveFollowers"] = move_followers
+        if move_leaders is not None:
+            data["moveLeaders"] = move_leaders
+        if pi_factor is not None:
+            data["piFactor"] = pi_factor
+
+        request = Request(
+            method=Method.PUT,
+            endpoint="/_admin/cluster/rebalance",
+            prefix_needed=False,
+            data=self.serializer.dumps(data),
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ClusterRebalanceError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return cast(Json, result["result"])
+
+        return await self._executor.execute(request, response_handler)
+
+    async def execute_rebalance_plan(
+        self,
+        moves: List[Json],
+        version: int = 1,
+    ) -> Result[int]:
+        """Execute a set of move shard operations.
+
+        Args:
+            moves (list): List of move shard operations to be executed.
+            version (int): Must be set to 1.
+
+        Returns:
+            int: Indicates whether the methods have been accepted and scheduled for execution.
+
+        Raises:
+            ClusterRebalanceError: If the execution fails.
+
+        References:
+            - `execute-a-set-of-move-shard-operations `__
+        """  # noqa: E501
+        data: Json = dict(version=version, moves=moves)
+
+        request = Request(
+            method=Method.POST,
+            endpoint="/_admin/cluster/rebalance/execute",
+            data=self.serializer.dumps(data),
+            prefix_needed=False,
+        )
+
+        def response_handler(resp: Response) -> int:
+            if not resp.is_success:
+                raise ClusterRebalanceError(resp, request)
+            result: int = self.deserializer.loads(resp.raw_body)["code"]
+            return result
+
+        return await self._executor.execute(request, response_handler)
diff --git a/arangoasync/collection.py b/arangoasync/collection.py
index c742714..cc372bf 100644
--- a/arangoasync/collection.py
+++ b/arangoasync/collection.py
@@ -16,7 +16,16 @@
     HTTP_PRECONDITION_FAILED,
 )
 from arangoasync.exceptions import (
+    CollectionChecksumError,
+    CollectionCompactError,
+    CollectionConfigureError,
     CollectionPropertiesError,
+    CollectionRecalculateCountError,
+    CollectionRenameError,
+    CollectionResponsibleShardError,
+    CollectionRevisionError,
+    CollectionShardsError,
+    CollectionStatisticsError,
     CollectionTruncateError,
     DocumentCountError,
     DocumentDeleteError,
@@ -40,7 +49,9 @@
 from arangoasync.typings import (
+    CollectionInfo,
     CollectionProperties,
+    CollectionStatistics,
     IndexProperties,
     Json,
     Jsons,
@@ -322,7 +333,7 @@ async def indexes(
             IndexListError: If retrieval fails.
 
         References:
-            - `list-all-indexes-of-a-collection `__
+            - `list-all-indexes-of-a-collection `__
         """  # noqa: E501
         params: Params = dict(collection=self._name)
         if with_stats is not None:
@@ -357,7 +368,7 @@ async def get_index(self, id: str | int) -> Result[IndexProperties]:
             IndexGetError: If retrieval fails.
 
         References:
-            `get-an-index `__
+            `get-an-index `__
         """  # noqa: E501
         if isinstance(id, int):
             full_id = f"{self._name}/{id}"
@@ -397,12 +408,12 @@ async def add_index(
             IndexCreateError: If index creation fails.
 
         References:
-            - `create-an-index `__
-            - `create-a-persistent-index `__
-            - `create-an-inverted-index `__
-            - `create-a-ttl-index `__
-            - `create-a-multi-dimensional-index `__
-            - `create-a-geo-spatial-index `__
+            - `create-an-index `__
+            - `create-a-persistent-index `__
+            - `create-an-inverted-index `__
+            - `create-a-ttl-index `__
+            - `create-a-multi-dimensional-index `__
+            - `create-a-geo-spatial-index `__
         """  # noqa: E501
         options = options or {}
         request = Request(
@@ -436,7 +447,7 @@ async def delete_index(
             IndexDeleteError: If deletion fails.
 
         References:
-            - `delete-an-index `__
+            - `delete-an-index `__
         """  # noqa: E501
         if isinstance(id, int):
             full_id = f"{self._name}/{id}"
@@ -467,7 +478,7 @@ async def load_indexes(self) -> Result[bool]:
             IndexLoadError: If loading fails.
 
         References:
-            - `load-collection-indexes-into-memory `__
+            - `load-collection-indexes-into-memory `__
         """  # noqa: E501
         request = Request(
             method=Method.PUT,
@@ -481,6 +492,26 @@ def response_handler(resp: Response) -> bool:
 
         return await self._executor.execute(request, response_handler)
 
+    async def recalculate_count(self) -> None:
+        """Recalculate the document count.
+
+        Raises:
+            CollectionRecalculateCountError: If re-calculation fails.
+
+        References:
+            - `recalculate-the-document-count-of-a-collection `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.PUT,
+            endpoint=f"/_api/collection/{self.name}/recalculateCount",
+        )
+
+        def response_handler(resp: Response) -> None:
+            if not resp.is_success:
+                raise CollectionRecalculateCountError(resp, request)
+
+        await self._executor.execute(request, response_handler)
+
     async def properties(self) -> Result[CollectionProperties]:
         """Return the full properties of the current collection.
 
@@ -491,7 +522,7 @@ async def properties(self) -> Result[CollectionProperties]:
             CollectionPropertiesError: If retrieval fails.
 
         References:
-            - `get-the-properties-of-a-collection `__
+            - `get-the-properties-of-a-collection `__
         """  # noqa: E501
         request = Request(
             method=Method.GET,
@@ -501,7 +532,129 @@
         def response_handler(resp: Response) -> CollectionProperties:
             if not resp.is_success:
                 raise CollectionPropertiesError(resp, request)
-            return CollectionProperties(self._executor.deserialize(resp.raw_body))
+            return CollectionProperties(self.deserializer.loads(resp.raw_body))
+
+        return await self._executor.execute(request, response_handler)
+
+    async def configure(
+        self,
+        cache_enabled: Optional[bool] = None,
+        computed_values: Optional[Jsons] = None,
+        replication_factor: Optional[int | str] = None,
+        schema: Optional[Json] = None,
+        wait_for_sync: Optional[bool] = None,
+        write_concern: Optional[int] = None,
+    ) -> Result[CollectionProperties]:
+        """Changes the properties of a collection.
+
+        Only the provided attributes are updated.
+
+        Args:
+            cache_enabled (bool | None): Whether the in-memory hash cache
+                for documents should be enabled for this collection.
+            computed_values (list | None): An optional list of objects, each
+                representing a computed value.
+            replication_factor (int | str | None): In a cluster, this attribute determines
+                how many copies of each shard are kept on different DB-Servers.
+                For SatelliteCollections, it needs to be the string "satellite".
+            schema (dict | None): The configuration of the collection-level schema
+                validation for documents.
+            wait_for_sync (bool | None): If set to `True`, the data is synchronized
+                to disk before returning from a document create, update, replace or
+                removal operation.
+            write_concern (int | None): Determines how many copies of each shard are
+                required to be in sync on the different DB-Servers.
+
+        Returns:
+            CollectionProperties: Properties.
+
+        Raises:
+            CollectionConfigureError: If configuration fails.
+
+        References:
+            - `change-the-properties-of-a-collection `__
+        """  # noqa: E501
+        data: Json = {}
+        if cache_enabled is not None:
+            data["cacheEnabled"] = cache_enabled
+        if computed_values is not None:
+            data["computedValues"] = computed_values
+        if replication_factor is not None:
+            data["replicationFactor"] = replication_factor
+        if schema is not None:
+            data["schema"] = schema
+        if wait_for_sync is not None:
+            data["waitForSync"] = wait_for_sync
+        if write_concern is not None:
+            data["writeConcern"] = write_concern
+        request = Request(
+            method=Method.PUT,
+            endpoint=f"/_api/collection/{self.name}/properties",
+            data=self.serializer.dumps(data),
+        )
+
+        def response_handler(resp: Response) -> CollectionProperties:
+            if not resp.is_success:
+                raise CollectionConfigureError(resp, request)
+            return CollectionProperties(self.deserializer.loads(resp.raw_body))
+
+        return await self._executor.execute(request, response_handler)
+
+    async def rename(self, new_name: str) -> None:
+        """Rename the collection.
+
+        Renames may not be reflected immediately in async execution, batch
+        execution or transactions. It is recommended to initialize new API
+        wrappers after a rename.
+
+        Note:
+            Renaming collections is not supported in cluster deployments.
+
+        Args:
+            new_name (str): New collection name.
+
+        Raises:
+            CollectionRenameError: If rename fails.
+
+        References:
+            - `rename-a-collection `__
+        """  # noqa: E501
+        data: Json = {"name": new_name}
+        request = Request(
+            method=Method.PUT,
+            endpoint=f"/_api/collection/{self.name}/rename",
+            data=self.serializer.dumps(data),
+        )
+
+        def response_handler(resp: Response) -> None:
+            if not resp.is_success:
+                raise CollectionRenameError(resp, request)
+            self._name = new_name
+            self._id_prefix = f"{new_name}/"
+
+        await self._executor.execute(request, response_handler)
+
+    async def compact(self) -> Result[CollectionInfo]:
+        """Compact a collection.
+
+        Returns:
+            CollectionInfo: Collection information.
+
+        Raises:
+            CollectionCompactError: If compaction fails.
+
+        References:
+            - `compact-a-collection `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.PUT,
+            endpoint=f"/_api/collection/{self.name}/compact",
+        )
+
+        def response_handler(resp: Response) -> CollectionInfo:
+            if not resp.is_success:
+                raise CollectionCompactError(resp, request)
+            return CollectionInfo(self.deserializer.loads(resp.raw_body))
 
         return await self._executor.execute(request, response_handler)
 
@@ -524,7 +677,7 @@ async def truncate(
             CollectionTruncateError: If truncation fails.
 
         References:
-            - `truncate-a-collection `__
+            - `truncate-a-collection `__
         """  # noqa: E501
         params: Params = {}
         if wait_for_sync is not None:
@@ -552,7 +705,10 @@ async def count(self) -> Result[int]:
 
         Raises:
             DocumentCountError: If retrieval fails.
- """ + + References: + - `get-the-document-count-of-a-collection `__ + """ # noqa: E501 request = Request( method=Method.GET, endpoint=f"/_api/collection/{self.name}/count" ) @@ -565,6 +721,158 @@ def response_handler(resp: Response) -> int: return await self._executor.execute(request, response_handler) + async def statistics(self) -> Result[CollectionStatistics]: + """Get additional statistical information about the collection. + + Returns: + CollectionStatistics: Collection statistics. + + Raises: + CollectionStatisticsError: If retrieval fails. + + References: + - `get-the-collection-statistics `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint=f"/_api/collection/{self.name}/figures", + ) + + def response_handler(resp: Response) -> CollectionStatistics: + if not resp.is_success: + raise CollectionStatisticsError(resp, request) + return CollectionStatistics(self.deserializer.loads(resp.raw_body)) + + return await self._executor.execute(request, response_handler) + + async def responsible_shard(self, document: Json) -> Result[str]: + """Return the ID of the shard responsible for given document. + + If the document does not exist, return the shard that would be + responsible. + + Args: + document (dict): Document body with "_key" field. + + Returns: + str: Shard ID. + + Raises: + CollectionResponsibleShardError: If retrieval fails. + + References: + - `get-the-responsible-shard-for-a-document `__ + """ # noqa: E501 + request = Request( + method=Method.PUT, + endpoint=f"/_api/collection/{self.name}/responsibleShard", + data=self.serializer.dumps(document), + ) + + def response_handler(resp: Response) -> str: + if resp.is_success: + body = self.deserializer.loads(resp.raw_body) + return cast(str, body["shardId"]) + raise CollectionResponsibleShardError(resp, request) + + return await self._executor.execute(request, response_handler) + + async def shards(self, details: Optional[bool] = None) -> Result[Json]: + """Return collection shards and properties. + + Available only in a cluster setup. + + Args: + details (bool | None): If set to `True`, include responsible + servers for these shards. + + Returns: + dict: Collection shards. + + Raises: + CollectionShardsError: If retrieval fails. + + References: + - `get-the-shard-ids-of-a-collection `__ + """ # noqa: E501 + params: Params = {} + if details is not None: + params["details"] = details + + request = Request( + method=Method.GET, + endpoint=f"/_api/collection/{self.name}/shards", + params=params, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise CollectionShardsError(resp, request) + return cast(Json, self.deserializer.loads(resp.raw_body)["shards"]) + + return await self._executor.execute(request, response_handler) + + async def revision(self) -> Result[str]: + """Return collection revision. + + Returns: + str: Collection revision. + + Raises: + CollectionRevisionError: If retrieval fails. + + References: + - `get-the-collection-revision-id `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint=f"/_api/collection/{self.name}/revision", + ) + + def response_handler(resp: Response) -> str: + if not resp.is_success: + raise CollectionRevisionError(resp, request) + return cast(str, self.deserializer.loads(resp.raw_body)["revision"]) + + return await self._executor.execute(request, response_handler) + + async def checksum( + self, with_rev: Optional[bool] = None, with_data: Optional[bool] = None + ) -> Result[str]: + """Calculate collection checksum. 
+
+        Args:
+            with_rev (bool | None): Include document revisions in checksum calculation.
+            with_data (bool | None): Include document data in checksum calculation.
+
+        Returns:
+            str: Collection checksum.
+
+        Raises:
+            CollectionChecksumError: If retrieval fails.
+
+        References:
+            - `get-the-collection-checksum `__
+        """  # noqa: E501
+        params: Params = {}
+        if with_rev is not None:
+            params["withRevision"] = with_rev
+        if with_data is not None:
+            params["withData"] = with_data
+
+        request = Request(
+            method=Method.GET,
+            endpoint=f"/_api/collection/{self.name}/checksum",
+            params=params,
+        )
+
+        def response_handler(resp: Response) -> str:
+            if not resp.is_success:
+                raise CollectionChecksumError(resp, request)
+            return cast(str, self.deserializer.loads(resp.raw_body)["checksum"])
+
+        return await self._executor.execute(request, response_handler)
+
     async def has(
         self,
         document: str | Json,
@@ -591,7 +899,7 @@ async def has(
             DocumentGetError: If retrieval fails.
 
         References:
-            - `get-a-document-header `__
+            - `get-a-document-header `__
         """  # noqa: E501
         handle = self._get_doc_id(document)
@@ -648,7 +956,7 @@ async def get_many(
             DocumentGetError: If retrieval fails.
 
         References:
-            - `get-multiple-documents `__
+            - `get-multiple-documents `__
         """  # noqa: E501
         params: Params = {"onlyget": True}
         if ignore_revs is not None:
@@ -975,7 +1283,7 @@ async def insert_many(
             DocumentInsertError: If insertion fails.
 
         References:
-            - `create-multiple-documents `__
+            - `create-multiple-documents `__
         """  # noqa: E501
         params: Params = {}
         if wait_for_sync is not None:
@@ -1065,7 +1373,7 @@ async def replace_many(
             DocumentReplaceError: If replacing fails.
 
         References:
-            - `replace-multiple-documents `__
+            - `replace-multiple-documents `__
         """  # noqa: E501
         params: Params = {}
         if wait_for_sync is not None:
@@ -1158,7 +1466,7 @@ async def update_many(
             DocumentUpdateError: If update fails.
 
         References:
-            - `update-multiple-documents `__
+            - `update-multiple-documents `__
         """  # noqa: E501
         params: Params = {}
         if wait_for_sync is not None:
@@ -1240,7 +1548,7 @@ async def delete_many(
             DocumentRemoveError: If removal fails.
 
         References:
-            - `remove-multiple-documents `__
+            - `remove-multiple-documents `__
         """  # noqa: E501
         params: Params = {}
         if wait_for_sync is not None:
@@ -1270,6 +1578,108 @@ def response_handler(
 
         return await self._executor.execute(request, response_handler)
 
+    async def import_bulk(
+        self,
+        documents: bytes | str,
+        doc_type: Optional[str] = None,
+        complete: Optional[bool] = True,
+        details: Optional[bool] = True,
+        from_prefix: Optional[str] = None,
+        to_prefix: Optional[str] = None,
+        overwrite: Optional[bool] = None,
+        overwrite_collection_prefix: Optional[bool] = None,
+        on_duplicate: Optional[str] = None,
+        wait_for_sync: Optional[bool] = None,
+        ignore_missing: Optional[bool] = None,
+    ) -> Result[Json]:
+        """Load JSON data in bulk into ArangoDB.
+
+        Args:
+            documents (bytes | str): String representation of the JSON data to import.
+            doc_type (str | None): Determines how the body of the request is interpreted.
+                Possible values: "", "documents", "array", "auto".
+            complete (bool | None): If set to `True`, the whole import fails if any error occurs.
+                Otherwise, the import continues even if some documents are invalid and cannot
+                be imported, skipping the problematic documents.
+            details (bool | None): If set to `True`, the result includes a `details`
+                attribute with information about documents that could not be imported.
+            from_prefix (str | None): String prefix prepended to the value of "_from"
+                field in each edge document inserted. For example, prefix "foo"
+                prepended to "_from": "bar" will result in "_from": "foo/bar".
+                Applies only to edge collections.
+            to_prefix (str | None): String prefix prepended to the value of "_to"
+                field in each edge document inserted. For example, prefix "foo"
+                prepended to "_to": "bar" will result in "_to": "foo/bar".
+                Applies only to edge collections.
+            overwrite (bool | None): If set to `True`, all existing documents are removed
+                prior to the import. Indexes are still preserved.
+            overwrite_collection_prefix (bool | None): Force the `fromPrefix` and
+                `toPrefix`, possibly replacing existing collection name prefixes.
+            on_duplicate (str | None): Action to take on unique key constraint violations
+                (for documents with "_key" fields). Allowed values are "error" (do
+                not import the new documents and count them as errors), "update"
+                (update the existing documents while preserving any fields missing
+                in the new ones), "replace" (replace the existing documents with
+                new ones), and "ignore" (do not import the new documents and count
+                them as ignored, as opposed to counting them as errors). Options
+                "update" and "replace" may fail on secondary unique key constraint
+                violations.
+            wait_for_sync (bool | None): Block until operation is synchronized to disk.
+            ignore_missing (bool | None): When importing JSON arrays of tabular data
+                (type parameter is omitted), the first line of the request body defines
+                the attribute keys and the subsequent lines the attribute values for each
+                document. Subsequent lines with a different number of elements than the
+                first line are not imported by default. You can enable this option to
+                import them anyway. For the missing elements, the document attributes
+                are omitted. Excess elements are ignored.
+
+        Returns:
+            dict: Result of the import operation.
+
+        Raises:
+            DocumentInsertError: If import fails.
+
+        References:
+            - `import-json-data-as-documents `__
+        """  # noqa: E501
+        params: Params = dict()
+        params["collection"] = self.name
+        if doc_type is not None:
+            params["type"] = doc_type
+        if complete is not None:
+            params["complete"] = complete
+        if details is not None:
+            params["details"] = details
+        if from_prefix is not None:
+            params["fromPrefix"] = from_prefix
+        if to_prefix is not None:
+            params["toPrefix"] = to_prefix
+        if overwrite is not None:
+            params["overwrite"] = overwrite
+        if overwrite_collection_prefix is not None:
+            params["overwriteCollectionPrefix"] = overwrite_collection_prefix
+        if on_duplicate is not None:
+            params["onDuplicate"] = on_duplicate
+        if wait_for_sync is not None:
+            params["waitForSync"] = wait_for_sync
+        if ignore_missing is not None:
+            params["ignoreMissing"] = ignore_missing
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise DocumentInsertError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        request = Request(
+            method=Method.POST,
+            endpoint="/_api/import",
+            data=documents,
+            params=params,
+        )
+
+        return await self._executor.execute(request, response_handler)
+
 
 class StandardCollection(Collection[T, U, V]):
     """Standard collection API wrapper.
@@ -1320,7 +1730,7 @@ async def get(
             DocumentParseError: If the document is malformed.
 
         References:
-            - `get-a-document `__
+            - `get-a-document `__
         """  # noqa: E501
         handle = self._get_doc_id(document)
@@ -1408,7 +1818,7 @@ async def insert(
             DocumentParseError: If the document is malformed.
 
         References:
-            - `create-a-document `__
+            - `create-a-document `__
         """  # noqa: E501
         if isinstance(document, dict):
             document = cast(T, self._ensure_key_from_id(document))
@@ -1444,9 +1854,9 @@ async def insert(
         def response_handler(resp: Response) -> bool | Json:
             if resp.is_success:
-                if silent is True:
+                if silent:
                     return True
-                return self._executor.deserialize(resp.raw_body)
+                return self.deserializer.loads(resp.raw_body)
             msg: Optional[str] = None
             if resp.status_code == HTTP_BAD_PARAMETER:
                 msg = (
@@ -1513,7 +1923,7 @@ async def update(
             DocumentUpdateError: If update fails.
 
         References:
-            - `update-a-document `__
+            - `update-a-document `__
         """  # noqa: E501
         params: Params = {}
         if ignore_revs is not None:
@@ -1551,7 +1961,7 @@
         def response_handler(resp: Response) -> bool | Json:
             if resp.is_success:
                 if silent is True:
                     return True
-                return self._executor.deserialize(resp.raw_body)
+                return self.deserializer.loads(resp.raw_body)
             msg: Optional[str] = None
             if resp.status_code == HTTP_PRECONDITION_FAILED:
                 raise DocumentRevisionError(resp, request)
@@ -1607,7 +2017,7 @@ async def replace(
             DocumentReplaceError: If replace fails.
 
         References:
-            - `replace-a-document `__
+            - `replace-a-document `__
         """  # noqa: E501
         params: Params = {}
         if ignore_revs is not None:
@@ -1641,7 +2051,7 @@
        def response_handler(resp: Response) -> bool | Json:
             if resp.is_success:
                 if silent is True:
                     return True
-                return self._executor.deserialize(resp.raw_body)
+                return self.deserializer.loads(resp.raw_body)
             msg: Optional[str] = None
             if resp.status_code == HTTP_PRECONDITION_FAILED:
                 raise DocumentRevisionError(resp, request)
@@ -1653,7 +2063,7 @@ def response_handler(resp: Response) -> bool | Json:
     async def delete(
         self,
-        document: T,
+        document: str | T,
         ignore_revs: Optional[bool] = None,
         ignore_missing: bool = False,
         wait_for_sync: Optional[bool] = None,
@@ -1665,7 +2075,7 @@ async def delete(
         """Delete a document.
 
         Args:
-            document (dict): Document ID, key or body. The body must contain the
+            document (str | dict): Document ID, key or body. The body must contain the
                 "_key" or "_id" field.
             ignore_revs (bool | None): If set to `True`, the `_rev` attribute in
                 the document is ignored. If this is set to `False`, then the `_rev`
@@ -1695,8 +2105,10 @@ async def delete(
             DocumentDeleteError: If deletion fails.
 
         References:
-            - `remove-a-document `__
+            - `remove-a-document `__
         """  # noqa: E501
+        handle = self._get_doc_id(cast(str | Json, document))
+
         params: Params = {}
         if ignore_revs is not None:
             params["ignoreRevs"] = ignore_revs
@@ -1715,7 +2127,7 @@ async def delete(
         request = Request(
             method=Method.DELETE,
-            endpoint=f"/_api/document/{self._extract_id(cast(Json, document))}",
+            endpoint=f"/_api/document/{handle}",
             params=params,
             headers=headers,
         )
@@ -1724,7 +2136,7 @@
        def response_handler(resp: Response) -> bool | Json:
             if resp.is_success:
                 if silent is True:
                     return True
-                return self._executor.deserialize(resp.raw_body)
+                return self.deserializer.loads(resp.raw_body)
             msg: Optional[str] = None
             if resp.status_code == HTTP_PRECONDITION_FAILED:
                 raise DocumentRevisionError(resp, request)
@@ -1820,7 +2232,7 @@ async def get(
             DocumentParseError: If the document is malformed.
 
         References:
-            - `get-a-vertex `__
+            - `get-a-vertex `__
         """  # noqa: E501
         handle = self._get_doc_id(vertex)
@@ -1882,7 +2294,7 @@ async def insert(
             DocumentParseError: If the document is malformed.
 
         References:
-            - `create-a-vertex `__
+            - `create-a-vertex `__
         """  # noqa: E501
         if isinstance(vertex, dict):
             vertex = cast(T, self._ensure_key_from_id(vertex))
@@ -1947,7 +2359,7 @@ async def update(
             DocumentUpdateError: If update fails.
 
         References:
-            - `update-a-vertex `__
+            - `update-a-vertex `__
         """  # noqa: E501
         params: Params = {}
         if wait_for_sync is not None:
@@ -2022,7 +2434,7 @@ async def replace(
             DocumentReplaceError: If replace fails.
 
         References:
-            - `replace-a-vertex `__
+            - `replace-a-vertex `__
         """  # noqa: E501
         params: Params = {}
         if wait_for_sync is not None:
@@ -2094,7 +2506,7 @@ async def delete(
             DocumentDeleteError: If deletion fails.
 
         References:
-            - `remove-a-vertex `__
+            - `remove-a-vertex `__
         """  # noqa: E501
         params: Params = {}
         if wait_for_sync is not None:
@@ -2219,7 +2631,7 @@ async def get(
             DocumentParseError: If the document is malformed.
 
         References:
-            - `get-an-edge `__
+            - `get-an-edge `__
         """  # noqa: E501
         handle = self._get_doc_id(edge)
@@ -2282,7 +2694,7 @@ async def insert(
             DocumentParseError: If the document is malformed.
 
         References:
-            - `create-an-edge `__
+            - `create-an-edge `__
         """  # noqa: E501
         if isinstance(edge, dict):
             edge = cast(T, self._ensure_key_from_id(edge))
@@ -2351,7 +2763,7 @@ async def update(
             DocumentUpdateError: If update fails.
 
         References:
-            - `update-an-edge `__
+            - `update-an-edge `__
         """  # noqa: E501
         params: Params = {}
         if wait_for_sync is not None:
@@ -2430,7 +2842,7 @@ async def replace(
             DocumentReplaceError: If replace fails.
 
         References:
-            - `replace-an-edge `__
+            - `replace-an-edge `__
         """  # noqa: E501
         params: Params = {}
         if wait_for_sync is not None:
@@ -2505,7 +2917,7 @@ async def delete(
             DocumentDeleteError: If deletion fails.
 
         References:
-            - `remove-an-edge `__
+            - `remove-an-edge `__
         """  # noqa: E501
         params: Params = {}
         if wait_for_sync is not None:
@@ -2566,7 +2978,7 @@ async def edges(
             EdgeListError: If retrieval fails.
 
         References:
-            - `get-inbound-and-outbound-edges `__
+            - `get-inbound-and-outbound-edges `__
         """  # noqa: E501
         params: Params = {
             "vertex": self._get_doc_id(vertex, validate=False),
@@ -2590,10 +3002,7 @@ async def edges(
         def response_handler(resp: Response) -> Json:
             if not resp.is_success:
                 raise EdgeListError(resp, request)
-            body = self.deserializer.loads(resp.raw_body)
-            for key in ("error", "code"):
-                body.pop(key)
-            return body
+            return Response.format_body(self.deserializer.loads(resp.raw_body))
 
         return await self._executor.execute(request, response_handler)
diff --git a/arangoasync/connection.py b/arangoasync/connection.py
index f404248..5fa6363 100644
--- a/arangoasync/connection.py
+++ b/arangoasync/connection.py
@@ -160,7 +160,10 @@ def compress_request(self, request: Request) -> bool:
 
         return result
 
-    async def process_request(self, request: Request) -> Response:
+    async def process_request(
+        self,
+        request: Request,
+    ) -> Response:
         """Process request, potentially trying multiple hosts.
 
         Args:
 
         Returns:
 
         Raises:
             ConnectionAbortedError: If it can't connect to host(s) within limit.
""" - request.endpoint = f"{self._db_endpoint}{request.endpoint}" + if request.prefix_needed: + request.endpoint = f"{self._db_endpoint}{request.endpoint}" host_index = self._host_resolver.get_host_index() for tries in range(self._host_resolver.max_tries): try: @@ -373,6 +377,7 @@ async def refresh_token(self) -> None: method=Method.POST, endpoint="/_open/auth", data=auth.encode("utf-8"), + prefix_needed=False, ) try: diff --git a/arangoasync/cursor.py b/arangoasync/cursor.py index 5339455..68ecdad 100644 --- a/arangoasync/cursor.py +++ b/arangoasync/cursor.py @@ -192,8 +192,8 @@ async def fetch(self, batch_id: Optional[str] = None) -> List[Any]: CursorStateError: If the cursor ID is not set. References: - - `read-the-next-batch-from-a-cursor `__ - - `read-a-batch-from-the-cursor-again `__ + - `read-the-next-batch-from-a-cursor `__ + - `read-a-batch-from-the-cursor-again `__ """ # noqa: E501 if self._id is None: raise CursorStateError("Cursor ID is not set") @@ -229,7 +229,7 @@ async def close(self, ignore_missing: bool = False) -> bool: CursorCloseError: If the cursor failed to close. References: - - `delete-a-cursor `__ + - `delete-a-cursor `__ """ # noqa: E501 if self._id is None: return False diff --git a/arangoasync/database.py b/arangoasync/database.py index 3cac02d..c1dc1b9 100644 --- a/arangoasync/database.py +++ b/arangoasync/database.py @@ -6,23 +6,36 @@ ] -from typing import Any, List, Optional, Sequence, TypeVar, cast +from datetime import datetime +from typing import Any, Dict, List, Optional, Sequence, TypeVar, cast from warnings import warn from arangoasync.aql import AQL -from arangoasync.collection import StandardCollection +from arangoasync.backup import Backup +from arangoasync.cluster import Cluster +from arangoasync.collection import Collection, StandardCollection from arangoasync.connection import Connection from arangoasync.errno import HTTP_FORBIDDEN, HTTP_NOT_FOUND from arangoasync.exceptions import ( + AccessTokenCreateError, + AccessTokenDeleteError, + AccessTokenListError, + AnalyzerCreateError, + AnalyzerDeleteError, + AnalyzerGetError, + AnalyzerListError, AsyncJobClearError, AsyncJobListError, CollectionCreateError, CollectionDeleteError, + CollectionKeyGeneratorsError, CollectionListError, + DatabaseCompactError, DatabaseCreateError, DatabaseDeleteError, DatabaseListError, DatabasePropertiesError, + DatabaseSupportInfoError, GraphCreateError, GraphDeleteError, GraphListError, @@ -32,8 +45,37 @@ PermissionListError, PermissionResetError, PermissionUpdateError, + ServerApiCallsError, + ServerAvailableOptionsGetError, + ServerCheckAvailabilityError, + ServerCurrentOptionsGetError, + ServerEchoError, + ServerEncryptionError, + ServerEngineError, + ServerExecuteError, + ServerLicenseGetError, + ServerLicenseSetError, + ServerLogLevelError, + ServerLogLevelResetError, + ServerLogLevelSetError, + ServerLogSettingError, + ServerLogSettingSetError, + ServerMetricsError, + ServerModeError, + ServerModeSetError, + ServerReadLogError, + ServerReloadRoutingError, + ServerShutdownError, + ServerShutdownProgressError, ServerStatusError, + ServerTimeError, + ServerTLSError, + ServerTLSReloadError, ServerVersionError, + TaskCreateError, + TaskDeleteError, + TaskGetError, + TaskListError, TransactionAbortError, TransactionCommitError, TransactionExecuteError, @@ -46,6 +88,13 @@ UserListError, UserReplaceError, UserUpdateError, + ViewCreateError, + ViewDeleteError, + ViewGetError, + ViewListError, + ViewRenameError, + ViewReplaceError, + ViewUpdateError, ) from 
arangoasync.executor import ( ApiExecutor, @@ -53,12 +102,15 @@ DefaultApiExecutor, TransactionApiExecutor, ) +from arangoasync.foxx import Foxx from arangoasync.graph import Graph +from arangoasync.replication import Replication from arangoasync.request import Method, Request from arangoasync.response import Response from arangoasync.result import Result from arangoasync.serialization import Deserializer, Serializer from arangoasync.typings import ( + AccessToken, CollectionInfo, CollectionType, DatabaseProperties, @@ -160,6 +212,42 @@ def aql(self) -> AQL: """ return AQL(self._executor) + @property + def backup(self) -> Backup: + """Return Backup API wrapper. + + Returns: + arangoasync.backup.Backup: Backup API wrapper. + """ + return Backup(self._executor) + + @property + def cluster(self) -> Cluster: + """Return Cluster API wrapper. + + Returns: + arangoasync.cluster.Cluster: Cluster API wrapper. + """ + return Cluster(self._executor) + + @property + def foxx(self) -> Foxx: + """Return Foxx API wrapper. + + Returns: + arangoasync.foxx.Foxx: Foxx API wrapper. + """ + return Foxx(self._executor) + + @property + def replication(self) -> Replication: + """Return Replication API wrapper. + + Returns: + Replication API wrapper. + """ + return Replication(self._executor) + async def properties(self) -> Result[DatabaseProperties]: """Return database properties. @@ -170,7 +258,7 @@ async def properties(self) -> Result[DatabaseProperties]: DatabasePropertiesError: If retrieval fails. References: - - `get-information-about-the-current-database `__ + - `get-information-about-the-current-database `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/database/current") @@ -193,7 +281,7 @@ async def status(self) -> Result[ServerStatusInformation]: ServerSatusError: If retrieval fails. References: - - `get-server-status-information `__ + - `get-server-status-information `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_admin/status") @@ -217,7 +305,7 @@ async def databases(self) -> Result[List[str]]: DatabaseListError: If retrieval fails. References: - - `list-all-databases `__ + - `list-all-databases `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/database") @@ -245,7 +333,7 @@ async def databases_accessible_to_user(self) -> Result[List[str]]: DatabaseListError: If retrieval fails. References: - - `list-the-accessible-databases `__ + - `list-the-accessible-databases `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/database/user") @@ -329,7 +417,7 @@ async def create_database( DatabaseCreateError: If creation fails. References: - - `create-a-database `__ + - `create-a-database `__ """ # noqa: E501 data: Json = {"name": name} @@ -390,7 +478,7 @@ async def delete_database( DatabaseDeleteError: If deletion fails. References: - - `drop-a-database `__ + - `drop-a-database `__ """ # noqa: E501 request = Request(method=Method.DELETE, endpoint=f"/_api/database/{name}") @@ -445,7 +533,7 @@ async def collections( CollectionListError: If retrieval fails. References: - - `list-all-collections `__ + - `list-all-collections `__ """ # noqa: E501 params: Params = {} if exclude_system is not None: @@ -573,7 +661,7 @@ async def create_collection( CollectionCreateError: If the operation fails. References: - - `create-a-collection `__ + - `create-a-collection `__ """ # noqa: E501 data: Json = {"name": name} if col_type is not None: @@ -663,7 +751,7 @@ async def delete_collection( CollectionDeleteError: If the operation fails. 
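Like `aql`, the new `backup`, `cluster`, `foxx`, and `replication` properties hand out thin wrappers bound to the database's executor, so they reuse the same connection, serializer, and deserializer. Accessing a wrapper performs no I/O; a sketch (the `services()` call mirrors the synchronous driver and is an assumption here):

```python
async def wrappers(db):
    # Each property constructs a wrapper around db's executor; no request
    # is sent until a method on the wrapper is awaited.
    backup = db.backup
    cluster = db.cluster
    replication = db.replication

    # Assumed method name, carried over from the synchronous driver.
    print(await db.foxx.services())
```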
References: - - `drop-a-collection `__ + - `drop-a-collection `__ """ # noqa: E501 params: Params = {} if is_system is not None: @@ -684,6 +772,374 @@ def response_handler(resp: Response) -> bool: return await self._executor.execute(request, response_handler) + async def key_generators(self) -> Result[List[str]]: + """Returns the available key generators for collections. + + Returns: + list: List of available key generators. + + Raises: + CollectionKeyGeneratorsError: If retrieval fails. + + References: + - `get-the-available-key-generators `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_api/key-generators") + + def response_handler(resp: Response) -> List[str]: + if not resp.is_success: + raise CollectionKeyGeneratorsError(resp, request) + return cast( + List[str], self.deserializer.loads(resp.raw_body)["keyGenerators"] + ) + + return await self._executor.execute(request, response_handler) + + async def has_document( + self, + document: str | Json, + allow_dirty_read: bool = False, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + ) -> Result[bool]: + """Check if a document exists. + + Args: + document (str | dict): Document ID, key or body. + Document body must contain the "_id" field. + allow_dirty_read (bool): Allow reads from followers in a cluster. + if_match (str | None): The document is returned, if it has the same + revision as the given ETag. + if_none_match (str | None): The document is returned, if it has a + different revision than the given ETag. + + Returns: + `True` if the document exists, `False` otherwise. + + Raises: + DocumentRevisionError: If the revision is incorrect. + DocumentGetError: If retrieval fails. + + References: + - `get-a-document-header `__ + """ # noqa: E501 + col = Collection.get_col_name(document) + return await self.collection(col).has( + document, + allow_dirty_read=allow_dirty_read, + if_match=if_match, + if_none_match=if_none_match, + ) + + async def document( + self, + document: str | Json, + allow_dirty_read: bool = False, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + ) -> Result[Optional[Json]]: + """Return a document. + + Args: + document (str | dict): Document ID, key or body. + Document body must contain the "_id" field. + allow_dirty_read (bool): Allow reads from followers in a cluster. + if_match (str | None): The document is returned, if it has the same + revision as the given ETag. + if_none_match (str | None): The document is returned, if it has a + different revision than the given ETag. + + Returns: + Document or `None` if not found. + + Raises: + DocumentRevisionError: If the revision is incorrect. + DocumentGetError: If retrieval fails. + DocumentParseError: If the document is malformed. 
+ + References: + - `get-a-document `__ + """ # noqa: E501 + col: StandardCollection[Json, Json, Jsons] = self.collection( + Collection.get_col_name(document) + ) + return await col.get( + document, + allow_dirty_read=allow_dirty_read, + if_match=if_match, + if_none_match=if_none_match, + ) + + async def insert_document( + self, + collection: str, + document: Json, + wait_for_sync: Optional[bool] = None, + return_new: Optional[bool] = None, + return_old: Optional[bool] = None, + silent: Optional[bool] = None, + overwrite: Optional[bool] = None, + overwrite_mode: Optional[str] = None, + keep_null: Optional[bool] = None, + merge_objects: Optional[bool] = None, + refill_index_caches: Optional[bool] = None, + version_attribute: Optional[str] = None, + ) -> Result[bool | Json]: + """Insert a new document. + + Args: + collection (str): Collection name. + document (dict): Document to insert. If it contains the "_key" or "_id" + field, the value is used as the key of the new document (otherwise + it is auto-generated). Any "_rev" field is ignored. + wait_for_sync (bool | None): Wait until document has been synced to disk. + return_new (bool | None): Additionally return the complete new document + under the attribute `new` in the result. + return_old (bool | None): Additionally return the complete old document + under the attribute `old` in the result. Only available if the + `overwrite` option is used. + silent (bool | None): If set to `True`, no document metadata is returned. + This can be used to save resources. + overwrite (bool | None): If set to `True`, operation does not fail on + duplicate key and existing document is overwritten (replace-insert). + overwrite_mode (str | None): Overwrite mode. Supersedes **overwrite** + option. May be one of "ignore", "replace", "update" or "conflict". + keep_null (bool | None): If set to `True`, fields with value None are + retained in the document. Otherwise, they are removed completely. + Applies only when **overwrite_mode** is set to "update" + (update-insert). + merge_objects (bool | None): If set to `True`, sub-dictionaries are merged + instead of the new one overwriting the old one. Applies only when + **overwrite_mode** is set to "update" (update-insert). + refill_index_caches (bool | None): Whether to add new entries to + in-memory index caches if document insertions affect the edge index + or cache-enabled persistent indexes. + version_attribute (str | None): Support for simple external versioning to + document operations. Only applicable if **overwrite** is set to `True` + or **overwrite_mode** is set to "update" or "replace". + + Returns: + bool | dict: Document metadata (e.g. document id, key, revision) or `True` + if **silent** is set to `True`. + + Raises: + DocumentInsertError: If insertion fails. + DocumentParseError: If the document is malformed. 
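`has_document` and `document` are database-level conveniences: they parse the collection name out of the handle with `Collection.get_col_name` and then delegate to the matching `StandardCollection` call. A short sketch (the `students/jane` handle is illustrative):

```python
async def fetch(db):
    # Works with an "_id" string or a body containing "_id".
    if await db.has_document("students/jane"):
        doc = await db.document("students/jane")
        print(doc["_key"])  # "jane"
```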
+ + References: + - `create-a-document `__ + """ # noqa: E501 + col: StandardCollection[Json, Json, Jsons] = self.collection(collection) + return await col.insert( + document, + wait_for_sync=wait_for_sync, + return_new=return_new, + return_old=return_old, + silent=silent, + overwrite=overwrite, + overwrite_mode=overwrite_mode, + keep_null=keep_null, + merge_objects=merge_objects, + refill_index_caches=refill_index_caches, + version_attribute=version_attribute, + ) + + async def update_document( + self, + document: Json, + ignore_revs: Optional[bool] = None, + wait_for_sync: Optional[bool] = None, + return_new: Optional[bool] = None, + return_old: Optional[bool] = None, + silent: Optional[bool] = None, + keep_null: Optional[bool] = None, + merge_objects: Optional[bool] = None, + refill_index_caches: Optional[bool] = None, + version_attribute: Optional[str] = None, + if_match: Optional[str] = None, + ) -> Result[bool | Json]: + """Update a document. + + Args: + document (dict): Partial or full document with the updated values. + It must contain the "_key" or "_id" field. + ignore_revs (bool | None): If set to `True`, the `_rev` attribute in the + document is ignored. If this is set to `False`, then the `_rev` + attribute given in the body document is taken as a precondition. + The document is only updated if the current revision is the one + specified. + wait_for_sync (bool | None): Wait until document has been synced to disk. + return_new (bool | None): Additionally return the complete new document + under the attribute `new` in the result. + return_old (bool | None): Additionally return the complete old document + under the attribute `old` in the result. + silent (bool | None): If set to `True`, no document metadata is returned. + This can be used to save resources. + keep_null (bool | None): If the intention is to delete existing attributes + with the patch command, set this parameter to `False`. + merge_objects (bool | None): Controls whether objects (not arrays) are + merged if present in both the existing and the patch document. + If set to `False`, the value in the patch document overwrites the + existing document’s value. If set to `True`, objects are merged. + refill_index_caches (bool | None): Whether to add new entries to + in-memory index caches if document updates affect the edge index + or cache-enabled persistent indexes. + version_attribute (str | None): Support for simple external versioning to + document operations. + if_match (str | None): You can conditionally update a document based on a + target revision id by using the "if-match" HTTP header. + + Returns: + bool | dict: Document metadata (e.g. document id, key, revision) or `True` + if **silent** is set to `True`. + + Raises: + DocumentRevisionError: If precondition was violated. + DocumentUpdateError: If update fails. 
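`insert_document` takes the collection name explicitly, while `update_document` (and the `replace_document`/`delete_document` variants that follow) infer it from the payload's `_id`/`_key`. A sketch, assuming a `students` collection:

```python
async def insert_then_update(db):
    meta = await db.insert_document("students", {"_key": "01", "gpa": 3.2})
    # Collection name is inferred from the "_id" in the payload.
    await db.update_document({"_id": meta["_id"], "gpa": 3.4})
```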
+ + References: + - `update-a-document `__ + """ # noqa: E501 + col: StandardCollection[Json, Json, Jsons] = self.collection( + Collection.get_col_name(document) + ) + return await col.update( + document, + ignore_revs=ignore_revs, + wait_for_sync=wait_for_sync, + return_new=return_new, + return_old=return_old, + silent=silent, + keep_null=keep_null, + merge_objects=merge_objects, + refill_index_caches=refill_index_caches, + version_attribute=version_attribute, + if_match=if_match, + ) + + async def replace_document( + self, + document: Json, + ignore_revs: Optional[bool] = None, + wait_for_sync: Optional[bool] = None, + return_new: Optional[bool] = None, + return_old: Optional[bool] = None, + silent: Optional[bool] = None, + refill_index_caches: Optional[bool] = None, + version_attribute: Optional[str] = None, + if_match: Optional[str] = None, + ) -> Result[bool | Json]: + """Replace a document. + + Args: + document (dict): New document. It must contain the "_key" or "_id" field. + Edge document must also have "_from" and "_to" fields. + ignore_revs (bool | None): If set to `True`, the `_rev` attribute in the + document is ignored. If this is set to `False`, then the `_rev` + attribute given in the body document is taken as a precondition. + The document is only replaced if the current revision is the one + specified. + wait_for_sync (bool | None): Wait until document has been synced to disk. + return_new (bool | None): Additionally return the complete new document + under the attribute `new` in the result. + return_old (bool | None): Additionally return the complete old document + under the attribute `old` in the result. + silent (bool | None): If set to `True`, no document metadata is returned. + This can be used to save resources. + refill_index_caches (bool | None): Whether to add new entries to + in-memory index caches if document updates affect the edge index + or cache-enabled persistent indexes. + version_attribute (str | None): Support for simple external versioning to + document operations. + if_match (str | None): You can conditionally replace a document based on a + target revision id by using the "if-match" HTTP header. + + Returns: + bool | dict: Document metadata (e.g. document id, key, revision) or `True` + if **silent** is set to `True`. + + Raises: + DocumentRevisionError: If precondition was violated. + DocumentReplaceError: If replace fails. + + References: + - `replace-a-document `__ + """ # noqa: E501 + col: StandardCollection[Json, Json, Jsons] = self.collection( + Collection.get_col_name(document) + ) + return await col.replace( + document, + ignore_revs=ignore_revs, + wait_for_sync=wait_for_sync, + return_new=return_new, + return_old=return_old, + silent=silent, + refill_index_caches=refill_index_caches, + version_attribute=version_attribute, + if_match=if_match, + ) + + async def delete_document( + self, + document: str | Json, + ignore_revs: Optional[bool] = None, + ignore_missing: bool = False, + wait_for_sync: Optional[bool] = None, + return_old: Optional[bool] = None, + silent: Optional[bool] = None, + refill_index_caches: Optional[bool] = None, + if_match: Optional[str] = None, + ) -> Result[bool | Json]: + """Delete a document. + + Args: + document (str | dict): Document ID, key or body. The body must contain the + "_key" or "_id" field. + ignore_revs (bool | None): If set to `True`, the `_rev` attribute in the + document is ignored. If this is set to `False`, then the `_rev` + attribute given in the body document is taken as a precondition. 
+ The document is only replaced if the current revision is the one + specified. + ignore_missing (bool): Do not raise an exception on missing document. + This parameter has no effect in transactions where an exception is + always raised on failures. + wait_for_sync (bool | None): Wait until operation has been synced to disk. + return_old (bool | None): Additionally return the complete old document + under the attribute `old` in the result. + silent (bool | None): If set to `True`, no document metadata is returned. + This can be used to save resources. + refill_index_caches (bool | None): Whether to add new entries to + in-memory index caches if document updates affect the edge index + or cache-enabled persistent indexes. + if_match (bool | None): You can conditionally remove a document based + on a target revision id by using the "if-match" HTTP header. + + Returns: + bool | dict: Document metadata (e.g. document id, key, revision) or `True` + if **silent** is set to `True` and the document was found. + + Raises: + DocumentRevisionError: If precondition was violated. + DocumentDeleteError: If deletion fails. + + References: + - `remove-a-document `__ + """ # noqa: E501 + col: StandardCollection[Json, Json, Jsons] = self.collection( + Collection.get_col_name(document) + ) + return await col.delete( + document, + ignore_revs=ignore_revs, + ignore_missing=ignore_missing, + wait_for_sync=wait_for_sync, + return_old=return_old, + silent=silent, + refill_index_caches=refill_index_caches, + if_match=if_match, + ) + def graph( self, name: str, @@ -742,7 +1198,7 @@ async def graphs(self) -> Result[List[GraphProperties]]: GraphListError: If the operation fails. References: - - `list-all-graphs `__ + - `list-all-graphs `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint="/_api/gharial") @@ -797,7 +1253,7 @@ async def create_graph( GraphCreateError: If the operation fails. References: - - `create-a-graph `__ + - `create-a-graph `__ """ # noqa: E501 params: Params = {} if wait_for_sync is not None: @@ -859,7 +1315,7 @@ async def delete_graph( GraphDeleteError: If the operation fails. References: - - `drop-a-graph `__ + - `drop-a-graph `__ """ # noqa: E501 params: Params = {} if drop_collections is not None: @@ -878,568 +1334,1990 @@ def response_handler(resp: Response) -> bool: return await self._executor.execute(request, response_handler) - async def has_user(self, username: str) -> Result[bool]: - """Check if a user exists. + async def view(self, name: str) -> Result[Json]: + """Return the properties of a view. Args: - username (str): Username. + name (str): View name. Returns: - bool: True if the user exists, False otherwise. + dict: View properties. Raises: - UserListError: If the operation fails. - """ - request = Request(method=Method.GET, endpoint=f"/_api/user/{username}") + ViewGetError: If the operation fails. 
- def response_handler(resp: Response) -> bool: - if resp.is_success: - return True - if resp.status_code == HTTP_NOT_FOUND: - return False - raise UserListError(resp, request) + References: + - `read-properties-of-a-view `__ + - `get-the-properties-of-a-view `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint=f"/_api/view/{name}/properties") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ViewGetError(resp, request) + return Response.format_body(self.deserializer.loads(resp.raw_body)) return await self._executor.execute(request, response_handler) - async def user(self, username: str) -> Result[UserInfo]: - """Fetches data about a user. + async def view_info(self, name: str) -> Result[Json]: + """Return basic information about a specific view. Args: - username (str): Username. + name (str): View name. Returns: - UserInfo: User details. + dict: View information. Raises: - UserGetError: If the operation fails. + ViewGetError: If the operation fails. References: - - `get-a-user` `__ + - `get-information-about-a-view `_ + - `get-information-about-a-view `__ """ # noqa: E501 - request = Request(method=Method.GET, endpoint=f"/_api/user/{username}") + request = Request(method=Method.GET, endpoint=f"/_api/view/{name}") - def response_handler(resp: Response) -> UserInfo: + def response_handler(resp: Response) -> Json: if not resp.is_success: - raise UserGetError(resp, request) - body = self.deserializer.loads(resp.raw_body) - return UserInfo( - user=body["user"], - active=cast(bool, body.get("active")), - extra=body.get("extra"), - ) + raise ViewGetError(resp, request) + return Response.format_body(self.deserializer.loads(resp.raw_body)) return await self._executor.execute(request, response_handler) - async def users(self) -> Result[Sequence[UserInfo]]: - """Fetches data about all users. - - Without the necessary permissions, you might only get data about the - current user. + async def views(self) -> Result[Jsons]: + """List all views in the database along with their summary information. Returns: - list: User information. + list: List of views with their properties. Raises: - UserListError: If the operation fails. + ViewListError: If the operation fails. References: - - `list-available-users `__ + - `list-all-views `__ + - `list-all-views `__ """ # noqa: E501 - request = Request(method=Method.GET, endpoint="/_api/user") + request = Request(method=Method.GET, endpoint="/_api/view") - def response_handler(resp: Response) -> Sequence[UserInfo]: + def response_handler(resp: Response) -> Jsons: if not resp.is_success: - raise UserListError(resp, request) + raise ViewListError(resp, request) body = self.deserializer.loads(resp.raw_body) - return [ - UserInfo(user=u["user"], active=u.get("active"), extra=u.get("extra")) - for u in body["result"] - ] + return cast(Jsons, body["result"]) return await self._executor.execute(request, response_handler) - async def create_user(self, user: UserInfo | Json) -> Result[UserInfo]: - """Create a new user. + async def create_view( + self, + name: str, + view_type: str, + properties: Optional[Json] = None, + ) -> Result[Json]: + """Create a view. Args: - user (UserInfo | dict): User information. + name (str): View name. + view_type (str): Type of the view (e.g., "arangosearch", "view"). + properties (dict | None): Properties of the view. Returns: - UserInfo: New user details. + dict: View properties. Raises: - ValueError: If the username is missing. - UserCreateError: If the operation fails. - - Example: - .. 
code-block:: python - - await db.create_user(UserInfo(user="john", password="secret")) - await db.create_user({user="john", password="secret"}) + ViewCreateError: If the operation fails. References: - - `create-a-user `__ + - `create-a-search-alias-view `__ + - `create-an-arangosearch-view `__ """ # noqa: E501 - if isinstance(user, dict): - user = UserInfo(**user) - if not user.user: - raise ValueError("Username is required.") + data: Json = {"name": name, "type": view_type} + if properties is not None: + data.update(properties) - data: Json = user.format(UserInfo.user_management_formatter) request = Request( method=Method.POST, - endpoint="/_api/user", + endpoint="/_api/view", data=self.serializer.dumps(data), ) - def response_handler(resp: Response) -> UserInfo: + def response_handler(resp: Response) -> Json: if not resp.is_success: - raise UserCreateError(resp, request) - body = self.deserializer.loads(resp.raw_body) - return UserInfo( - user=body["user"], - active=cast(bool, body.get("active")), - extra=body.get("extra"), - ) + raise ViewCreateError(resp, request) + return self.deserializer.loads(resp.raw_body) return await self._executor.execute(request, response_handler) - async def replace_user(self, user: UserInfo | Json) -> Result[UserInfo]: - """Replace the data of an existing user. + async def replace_view(self, name: str, properties: Json) -> Result[Json]: + """Replace the properties of an existing view. Args: - user (UserInfo | dict): New user information. + name (str): View name. + properties (dict): New properties for the view. Returns: - UserInfo: New user details. + dict: Updated view properties. Raises: - ValueError: If the username is missing. - UserReplaceError: If the operation fails. + ViewReplaceError: If the operation fails. References: - - `replace-a-user `__ + - `replace-the-properties-of-a-search-alias-view `__ + - `replace-the-properties-of-an-arangosearch-view `__ """ # noqa: E501 - if isinstance(user, dict): - user = UserInfo(**user) - if not user.user: - raise ValueError("Username is required.") - - data: Json = user.format(UserInfo.user_management_formatter) request = Request( method=Method.PUT, - endpoint=f"/_api/user/{user.user}", - data=self.serializer.dumps(data), + endpoint=f"/_api/view/{name}/properties", + data=self.serializer.dumps(properties), ) - def response_handler(resp: Response) -> UserInfo: - if not resp.is_success: - raise UserReplaceError(resp, request) - body = self.deserializer.loads(resp.raw_body) - return UserInfo( - user=body["user"], - active=cast(bool, body.get("active")), - extra=body.get("extra"), - ) + def response_handler(resp: Response) -> Json: + if resp.is_success: + return self.deserializer.loads(resp.raw_body) + raise ViewReplaceError(resp, request) return await self._executor.execute(request, response_handler) - async def update_user(self, user: UserInfo | Json) -> Result[UserInfo]: - """Partially modifies the data of an existing user. + async def update_view(self, name: str, properties: Json) -> Result[Json]: + """Update the properties of an existing view. Args: - user (UserInfo | dict): User information. + name (str): View name. + properties (dict): New properties for the view. Returns: - UserInfo: Updated user details. + dict: Updated view properties. Raises: - ValueError: If the username is missing. - UserUpdateError: If the operation fails. + ViewUpdateError: If the operation fails. 
References: - - `update-a-user `__ + - `update-the-properties-of-a-search-alias-view `__ + - `update-the-properties-of-an-arangosearch-view `__ """ # noqa: E501 - if isinstance(user, dict): - user = UserInfo(**user) - if not user.user: - raise ValueError("Username is required.") - - data: Json = user.format(UserInfo.user_management_formatter) request = Request( method=Method.PATCH, - endpoint=f"/_api/user/{user.user}", - data=self.serializer.dumps(data), + endpoint=f"/_api/view/{name}/properties", + data=self.serializer.dumps(properties), ) - def response_handler(resp: Response) -> UserInfo: - if not resp.is_success: - raise UserUpdateError(resp, request) - body = self.deserializer.loads(resp.raw_body) - return UserInfo( - user=body["user"], - active=cast(bool, body.get("active")), - extra=body.get("extra"), - ) + def response_handler(resp: Response) -> Json: + if resp.is_success: + return self.deserializer.loads(resp.raw_body) + raise ViewUpdateError(resp, request) return await self._executor.execute(request, response_handler) - async def delete_user( - self, - username: str, - ignore_missing: bool = False, - ) -> Result[bool]: - """Delete a user. + async def rename_view(self, name: str, new_name: str) -> None: + """Rename an existing view (not supported in cluster deployments). Args: - username (str): Username. - ignore_missing (bool): Do not raise an exception on missing user. - - Returns: - bool: True if the user was deleted successfully, `False` if the user was - not found but **ignore_missing** was set to `True`. + name (str): Current view name. + new_name (str): New view name. Raises: - UserDeleteError: If the operation fails. + ViewRenameError: If the operation fails. References: - - `remove-a-user `__ + - `rename-a-view `__ + - `rename-a-view `__ """ # noqa: E501 - request = Request(method=Method.DELETE, endpoint=f"/_api/user/{username}") + request = Request( + method=Method.PUT, + endpoint=f"/_api/view/{name}/rename", + data=self.serializer.dumps({"name": new_name}), + ) + + def response_handler(resp: Response) -> None: + if not resp.is_success: + raise ViewRenameError(resp, request) + + await self._executor.execute(request, response_handler) + + async def delete_view( + self, name: str, ignore_missing: bool = False + ) -> Result[bool]: + """Delete a view. + + Args: + name (str): View name. + ignore_missing (bool): If `True`, do not raise an exception if the + view does not exist. + + Returns: + bool: `True` if the view was deleted successfully, `False` if the + view was not found and **ignore_missing** was set to `True`. + + Raises: + ViewDeleteError: If the operation fails. + + References: + - `drop-a-view `__ + - `drop-a-view `__ + """ # noqa: E501 + request = Request(method=Method.DELETE, endpoint=f"/_api/view/{name}") def response_handler(resp: Response) -> bool: if resp.is_success: return True if resp.status_code == HTTP_NOT_FOUND and ignore_missing: return False - raise UserDeleteError(resp, request) + raise ViewDeleteError(resp, request) + + return await self._executor.execute(request, response_handler) + + async def analyzers(self) -> Result[Jsons]: + """List all analyzers in the database. + + Returns: + list: List of analyzers with their properties. + + Raises: + AnalyzerListError: If the operation fails. 
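Taken together, the view methods above cover the full lifecycle. A sketch against an `arangosearch` view (the view name and the `cleanupIntervalStep` property value are illustrative):

```python
async def view_lifecycle(db):
    await db.create_view(
        "v_students",
        view_type="arangosearch",
        properties={"cleanupIntervalStep": 0},
    )
    await db.update_view("v_students", {"cleanupIntervalStep": 2})
    print([v["name"] for v in await db.views()])
    await db.delete_view("v_students", ignore_missing=True)
```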
+ + References: + - `list-all-analyzers `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_api/analyzer") + + def response_handler(resp: Response) -> Jsons: + if resp.is_success: + result: Jsons = self.deserializer.loads(resp.raw_body)["result"] + return result + raise AnalyzerListError(resp, request) + + return await self._executor.execute(request, response_handler) + + async def analyzer(self, name: str) -> Result[Json]: + """Return analyzer details. + + Args: + name (str): Analyzer name. + + Returns: + dict: Analyzer properties. + + References: + - `get-an-analyzer-definition `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint=f"/_api/analyzer/{name}") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise AnalyzerGetError(resp, request) + return Response.format_body(self.deserializer.loads(resp.raw_body)) + + return await self._executor.execute(request, response_handler) + + async def create_analyzer( + self, + name: str, + analyzer_type: str, + properties: Optional[Json] = None, + features: Optional[Sequence[str]] = None, + ) -> Result[Json]: + """Create an analyzer. + + Args: + name (str): Analyzer name. + analyzer_type (str): Type of the analyzer (e.g., "text", "identity"). + properties (dict | None): Properties of the analyzer. + features (list | None): The set of features to set on the Analyzer + generated fields. The default value is an empty array. Possible values: + "frequency", "norm", "position", "offset". + + Returns: + dict: Analyzer properties. + + Raises: + AnalyzerCreateError: If the operation fails. + + References: + - `create-an-analyzer `__ + """ # noqa: E501 + data: Json = {"name": name, "type": analyzer_type} + if properties is not None: + data["properties"] = properties + if features is not None: + data["features"] = features + + request = Request( + method=Method.POST, + endpoint="/_api/analyzer", + data=self.serializer.dumps(data), + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise AnalyzerCreateError(resp, request) + return self.deserializer.loads(resp.raw_body) + + return await self._executor.execute(request, response_handler) + + async def delete_analyzer( + self, name: str, force: Optional[bool] = None, ignore_missing: bool = False + ) -> Result[bool]: + """Delete an analyzer. + + Args: + name (str): Analyzer name. + force (bool | None): Remove the analyzer configuration even if in use. + ignore_missing (bool): Do not raise an exception on missing analyzer. + + Returns: + bool: `True` if the analyzer was deleted successfully, `False` if the + analyzer was not found and **ignore_missing** was set to `True`. + + Raises: + AnalyzerDeleteError: If the operation fails. + + References: + - `remove-an-analyzer `__ + """ # noqa: E501 + params: Params = {} + if force is not None: + params["force"] = force + + request = Request( + method=Method.DELETE, + endpoint=f"/_api/analyzer/{name}", + params=params, + ) + + def response_handler(resp: Response) -> bool: + if resp.is_success: + return True + if resp.status_code == HTTP_NOT_FOUND and ignore_missing: + return False + raise AnalyzerDeleteError(resp, request) + + return await self._executor.execute(request, response_handler) + + async def has_user(self, username: str) -> Result[bool]: + """Check if a user exists. + + Args: + username (str): Username. + + Returns: + bool: True if the user exists, False otherwise. + + Raises: + UserListError: If the operation fails. 
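The analyzer helpers mirror the View API shape. A sketch creating and dropping a `norm` analyzer (the name, locale, and feature list are illustrative values):

```python
async def analyzer_lifecycle(db):
    await db.create_analyzer(
        "norm_en",
        analyzer_type="norm",
        properties={"locale": "en", "case": "lower", "accent": False},
        features=["frequency", "norm"],
    )
    print(await db.analyzer("norm_en"))
    await db.delete_analyzer("norm_en", force=True, ignore_missing=True)
```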
+ """ + request = Request(method=Method.GET, endpoint=f"/_api/user/{username}") + + def response_handler(resp: Response) -> bool: + if resp.is_success: + return True + if resp.status_code == HTTP_NOT_FOUND: + return False + raise UserListError(resp, request) + + return await self._executor.execute(request, response_handler) + + async def user(self, username: str) -> Result[UserInfo]: + """Fetches data about a user. + + Args: + username (str): Username. + + Returns: + UserInfo: User details. + + Raises: + UserGetError: If the operation fails. + + References: + - `get-a-user` `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint=f"/_api/user/{username}") + + def response_handler(resp: Response) -> UserInfo: + if not resp.is_success: + raise UserGetError(resp, request) + body = self.deserializer.loads(resp.raw_body) + return UserInfo( + user=body["user"], + active=cast(bool, body.get("active")), + extra=body.get("extra"), + ) + + return await self._executor.execute(request, response_handler) + + async def users(self) -> Result[Sequence[UserInfo]]: + """Fetches data about all users. + + Without the necessary permissions, you might only get data about the + current user. + + Returns: + list: User information. + + Raises: + UserListError: If the operation fails. + + References: + - `list-available-users `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_api/user") + + def response_handler(resp: Response) -> Sequence[UserInfo]: + if not resp.is_success: + raise UserListError(resp, request) + body = self.deserializer.loads(resp.raw_body) + return [ + UserInfo(user=u["user"], active=u.get("active"), extra=u.get("extra")) + for u in body["result"] + ] + + return await self._executor.execute(request, response_handler) + + async def create_user(self, user: UserInfo | Json) -> Result[UserInfo]: + """Create a new user. + + Args: + user (UserInfo | dict): User information. + + Returns: + UserInfo: New user details. + + Raises: + ValueError: If the username is missing. + UserCreateError: If the operation fails. + + Example: + .. code-block:: python + + await db.create_user(UserInfo(user="john", password="secret")) + await db.create_user({user="john", password="secret"}) + + References: + - `create-a-user `__ + """ # noqa: E501 + if isinstance(user, dict): + user = UserInfo(**user) + if not user.user: + raise ValueError("Username is required.") + + data: Json = user.format(UserInfo.user_management_formatter) + request = Request( + method=Method.POST, + endpoint="/_api/user", + data=self.serializer.dumps(data), + ) + + def response_handler(resp: Response) -> UserInfo: + if not resp.is_success: + raise UserCreateError(resp, request) + body = self.deserializer.loads(resp.raw_body) + return UserInfo( + user=body["user"], + active=cast(bool, body.get("active")), + extra=body.get("extra"), + ) + + return await self._executor.execute(request, response_handler) + + async def replace_user(self, user: UserInfo | Json) -> Result[UserInfo]: + """Replace the data of an existing user. + + Args: + user (UserInfo | dict): New user information. + + Returns: + UserInfo: New user details. + + Raises: + ValueError: If the username is missing. + UserReplaceError: If the operation fails. 
+ + References: + - `replace-a-user `__ + """ # noqa: E501 + if isinstance(user, dict): + user = UserInfo(**user) + if not user.user: + raise ValueError("Username is required.") + + data: Json = user.format(UserInfo.user_management_formatter) + request = Request( + method=Method.PUT, + endpoint=f"/_api/user/{user.user}", + data=self.serializer.dumps(data), + ) + + def response_handler(resp: Response) -> UserInfo: + if not resp.is_success: + raise UserReplaceError(resp, request) + body = self.deserializer.loads(resp.raw_body) + return UserInfo( + user=body["user"], + active=cast(bool, body.get("active")), + extra=body.get("extra"), + ) + + return await self._executor.execute(request, response_handler) + + async def update_user(self, user: UserInfo | Json) -> Result[UserInfo]: + """Partially modifies the data of an existing user. + + Args: + user (UserInfo | dict): User information. + + Returns: + UserInfo: Updated user details. + + Raises: + ValueError: If the username is missing. + UserUpdateError: If the operation fails. + + References: + - `update-a-user `__ + """ # noqa: E501 + if isinstance(user, dict): + user = UserInfo(**user) + if not user.user: + raise ValueError("Username is required.") + + data: Json = user.format(UserInfo.user_management_formatter) + request = Request( + method=Method.PATCH, + endpoint=f"/_api/user/{user.user}", + data=self.serializer.dumps(data), + ) + + def response_handler(resp: Response) -> UserInfo: + if not resp.is_success: + raise UserUpdateError(resp, request) + body = self.deserializer.loads(resp.raw_body) + return UserInfo( + user=body["user"], + active=cast(bool, body.get("active")), + extra=body.get("extra"), + ) + + return await self._executor.execute(request, response_handler) + + async def delete_user( + self, + username: str, + ignore_missing: bool = False, + ) -> Result[bool]: + """Delete a user. + + Args: + username (str): Username. + ignore_missing (bool): Do not raise an exception on missing user. + + Returns: + bool: True if the user was deleted successfully, `False` if the user was + not found but **ignore_missing** was set to `True`. + + Raises: + UserDeleteError: If the operation fails. + + References: + - `remove-a-user `__ + """ # noqa: E501 + request = Request(method=Method.DELETE, endpoint=f"/_api/user/{username}") + + def response_handler(resp: Response) -> bool: + if resp.is_success: + return True + if resp.status_code == HTTP_NOT_FOUND and ignore_missing: + return False + raise UserDeleteError(resp, request) + + return await self._executor.execute(request, response_handler) + + async def permissions(self, username: str, full: bool = True) -> Result[Json]: + """Return user permissions for all databases and collections. + + Args: + username (str): Username. + full (bool): If `True`, the result will contain the permissions for the + databases as well as the permissions for the collections. + + Returns: + dict: User permissions for all databases and (optionally) collections. + + Raises: + PermissionListError: If the operation fails. 
+ + References: + - `list-a-users-accessible-databases `__ + """ # noqa: 501 + request = Request( + method=Method.GET, + endpoint=f"/_api/user/{username}/database", + params={"full": full}, + ) + + def response_handler(resp: Response) -> Json: + if resp.is_success: + result: Json = self.deserializer.loads(resp.raw_body)["result"] + return result + raise PermissionListError(resp, request) + + return await self._executor.execute(request, response_handler) + + async def permission( + self, + username: str, + database: str, + collection: Optional[str] = None, + ) -> Result[str]: + """Return user permission for a specific database or collection. + + Args: + username (str): Username. + database (str): Database name. + collection (str | None): Collection name. + + Returns: + str: User access level. + + Raises: + PermissionGetError: If the operation fails. + + References: + - `get-a-users-database-access-level `__ + - `get-a-users-collection-access-level `__ + """ # noqa: 501 + endpoint = f"/_api/user/{username}/database/{database}" + if collection is not None: + endpoint += f"/{collection}" + request = Request(method=Method.GET, endpoint=endpoint) + + def response_handler(resp: Response) -> str: + if resp.is_success: + return cast(str, self.deserializer.loads(resp.raw_body)["result"]) + raise PermissionGetError(resp, request) + + return await self._executor.execute(request, response_handler) + + async def update_permission( + self, + username: str, + permission: str, + database: str, + collection: Optional[str] = None, + ignore_failure: bool = False, + ) -> Result[bool]: + """Update user permissions for a specific database or collection. + + Args: + username (str): Username. + permission (str): Allowed values are "rw" (administrate), + "ro" (access) and "none" (no access). + database (str): Database to set the access level for. + collection (str | None): Collection to set the access level for. + ignore_failure (bool): Do not raise an exception on failure. + + Returns: + bool: `True` if the operation was successful. + + Raises: + PermissionUpdateError: If the operation fails and `ignore_failure` + is `False`. + + References: + - `set-a-users-database-access-level `__ + - `set-a-users-collection-access-level `__ + """ # noqa: E501 + endpoint = f"/_api/user/{username}/database/{database}" + if collection is not None: + endpoint += f"/{collection}" + + request = Request( + method=Method.PUT, + endpoint=endpoint, + data=self.serializer.dumps({"grant": permission}), + ) + + def response_handler(resp: Response) -> bool: + if resp.is_success: + return True + if ignore_failure: + return False + raise PermissionUpdateError(resp, request) + + return await self._executor.execute(request, response_handler) + + async def reset_permission( + self, + username: str, + database: str, + collection: Optional[str] = None, + ignore_failure: bool = False, + ) -> Result[bool]: + """Reset user permission for a specific database or collection. + + Args: + username (str): Username. + database (str): Database to reset the access level for. + collection (str | None): Collection to reset the access level for. + ignore_failure (bool): Do not raise an exception on failure. + + Returns: + bool: `True` if the operation was successful. + + Raises: + PermissionResetError: If the operation fails and `ignore_failure` + is `False`. 
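The permission helpers operate at database or collection granularity depending on whether `collection` is passed. A sketch granting and then clearing read-only access (the username and database are illustrative, and the caller needs admin rights):

```python
async def grant_and_revoke(db):
    await db.update_permission("john", permission="ro", database="test")
    print(await db.permission("john", database="test"))  # "ro"
    await db.reset_permission("john", database="test")
```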
+
+        References:
+            - `clear-a-users-database-access-level `__
+            - `clear-a-users-collection-access-level `__
+        """  # noqa: E501
+        endpoint = f"/_api/user/{username}/database/{database}"
+        if collection is not None:
+            endpoint += f"/{collection}"
+
+        request = Request(
+            method=Method.DELETE,
+            endpoint=endpoint,
+        )
+
+        def response_handler(resp: Response) -> bool:
+            if resp.is_success:
+                return True
+            if ignore_failure:
+                return False
+            raise PermissionResetError(resp, request)
+
+        return await self._executor.execute(request, response_handler)
+
+    async def jwt_secrets(self) -> Result[Json]:
+        """Return information on currently loaded JWT secrets.
+
+        Returns:
+            dict: JWT secrets.
+
+        Raises:
+            JWTSecretListError: If the operation fails.
+
+        References:
+            - `get-information-about-the-loaded-jwt-secrets `__
+        """  # noqa: 501
+        request = Request(method=Method.GET, endpoint="/_admin/server/jwt")
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise JWTSecretListError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return Response.format_body(result)
+
+        return await self._executor.execute(request, response_handler)
+
+    async def reload_jwt_secrets(self) -> Result[Json]:
+        """Hot-reload JWT secrets from disk.
+
+        Returns:
+            dict: Information on reloaded JWT secrets.
+
+        Raises:
+            JWTSecretReloadError: If the operation fails.
+
+        References:
+            - `hot-reload-the-jwt-secrets-from-disk `__
+        """  # noqa: 501
+        request = Request(
+            method=Method.POST, endpoint="/_admin/server/jwt", prefix_needed=False
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise JWTSecretReloadError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return Response.format_body(result)
+
+        return await self._executor.execute(request, response_handler)
+
+    async def create_access_token(
+        self,
+        user: str,
+        name: str,
+        valid_until: int,
+    ) -> Result[AccessToken]:
+        """Create an access token for the given user.
+
+        Args:
+            user (str): The name of the user.
+            name (str): A name for the access token to make identification easier,
+                like a short description.
+            valid_until (int): A Unix timestamp in seconds to set the expiration date and time.
+
+        Returns:
+            AccessToken: Information about the created access token, including the token itself.
+
+        Raises:
+            AccessTokenCreateError: If the operation fails.
+
+        References:
+            - `create-an-access-token `__
+        """  # noqa: E501
+        data: Json = {
+            "name": name,
+            "valid_until": valid_until,
+        }
+
+        request = Request(
+            method=Method.POST,
+            endpoint=f"/_api/token/{user}",
+            data=self.serializer.dumps(data),
+        )
+
+        def response_handler(resp: Response) -> AccessToken:
+            if not resp.is_success:
+                raise AccessTokenCreateError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return AccessToken(result)
+
+        return await self._executor.execute(request, response_handler)
+
+    async def delete_access_token(self, user: str, token_id: int) -> None:
+        """Delete an access token for the given user.
+
+        Args:
+            user (str): The name of the user.
+            token_id (int): The ID of the access token to delete.
+
+        Raises:
+            AccessTokenDeleteError: If the operation fails.
+ + References: + - `delete-an-access-token `__ + """ # noqa: E501 + request = Request( + method=Method.DELETE, endpoint=f"/_api/token/{user}/{token_id}" + ) + + def response_handler(resp: Response) -> None: + if not resp.is_success: + raise AccessTokenDeleteError(resp, request) + + await self._executor.execute(request, response_handler) + + async def list_access_tokens(self, user: str) -> Result[Jsons]: + """List all access tokens for the given user. + + Args: + user (str): The name of the user. + + Returns: + list: List of access tokens for the user. + + Raises: + AccessTokenListError: If the operation fails. + + References: + - `list-all-access-tokens `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint=f"/_api/token/{user}") + + def response_handler(resp: Response) -> Jsons: + if not resp.is_success: + raise AccessTokenListError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return cast(Jsons, result["tokens"]) + + return await self._executor.execute(request, response_handler) + + async def tls(self) -> Result[Json]: + """Return TLS data (keyfile, clientCA). + + This API requires authentication. + + Returns: + dict: dict containing the following components: + - keyfile: Information about the key file. + - clientCA: Information about the Certificate Authority (CA) for client certificate verification. + + Raises: + ServerTLSError: If the operation fails. + + References: + - `get-the-tls-data `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_admin/server/tls") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerTLSError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body)["result"] + return result + + return await self._executor.execute(request, response_handler) + + async def reload_tls(self) -> Result[Json]: + """Reload TLS data (keyfile, clientCA). + + This is a protected API and can only be executed with superuser rights. + + Returns: + dict: New TLS data. + + Raises: + ServerTLSReloadError: If the operation fails. + + References: + - `reload-the-tls-data `__ + """ # noqa: E501 + request = Request(method=Method.POST, endpoint="/_admin/server/tls") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerTLSReloadError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body)["result"] + return result + + return await self._executor.execute(request, response_handler) + + async def encryption(self) -> Result[Json]: + """Rotate the user-supplied keys for encryption. + + This is a protected API and can only be executed with superuser rights. + This API is not available on Coordinator nodes. + + Returns: + dict: Encryption keys. + + Raises: + ServerEncryptionError: If the operation fails. + + References: + - `rotate-the-encryption-keys `__ + """ # noqa: E501 + request = Request(method=Method.POST, endpoint="/_admin/server/encryption") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerEncryptionError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body)["result"] + return result + + return await self._executor.execute(request, response_handler) + + async def list_transactions(self) -> Result[Jsons]: + """List all currently running stream transactions. + + Returns: + list: List of transactions, with each transaction containing + an "id" and a "state" field. + + Raises: + TransactionListError: If the operation fails on the server side. 
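A token rotation sketch built on the three access-token methods above. The `id` and `name` fields on the listed tokens are assumptions about the server's response shape, not something this diff pins down:

```python
import time


async def rotate_token(db):
    await db.create_access_token(
        "john", name="ci-token", valid_until=int(time.time()) + 3600
    )
    for tok in await db.list_access_tokens("john"):
        if tok["name"] == "ci-token":  # assumed field names
            await db.delete_access_token("john", tok["id"])
```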
+ """ + request = Request(method=Method.GET, endpoint="/_api/transaction") + + def response_handler(resp: Response) -> Jsons: + if not resp.is_success: + raise TransactionListError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return cast(Jsons, result["transactions"]) + + return await self._executor.execute(request, response_handler) + + async def execute_transaction( + self, + command: str, + params: Optional[Json] = None, + read: Optional[str | Sequence[str]] = None, + write: Optional[str | Sequence[str]] = None, + exclusive: Optional[str | Sequence[str]] = None, + allow_implicit: Optional[bool] = None, + wait_for_sync: Optional[bool] = None, + lock_timeout: Optional[int] = None, + max_transaction_size: Optional[int] = None, + ) -> Result[Any]: + """Execute a JavaScript Transaction. + + Warning: + JavaScript Transactions are deprecated from ArangoDB v3.12.0 onward and + will be removed in a future version. + + Args: + command (str): The actual transaction operations to be executed, in the + form of stringified JavaScript code. + params (dict): Optional parameters passed into the JavaScript command. + read (str | list | None): Name(s) of collections read during transaction. + write (str | list | None): Name(s) of collections written to during + transaction with shared access. + exclusive (str | list | None): Name(s) of collections written to during + transaction with exclusive access. + allow_implicit (bool | None): Allow reading from undeclared collections. + wait_for_sync (bool | None): If `True`, will force the transaction to write + all data to disk before returning. + lock_timeout (int | None): Timeout for waiting on collection locks. Setting + it to 0 will prevent ArangoDB from timing out while waiting for a lock. + max_transaction_size (int | None): Transaction size limit in bytes. + + Returns: + Any: Result of the transaction. + + Raises: + TransactionExecuteError: If the operation fails on the server side. + + References: + - `execute-a-javascript-transaction `__ + """ # noqa: 501 + m = "JavaScript Transactions are deprecated from ArangoDB v3.12.0 onward and will be removed in a future version." # noqa: E501 + warn(m, DeprecationWarning, stacklevel=2) + + collections = dict() + if read is not None: + collections["read"] = read + if write is not None: + collections["write"] = write + if exclusive is not None: + collections["exclusive"] = exclusive + + data: Json = dict(collections=collections, action=command) + if params is not None: + data["params"] = params + if wait_for_sync is not None: + data["waitForSync"] = wait_for_sync + if allow_implicit is not None: + data["allowImplicit"] = allow_implicit + if lock_timeout is not None: + data["lockTimeout"] = lock_timeout + if max_transaction_size is not None: + data["maxTransactionSize"] = max_transaction_size + + request = Request( + method=Method.POST, + endpoint="/_api/transaction", + data=self.serializer.dumps(data), + ) + + def response_handler(resp: Response) -> Any: + if not resp.is_success: + raise TransactionExecuteError(resp, request) + return self.deserializer.loads(resp.raw_body)["result"] + + return await self._executor.execute(request, response_handler) + + async def version(self, details: bool = False) -> Result[Json]: + """Return the server version information. + + Args: + details (bool): If `True`, return detailed version information. + + Returns: + dict: Server version information. + + Raises: + ServerVersionError: If the operation fails on the server side. 
+ + References: + - `get-the-server-version `__ + """ # noqa: E501 + request = Request( + method=Method.GET, endpoint="/_api/version", params={"details": details} + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerVersionError(resp, request) + return self.deserializer.loads(resp.raw_body) + + return await self._executor.execute(request, response_handler) + + async def tasks(self) -> Result[Jsons]: + """Fetches all existing tasks from the server. + + Returns: + list: List of currently active server tasks. + + Raises: + TaskListError: If the list cannot be retrieved. + + References: + - `list-all-tasks `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_api/tasks") + + def response_handler(resp: Response) -> Jsons: + if not resp.is_success: + raise TaskListError(resp, request) + result: Jsons = self.deserializer.loads_many(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def task(self, task_id: str) -> Result[Json]: + """Return the details of an active server task. + + Args: + task_id (str) -> Server task ID. + + Returns: + dict: Details of the server task. + + Raises: + TaskGetError: If the task details cannot be retrieved. + + References: + - `get-a-task `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint=f"/_api/tasks/{task_id}") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise TaskGetError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def create_task( + self, + command: str, + task_id: Optional[str] = None, + name: Optional[str] = None, + offset: Optional[int] = None, + params: Optional[Json] = None, + period: Optional[int] = None, + ) -> Result[Json]: + """Create a new task. + + Args: + command (str): The JavaScript code to be executed. + task_id (str | None): Optional task ID. If not provided, the server will + generate a unique ID. + name (str | None): The name of the task. + offset (int | None): The offset in seconds after which the task should + start executing. + params (dict | None): Parameters to be passed to the command. + period (int | None): The number of seconds between the executions. + + Returns: + dict: Details of the created task. + + Raises: + TaskCreateError: If the task cannot be created. + + References: + - `create-a-task `__ + - `create-a-task-with-id `__ + """ # noqa: E501 + data: Json = {"command": command} + if name is not None: + data["name"] = name + if offset is not None: + data["offset"] = offset + if params is not None: + data["params"] = params + if period is not None: + data["period"] = period + + if task_id is None: + request = Request( + method=Method.POST, + endpoint="/_api/tasks", + data=self.serializer.dumps(data), + ) + else: + request = Request( + method=Method.PUT, + endpoint=f"/_api/tasks/{task_id}", + data=self.serializer.dumps(data), + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise TaskCreateError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def delete_task( + self, + task_id: str, + ignore_missing: bool = False, + ) -> Result[bool]: + """Delete a server task. + + Args: + task_id (str): Task ID. + ignore_missing (bool): If `True`, do not raise an exception if the + task does not exist. 
+ + Returns: + bool: `True` if the task was deleted successfully, `False` if the + task was not found and **ignore_missing** was set to `True`. + + Raises: + TaskDeleteError: If the operation fails. + + References: + - `delete-a-task `__ + """ # noqa: E501 + request = Request(method=Method.DELETE, endpoint=f"/_api/tasks/{task_id}") + + def response_handler(resp: Response) -> bool: + if resp.is_success: + return True + if resp.status_code == HTTP_NOT_FOUND and ignore_missing: + return False + raise TaskDeleteError(resp, request) + + return await self._executor.execute(request, response_handler) + + async def engine(self) -> Result[Json]: + """Returns the storage engine the server is configured to use. + + Returns: + dict: Database engine details. + + Raises: + ServerEngineError: If the operation fails. + + References: + - `get-the-storage-engine-type `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_api/engine") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerEngineError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def time(self) -> Result[datetime]: + """Return server system time. + + Returns: + datetime.datetime: Server system time. + + Raises: + ServerTimeError: If the operation fails. + + References: + - `get-the-system-time `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_admin/time") + + def response_handler(resp: Response) -> datetime: + if not resp.is_success: + raise ServerTimeError(resp, request) + return datetime.fromtimestamp( + self.deserializer.loads(resp.raw_body)["time"] + ) + + return await self._executor.execute(request, response_handler) + + async def check_availability(self) -> Result[str]: + """Return ArangoDB server availability mode. + + Returns: + str: Server availability mode, either "readonly" or "default". + + Raises: + ServerCheckAvailabilityError: If the operation fails. + + References: + - `check-server-availability `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_admin/server/availability", + ) + + def response_handler(resp: Response) -> str: + if not resp.is_success: + raise ServerCheckAvailabilityError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return str(result["mode"]) + + return await self._executor.execute(request, response_handler) + + async def support_info(self) -> Result[Json]: + """Retrieves deployment information for support purposes. + + Note: + As this API may reveal sensitive data about the deployment, it can only be accessed from inside the _system database. + + Returns: + dict: Deployment information + + Raises: + DatabaseSupportInfoError: If the operation fails. + + References: + - `get-information-about-the-deployment `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_admin/support-info") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise DatabaseSupportInfoError(resp, request) + + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def options(self) -> Result[Json]: + """Return the currently-set server options. + + Returns: + dict: Server options. + + Raises: + ServerCurrentOptionsGetError: If the operation fails. 
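+
+        Example (illustrative only; assumes ``db`` is connected with
+        sufficient admin privileges):
+
+        .. code-block:: python
+
+            opts = await db.options()
+            # The response is a dict of startup option names and values;
+            # inspect its keys to see what the server reports.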
+
+        References:
+            - `get-the-startup-option-configuration `__
+        """  # noqa: E501
+        request = Request(method=Method.GET, endpoint="/_admin/options")
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerCurrentOptionsGetError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def options_available(self) -> Result[Json]:
+        """Return a description of all available server options.
+
+        Returns:
+            dict: Server options description.
+
+        Raises:
+            ServerAvailableOptionsGetError: If the operation fails.
+
+        References:
+            - `get-the-available-startup-options `__
+        """  # noqa: E501
+        request = Request(method=Method.GET, endpoint="/_admin/options-description")
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerAvailableOptionsGetError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def mode(self) -> Result[str]:
+        """Return the server mode ("default" or "readonly").
+
+        Returns:
+            str: Server mode, either "default" or "readonly".
+
+        Raises:
+            ServerModeError: If the operation fails.
+
+        References:
+            - `return-whether-or-not-a-server-is-in-read-only-mode `__
+        """  # noqa: E501
+        request = Request(method=Method.GET, endpoint="/_admin/server/mode")
+
+        def response_handler(resp: Response) -> str:
+            if not resp.is_success:
+                raise ServerModeError(resp, request)
+            return str(self.deserializer.loads(resp.raw_body)["mode"])
+
+        return await self._executor.execute(request, response_handler)
+
+    async def set_mode(self, mode: str) -> Result[str]:
+        """Set the server mode to read-only or default.
+
+        Args:
+            mode (str): Server mode. Possible values are "default" or "readonly".
+
+        Returns:
+            str: New server mode.
+
+        Raises:
+            ServerModeSetError: If the operation fails.
+
+        References:
+            - `set-the-server-mode-to-read-only-or-default `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.PUT,
+            endpoint="/_admin/server/mode",
+            data=self.serializer.dumps({"mode": mode}),
+        )
+
+        def response_handler(resp: Response) -> str:
+            if not resp.is_success:
+                raise ServerModeSetError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return str(result["mode"])
+
+        return await self._executor.execute(request, response_handler)
+
+    async def license(self) -> Result[Json]:
+        """View the license information and status of an Enterprise Edition instance.
+
+        Returns:
+            dict: Server license information.
+
+        Raises:
+            ServerLicenseGetError: If the operation fails.
+
+        References:
+            - `get-information-about-the-current-license `__
+        """  # noqa: E501
+        request = Request(method=Method.GET, endpoint="/_admin/license")
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerLicenseGetError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def set_license(self, license: str, force: Optional[bool] = False) -> None:
+        """Set a new license for an Enterprise Edition instance.
+
+        Args:
+            license (str): Base64-encoded license string, wrapped in double-quotes.
+            force (bool | None): Set to `True` to change the license even if it
+                expires sooner than the current one.
+
+        Raises:
+            ServerLicenseSetError: If the operation fails.
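+
+        Example (illustrative sketch; the license value below is a
+        placeholder, not a real license):
+
+        .. code-block:: python
+
+            new_license = '"<base64-encoded license string>"'  # hypothetical
+            await db.set_license(new_license, force=False)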
+
+        References:
+            - `set-a-new-license `__
+        """  # noqa: E501
+        params: Params = {}
+        if force is not None:
+            params["force"] = force
+
+        request = Request(
+            method=Method.PUT,
+            endpoint="/_admin/license",
+            params=params,
+            data=license,
+        )
+
+        def response_handler(resp: Response) -> None:
+            if not resp.is_success:
+                raise ServerLicenseSetError(resp, request)
+
+        await self._executor.execute(request, response_handler)
+
+    async def shutdown(self, soft: Optional[bool] = None) -> None:
+        """Initiate server shutdown sequence.
+
+        Args:
+            soft (bool | None): If set to `True`, this initiates a soft shutdown.
+
+        Raises:
+            ServerShutdownError: If the operation fails.
+
+        References:
+            - `start-the-shutdown-sequence `__
+        """  # noqa: E501
+        params: Params = {}
+        if soft is not None:
+            params["soft"] = soft
+
+        request = Request(
+            method=Method.DELETE,
+            endpoint="/_admin/shutdown",
+            params=params,
+        )
+
+        def response_handler(resp: Response) -> None:
+            if not resp.is_success:
+                raise ServerShutdownError(resp, request)
+
+        await self._executor.execute(request, response_handler)
+
+    async def shutdown_progress(self) -> Result[Json]:
+        """Query the soft shutdown progress.
+
+        Returns:
+            dict: Information about the shutdown progress.
+
+        Raises:
+            ServerShutdownProgressError: If the operation fails.
+
+        References:
+            - `query-the-soft-shutdown-progress `__
+        """  # noqa: E501
+        request = Request(method=Method.GET, endpoint="/_admin/shutdown")
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerShutdownProgressError(resp, request)
+
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def compact(
+        self,
+        change_level: Optional[bool] = None,
+        compact_bottom_most_level: Optional[bool] = None,
+    ) -> None:
+        """Compact all databases. This method requires superuser access.
+
+        Note:
+            This command can cause a full rewrite of all data in all databases,
+            which may take very long for large databases.
+
+        Args:
+            change_level (bool | None): Whether or not compacted data should be
+                moved to the minimum possible level. Default value is `False`.
+            compact_bottom_most_level (bool | None): Whether or not to compact the
+                bottom-most level of data. Default value is `False`.
+
+        Raises:
+            DatabaseCompactError: If the operation fails.
+
+        References:
+            - `compact-all-databases `__
+        """  # noqa: E501
+        data = {}
+        if change_level is not None:
+            data["changeLevel"] = change_level
+        if compact_bottom_most_level is not None:
+            data["compactBottomMostLevel"] = compact_bottom_most_level
+
+        request = Request(
+            method=Method.PUT,
+            endpoint="/_admin/compact",
+            data=self.serializer.dumps(data),
+        )
+
+        def response_handler(resp: Response) -> None:
+            if not resp.is_success:
+                raise DatabaseCompactError(resp, request)
+
+        await self._executor.execute(request, response_handler)
+
+    async def reload_routing(self) -> None:
+        """Reload the routing information.
+
+        Raises:
+            ServerReloadRoutingError: If the operation fails.
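+
+        Example (illustrative only; assumes ``db`` is an authenticated
+        database wrapper):
+
+        .. code-block:: python
+
+            await db.reload_routing()  # returns None on success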
+
+        References:
+            - `reload-the-routing-table `__
+        """  # noqa: E501
+        request = Request(method=Method.POST, endpoint="/_admin/routing/reload")
+
+        def response_handler(resp: Response) -> None:
+            if not resp.is_success:
+                raise ServerReloadRoutingError(resp, request)
+
+        await self._executor.execute(request, response_handler)
+
+    async def echo(self, body: Optional[Json] = None) -> Result[Json]:
+        """Return an object with the server's request information.
+
+        Args:
+            body (dict | None): Optional body of the request.
+
+        Returns:
+            dict: Details of the request.
+
+        Raises:
+            ServerEchoError: If the operation fails.
+
+        References:
+            - `echo-a-request `__
+        """  # noqa: E501
+        data = body if body is not None else {}
+        request = Request(
+            method=Method.POST,
+            endpoint="/_admin/echo",
+            data=self.serializer.dumps(data),  # serialize the body like other endpoints
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerEchoError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def execute(self, command: str) -> Result[Any]:
+        """Execute a raw JavaScript command on the server.
+
+        Args:
+            command (str): JavaScript command to execute.
+
+        Returns:
+            Return value of **command**, if any.
+
+        Raises:
+            ServerExecuteError: If the execution fails.
+
+        References:
+            - `execute-a-script `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.POST, endpoint="/_admin/execute", data=command.encode("utf-8")
+        )
+
+        def response_handler(resp: Response) -> Any:
+            if not resp.is_success:
+                raise ServerExecuteError(resp, request)
+            return self.deserializer.loads(resp.raw_body)
+
+        return await self._executor.execute(request, response_handler)
+
+    async def request(self, request: Request) -> Result[Response]:
+        """Execute a custom request.
+
+        Args:
+            request (Request): Request object to be executed.
+
+        Returns:
+            Response: Response object containing the result of the request.
+        """
+
+        def response_handler(resp: Response) -> Response:
+            return resp

         return await self._executor.execute(request, response_handler)

-    async def permissions(self, username: str, full: bool = True) -> Result[Json]:
-        """Return user permissions for all databases and collections.
+    async def metrics(self, server_id: Optional[str] = None) -> Result[str]:
+        """Return server metrics in Prometheus format.

         Args:
-            username (str): Username.
-            full (bool): If `True`, the result will contain the permissions for the
-                databases as well as the permissions for the collections.
+            server_id (str | None): Returns metrics of the specified server.
+                If no serverId is given, the asked server will reply.

         Returns:
-            dict: User permissions for all databases and (optionally) collections.
+            str: Server metrics in Prometheus format.

         Raises:
-            PermissionListError: If the operation fails.
+            ServerMetricsError: If the operation fails.
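+
+        Example (illustrative only; assumes ``db`` is an authenticated
+        database wrapper):
+
+        .. code-block:: python
+
+            text = await db.metrics()
+            # Prometheus text format: one sample per non-comment line.
+            samples = [ln for ln in text.splitlines() if not ln.startswith("#")]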
         References:
-            - `list-a-users-accessible-databases `__
-        """  # noqa: 501
+            - `metrics-api-v2 `__
+        """  # noqa: E501
+        params: Params = {}
+        if server_id is not None:
+            params["serverId"] = server_id
+
         request = Request(
             method=Method.GET,
-            endpoint=f"/_api/user/{username}/database",
-            params={"full": full},
+            endpoint="/_admin/metrics/v2",
+            params=params,
         )

-        def response_handler(resp: Response) -> Json:
-            if resp.is_success:
-                result: Json = self.deserializer.loads(resp.raw_body)["result"]
-                return result
-            raise PermissionListError(resp, request)
+        def response_handler(resp: Response) -> str:
+            if not resp.is_success:
+                raise ServerMetricsError(resp, request)
+            return resp.raw_body.decode("utf-8")

         return await self._executor.execute(request, response_handler)

-    async def permission(
+    async def read_log_entries(
         self,
-        username: str,
-        database: str,
-        collection: Optional[str] = None,
-    ) -> Result[str]:
-        """Return user permission for a specific database or collection.
+        upto: Optional[int | str] = None,
+        level: Optional[str] = None,
+        start: Optional[int] = None,
+        size: Optional[int] = None,
+        offset: Optional[int] = None,
+        search: Optional[str] = None,
+        sort: Optional[str] = None,
+        server_id: Optional[str] = None,
+    ) -> Result[Json]:
+        """Read the global log from the server.

         Args:
-            username (str): Username.
-            database (str): Database name.
-            collection (str | None): Collection name.
+            upto (int | str | None): Return the log entries up to the given level
+                (mutually exclusive with parameter **level**). Allowed values are
+                "fatal", "error", "warning", "info" (default), "debug" and "trace".
+            level (str | None): Return the log entries of only the given level
+                (mutually exclusive with **upto**).
+            start (int | None): Return the log entries whose ID is greater or equal to
+                the given value.
+            size (int | None): Restrict the size of the result to the given value.
+                This can be used for pagination.
+            offset (int | None): Number of entries to skip (e.g. for pagination).
+            search (str | None): Return only the log entries containing the given text.
+            sort (str | None): Sort the log entries according to the given fashion,
+                which can be "asc" or "desc".
+            server_id (str | None): Returns all log entries of the specified server.
+                If no serverId is given, the asked server will reply.

         Returns:
-            str: User access level.
+            dict: Server log entries.

         Raises:
-            PermissionGetError: If the operation fails.
+            ServerReadLogError: If the operation fails.
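+
+        Example (illustrative only; assumes admin access to the server log):
+
+        .. code-block:: python
+
+            logs = await db.read_log_entries(level="warning", size=10)
+            # "messages" is the key documented by the HTTP API response.
+            for message in logs.get("messages", []):
+                print(message)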
References: - - `get-a-users-database-access-level `__ - - `get-a-users-collection-access-level `__ - """ # noqa: 501 - endpoint = f"/_api/user/{username}/database/{database}" - if collection is not None: - endpoint += f"/{collection}" - request = Request(method=Method.GET, endpoint=endpoint) + - `get-the-global-server-logs `__ + """ # noqa: E501 + params: Params = {} + if upto is not None: + params["upto"] = upto + if level is not None: + params["level"] = level + if start is not None: + params["start"] = start + if size is not None: + params["size"] = size + if offset is not None: + params["offset"] = offset + if search is not None: + params["search"] = search + if sort is not None: + params["sort"] = sort + if server_id is not None: + params["serverId"] = server_id - def response_handler(resp: Response) -> str: - if resp.is_success: - return cast(str, self.deserializer.loads(resp.raw_body)["result"]) - raise PermissionGetError(resp, request) + request = Request( + method=Method.GET, + endpoint="/_admin/log/entries", + params=params, + prefix_needed=False, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerReadLogError(resp, request) + + result: Json = self.deserializer.loads(resp.raw_body) + return result return await self._executor.execute(request, response_handler) - async def update_permission( - self, - username: str, - permission: str, - database: str, - collection: Optional[str] = None, - ignore_failure: bool = False, - ) -> Result[bool]: - """Update user permissions for a specific database or collection. + async def log_levels( + self, server_id: Optional[str] = None, with_appenders: Optional[bool] = None + ) -> Result[Json]: + """Return current logging levels. Args: - username (str): Username. - permission (str): Allowed values are "rw" (administrate), - "ro" (access) and "none" (no access). - database (str): Database to set the access level for. - collection (str | None): Collection to set the access level for. - ignore_failure (bool): Do not raise an exception on failure. + server_id (str | None): Forward the request to the specified server. + with_appenders (bool | None): Include appenders in the response. Returns: - bool: `True` if the operation was successful. + dict: Current logging levels. Raises: - PermissionUpdateError: If the operation fails and `ignore_failure` - is `False`. + ServerLogLevelError: If the operation fails. 
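+
+        Example (illustrative only):
+
+        .. code-block:: python
+
+            levels = await db.log_levels()
+            # A mapping of logger names to level strings,
+            # e.g. {"agency": "INFO", "queries": "INFO", ...}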
         References:
-            - `set-a-users-database-access-level `__
-            - `set-a-users-collection-access-level `__
+            - `get-the-server-log-levels `__
         """  # noqa: E501
-        endpoint = f"/_api/user/{username}/database/{database}"
-        if collection is not None:
-            endpoint += f"/{collection}"
+        params: Params = {}
+        if server_id is not None:
+            params["serverId"] = server_id
+        if with_appenders is not None:
+            params["withAppenders"] = with_appenders

         request = Request(
-            method=Method.PUT,
-            endpoint=endpoint,
-            data=self.serializer.dumps({"grant": permission}),
+            method=Method.GET,
+            endpoint="/_admin/log/level",
+            params=params,
+            prefix_needed=False,
         )

-        def response_handler(resp: Response) -> bool:
-            if resp.is_success:
-                return True
-            if ignore_failure:
-                return False
-            raise PermissionUpdateError(resp, request)
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerLogLevelError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result

         return await self._executor.execute(request, response_handler)

-    async def reset_permission(
+    async def set_log_levels(
         self,
-        username: str,
-        database: str,
-        collection: Optional[str] = None,
-        ignore_failure: bool = False,
-    ) -> Result[bool]:
-        """Reset user permission for a specific database or collection.
+        server_id: Optional[str] = None,
+        with_appenders: Optional[bool] = None,
+        **kwargs: Dict[str, Any],
+    ) -> Result[Json]:
+        """Set the logging levels.
+
+        This method takes arbitrary keyword arguments where the keys are the
+        logger names and the values are the logging levels. For example:
+
+        .. code-block:: python
+
+            await db.set_log_levels(
+                agency='DEBUG',
+                collector='INFO',
+                threads='WARNING'
+            )
+
+        Keys that are not valid logger names are ignored.

         Args:
-            username (str): Username.
-            database (str): Database to reset the access level for.
-            collection (str | None): Collection to reset the access level for.
-            ignore_failure (bool): Do not raise an exception on failure.
+            server_id (str | None): Forward the request to a specific server.
+            with_appenders (bool | None): Include appenders in the response.
+            kwargs (dict): Logging levels to be set.

         Returns:
-            bool: `True` if the operation was successful.
+            dict: New logging levels.

         Raises:
-            PermissionResetError: If the operation fails and `ignore_failure`
-                is `False`.
+            ServerLogLevelSetError: If the operation fails.
         References:
-            - `clear-a-users-database-access-level `__
-            - `clear-a-users-collection-access-level `__
+            - `set-the-server-log-levels `__
         """  # noqa: E501
-        endpoint = f"/_api/user/{username}/database/{database}"
-        if collection is not None:
-            endpoint += f"/{collection}"
+        params: Params = {}
+        if server_id is not None:
+            params["serverId"] = server_id
+        if with_appenders is not None:
+            params["withAppenders"] = with_appenders

         request = Request(
-            method=Method.DELETE,
-            endpoint=endpoint,
+            method=Method.PUT,
+            endpoint="/_admin/log/level",
+            params=params,
+            data=self.serializer.dumps(kwargs),
+            prefix_needed=False,
         )

-        def response_handler(resp: Response) -> bool:
-            if resp.is_success:
-                return True
-            if ignore_failure:
-                return False
-            raise PermissionResetError(resp, request)
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerLogLevelSetError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result

         return await self._executor.execute(request, response_handler)

-    async def jwt_secrets(self) -> Result[Json]:
-        """Return information on currently loaded JWT secrets.
+    async def reset_log_levels(self, server_id: Optional[str] = None) -> Result[Json]:
+        """Reset the logging levels.
+
+        Revert the server’s log level settings to the values they had at startup,
+        as determined by the startup options specified on the command-line,
+        a configuration file, and the factory defaults.
+
+        Args:
+            server_id (str | None): Forward the request to a specific server.

         Returns:
-            dict: JWT secrets.
+            dict: New logging levels.

         Raises:
-            JWTSecretListError: If the operation fails.
+            ServerLogLevelResetError: If the operation fails.

         References:
-            - `get-information-about-the-loaded-jwt-secrets `__
-        """  # noqa: 501
-        request = Request(method=Method.GET, endpoint="/_admin/server/jwt")
+            - `reset-the-server-log-levels `__
+        """  # noqa: E501
+        params: Params = {}
+        if server_id is not None:
+            params["serverId"] = server_id
+
+        request = Request(
+            method=Method.DELETE,
+            endpoint="/_admin/log/level",
+            params=params,
+            prefix_needed=False,
+        )

         def response_handler(resp: Response) -> Json:
             if not resp.is_success:
-                raise JWTSecretListError(resp, request)
+                raise ServerLogLevelResetError(resp, request)
             result: Json = self.deserializer.loads(resp.raw_body)
             return result

         return await self._executor.execute(request, response_handler)

-    async def reload_jwt_secrets(self) -> Result[Json]:
-        """Hot_reload JWT secrets from disk.
+    async def log_settings(self) -> Result[Json]:
+        """Get the structured log settings.

         Returns:
-            dict: Information on reloaded JWT secrets.
+            dict: Current structured log settings.

         Raises:
-            JWTSecretReloadError: If the operation fails.
+            ServerLogSettingError: If the operation fails.

         References:
-            - `hot-reload-the-jwt-secrets-from-disk `__
-        """  # noqa: 501
-        request = Request(method=Method.POST, endpoint="/_admin/server/jwt")
+            - `get-the-structured-log-settings `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.GET,
+            endpoint="/_admin/log/structured",
+            prefix_needed=False,
+        )

         def response_handler(resp: Response) -> Json:
             if not resp.is_success:
-                raise JWTSecretReloadError(resp, request)
+                raise ServerLogSettingError(resp, request)
             result: Json = self.deserializer.loads(resp.raw_body)
             return result

         return await self._executor.execute(request, response_handler)

-    async def list_transactions(self) -> Result[Jsons]:
-        """List all currently running stream transactions.
-
-        Returns:
-            list: List of transactions, with each transaction containing
-                an "id" and a "state" field.
-
-        Raises:
-            TransactionListError: If the operation fails on the server side.
-        """
-        request = Request(method=Method.GET, endpoint="/_api/transaction")
+    async def set_log_settings(self, **kwargs: Dict[str, Any]) -> Result[Json]:
+        """Set the structured log settings.

-        def response_handler(resp: Response) -> Jsons:
-            if not resp.is_success:
-                raise TransactionListError(resp, request)
-            result: Json = self.deserializer.loads(resp.raw_body)
-            return cast(Jsons, result["transactions"])
-
-        return await self._executor.execute(request, response_handler)
+        This method takes arbitrary keyword arguments where the keys are the
+        structured log parameters and the values are true or false, for either
+        enabling or disabling the parameters.

-    async def execute_transaction(
-        self,
-        command: str,
-        params: Optional[Json] = None,
-        read: Optional[str | Sequence[str]] = None,
-        write: Optional[str | Sequence[str]] = None,
-        exclusive: Optional[str | Sequence[str]] = None,
-        allow_implicit: Optional[bool] = None,
-        wait_for_sync: Optional[bool] = None,
-        lock_timeout: Optional[int] = None,
-        max_transaction_size: Optional[int] = None,
-    ) -> Result[Any]:
-        """Execute a JavaScript Transaction.
+        .. code-block:: python

-        Warning:
-            JavaScript Transactions are deprecated from ArangoDB v3.12.0 onward and
-            will be removed in a future version.
+            await db.set_log_settings(
+                database=True,
+                url=True,
+                username=False,
+            )

         Args:
-            command (str): The actual transaction operations to be executed, in the
-                form of stringified JavaScript code.
-            params (dict): Optional parameters passed into the JavaScript command.
-            read (str | list | None): Name(s) of collections read during transaction.
-            write (str | list | None): Name(s) of collections written to during
-                transaction with shared access.
-            exclusive (str | list | None): Name(s) of collections written to during
-                transaction with exclusive access.
-            allow_implicit (bool | None): Allow reading from undeclared collections.
-            wait_for_sync (bool | None): If `True`, will force the transaction to write
-                all data to disk before returning.
-            lock_timeout (int | None): Timeout for waiting on collection locks. Setting
-                it to 0 will prevent ArangoDB from timing out while waiting for a lock.
-            max_transaction_size (int | None): Transaction size limit in bytes.
+            kwargs (dict): Structured log parameters to be set.

         Returns:
-            Any: Result of the transaction.
+            dict: New structured log settings.

         Raises:
-            TransactionExecuteError: If the operation fails on the server side.
+            ServerLogSettingSetError: If the operation fails.

         References:
-            - `execute-a-javascript-transaction `__
-        """  # noqa: 501
-        m = "JavaScript Transactions are deprecated from ArangoDB v3.12.0 onward and will be removed in a future version."
# noqa: E501 - warn(m, DeprecationWarning, stacklevel=2) - - collections = dict() - if read is not None: - collections["read"] = read - if write is not None: - collections["write"] = write - if exclusive is not None: - collections["exclusive"] = exclusive - - data: Json = dict(collections=collections, action=command) - if params is not None: - data["params"] = params - if wait_for_sync is not None: - data["waitForSync"] = wait_for_sync - if allow_implicit is not None: - data["allowImplicit"] = allow_implicit - if lock_timeout is not None: - data["lockTimeout"] = lock_timeout - if max_transaction_size is not None: - data["maxTransactionSize"] = max_transaction_size - + - `set-the-structured-log-settings `__ + """ # noqa: E501 request = Request( - method=Method.POST, - endpoint="/_api/transaction", - data=self.serializer.dumps(data), + method=Method.PUT, + endpoint="/_admin/log/structured", + data=self.serializer.dumps(kwargs), + prefix_needed=False, ) - def response_handler(resp: Response) -> Any: + def response_handler(resp: Response) -> Json: if not resp.is_success: - raise TransactionExecuteError(resp, request) - return self.deserializer.loads(resp.raw_body)["result"] + raise ServerLogSettingSetError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result return await self._executor.execute(request, response_handler) - async def version(self, details: bool = False) -> Result[Json]: - """Return the server version information. - - Args: - details (bool): If `True`, return detailed version information. + async def api_calls(self) -> Result[Json]: + """Get a list of the most recent requests with a timestamp and the endpoint. Returns: - dict: Server version information. + dict: API calls made to the server. Raises: - ServerVersionError: If the operation fails on the server side. + ServerApiCallsError: If the operation fails. References: - - `get-the-server-version `__ + - `get-recent-api-calls `__ """ # noqa: E501 request = Request( - method=Method.GET, endpoint="/_api/version", params={"details": details} + method=Method.GET, + endpoint="/_admin/server/api-calls", ) def response_handler(resp: Response) -> Json: if not resp.is_success: - raise ServerVersionError(resp, request) - return self.deserializer.loads(resp.raw_body) + raise ServerApiCallsError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body)["result"] + return result return await self._executor.execute(request, response_handler) @@ -1501,7 +3379,7 @@ async def begin_transaction( TransactionInitError: If the operation fails on the server side. References: - - `begin-a-stream-transaction `__ + - `begin-a-stream-transaction `__ """ # noqa: E501 collections = dict() if read is not None: @@ -1585,7 +3463,7 @@ async def async_jobs( AsyncJobListError: If retrieval fails. References: - - `list-async-jobs-by-status-or-get-the-status-of-specific-job `__ + - `list-async-jobs-by-status-or-get-the-status-of-specific-job `__ """ # noqa: E501 params: Params = {} if count is not None: @@ -1618,7 +3496,7 @@ async def clear_async_jobs(self, threshold: Optional[float] = None) -> None: AsyncJobClearError: If the operation fails. References: - - `delete-async-job-results `__ + - `delete-async-job-results `__ """ # noqa: E501 if threshold is None: request = Request(method=Method.DELETE, endpoint="/_api/job/all") @@ -1638,7 +3516,7 @@ def response_handler(resp: Response) -> None: class TransactionDatabase(Database): """Database API tailored specifically for - `Stream Transactions `__. 
+ `Stream Transactions `__. It allows you start a transaction, run multiple operations (eg. AQL queries) over a short period of time, and then commit or abort the transaction. @@ -1673,7 +3551,7 @@ async def transaction_status(self) -> str: TransactionStatusError: If the transaction is not found. References: - - `get-the-status-of-a-stream-transaction `__ + - `get-the-status-of-a-stream-transaction `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -1695,7 +3573,7 @@ async def commit_transaction(self) -> None: TransactionCommitError: If the operation fails on the server side. References: - - `commit-a-stream-transaction `__ + - `commit-a-stream-transaction `__ """ # noqa: E501 request = Request( method=Method.PUT, @@ -1715,7 +3593,7 @@ async def abort_transaction(self) -> None: TransactionAbortError: If the operation fails on the server side. References: - - `abort-a-stream-transaction `__ + - `abort-a-stream-transaction `__ """ # noqa: E501 request = Request( method=Method.DELETE, @@ -1742,7 +3620,7 @@ class AsyncDatabase(Database): and no results are stored on server. References: - - `jobs `__ + - `jobs `__ """ # noqa: E501 def __init__(self, connection: Connection, return_result: bool) -> None: diff --git a/arangoasync/exceptions.py b/arangoasync/exceptions.py index c4ee40a..58a9505 100644 --- a/arangoasync/exceptions.py +++ b/arangoasync/exceptions.py @@ -111,6 +111,10 @@ class AQLQueryExplainError(ArangoServerError): """Failed to parse and explain query.""" +class AQLQueryHistoryError(ArangoServerError): + """Failed to retrieve running AQL queries.""" + + class AQLQueryKillError(ArangoServerError): """Failed to kill the query.""" @@ -135,6 +139,34 @@ class AQLQueryValidateError(ArangoServerError): """Failed to parse and validate query.""" +class AccessTokenCreateError(ArangoServerError): + """Failed to create an access token.""" + + +class AccessTokenDeleteError(ArangoServerError): + """Failed to delete an access token.""" + + +class AccessTokenListError(ArangoServerError): + """Failed to retrieve access tokens.""" + + +class AnalyzerCreateError(ArangoServerError): + """Failed to create analyzer.""" + + +class AnalyzerGetError(ArangoServerError): + """Failed to retrieve analyzer details.""" + + +class AnalyzerDeleteError(ArangoServerError): + """Failed to delete analyzer.""" + + +class AnalyzerListError(ArangoServerError): + """Failed to retrieve analyzers.""" + + class AsyncExecuteError(ArangoServerError): """Failed to execute async API request.""" @@ -163,14 +195,54 @@ class AuthHeaderError(ArangoClientError): """The authentication header could not be determined.""" +class BackupCreateError(ArangoServerError): + """Failed to create a backup.""" + + +class BackupDeleteError(ArangoServerError): + """Failed to delete a backup.""" + + +class BackupDownloadError(ArangoServerError): + """Failed to download a backup from remote repository.""" + + +class BackupGetError(ArangoServerError): + """Failed to retrieve backup details.""" + + +class BackupRestoreError(ArangoServerError): + """Failed to restore from backup.""" + + +class BackupUploadError(ArangoServerError): + """Failed to upload a backup to remote repository.""" + + class CollectionCreateError(ArangoServerError): """Failed to create collection.""" +class CollectionChecksumError(ArangoServerError): + """Failed to retrieve collection checksum.""" + + +class CollectionConfigureError(ArangoServerError): + """Failed to configure collection properties.""" + + +class CollectionCompactError(ArangoServerError): + """Failed to 
compact collection.""" + + class CollectionDeleteError(ArangoServerError): """Failed to delete collection.""" +class CollectionKeyGeneratorsError(ArangoServerError): + """Failed to retrieve key generators.""" + + class CollectionListError(ArangoServerError): """Failed to retrieve collections.""" @@ -179,6 +251,34 @@ class CollectionPropertiesError(ArangoServerError): """Failed to retrieve collection properties.""" +class CollectionRecalculateCountError(ArangoServerError): + """Failed to recalculate document count.""" + + +class CollectionRenameError(ArangoServerError): + """Failed to rename collection.""" + + +class CollectionResponsibleShardError(ArangoServerError): + """Failed to retrieve responsible shard.""" + + +class CollectionRevisionError(ArangoServerError): + """Failed to retrieve collection revision.""" + + +class CollectionShardsError(ArangoServerError): + """Failed to retrieve collection shards.""" + + +class CollectionStatisticsError(ArangoServerError): + """Failed to retrieve collection statistics.""" + + +class CollectionTruncateError(ArangoServerError): + """Failed to truncate collection.""" + + class ClientConnectionAbortedError(ArangoClientError): """The connection was aborted.""" @@ -187,8 +287,32 @@ class ClientConnectionError(ArangoClientError): """The request was unable to reach the server.""" -class CollectionTruncateError(ArangoServerError): - """Failed to truncate collection.""" +class ClusterEndpointsError(ArangoServerError): + """Failed to retrieve coordinator endpoints.""" + + +class ClusterHealthError(ArangoServerError): + """Failed to retrieve cluster health.""" + + +class ClusterMaintenanceModeError(ArangoServerError): + """Failed to enable/disable cluster supervision maintenance mode.""" + + +class ClusterRebalanceError(ArangoServerError): + """Failed to execute cluster rebalancing operation.""" + + +class ClusterServerRoleError(ArangoServerError): + """Failed to retrieve server role in a cluster.""" + + +class ClusterServerIDError(ArangoServerError): + """Failed to retrieve server ID.""" + + +class ClusterStatisticsError(ArangoServerError): + """Failed to retrieve DB-Server statistics.""" class CursorCloseError(ArangoServerError): @@ -211,6 +335,10 @@ class CursorStateError(ArangoClientError): """The cursor object was in a bad state.""" +class DatabaseCompactError(ArangoServerError): + """Failed to compact databases.""" + + class DatabaseCreateError(ArangoServerError): """Failed to create database.""" @@ -227,6 +355,10 @@ class DatabasePropertiesError(ArangoServerError): """Failed to retrieve database properties.""" +class DatabaseSupportInfoError(ArangoServerError): + """Failed to retrieve support info for deployment.""" + + class DeserializationError(ArangoClientError): """Failed to deserialize the server response.""" @@ -287,6 +419,90 @@ class EdgeListError(ArangoServerError): """Failed to retrieve edges coming in and out of a vertex.""" +class FoxxConfigGetError(ArangoServerError): + """Failed to retrieve Foxx service configuration.""" + + +class FoxxConfigReplaceError(ArangoServerError): + """Failed to replace Foxx service configuration.""" + + +class FoxxConfigUpdateError(ArangoServerError): + """Failed to update Foxx service configuration.""" + + +class FoxxCommitError(ArangoServerError): + """Failed to commit local Foxx service state.""" + + +class FoxxDependencyGetError(ArangoServerError): + """Failed to retrieve Foxx service dependencies.""" + + +class FoxxDependencyReplaceError(ArangoServerError): + """Failed to replace Foxx service 
dependencies.""" + + +class FoxxDependencyUpdateError(ArangoServerError): + """Failed to update Foxx service dependencies.""" + + +class FoxxScriptListError(ArangoServerError): + """Failed to retrieve Foxx service scripts.""" + + +class FoxxDevModeEnableError(ArangoServerError): + """Failed to enable development mode for Foxx service.""" + + +class FoxxDevModeDisableError(ArangoServerError): + """Failed to disable development mode for Foxx service.""" + + +class FoxxDownloadError(ArangoServerError): + """Failed to download Foxx service bundle.""" + + +class FoxxReadmeGetError(ArangoServerError): + """Failed to retrieve Foxx service readme.""" + + +class FoxxScriptRunError(ArangoServerError): + """Failed to run Foxx service script.""" + + +class FoxxServiceCreateError(ArangoServerError): + """Failed to create Foxx service.""" + + +class FoxxServiceDeleteError(ArangoServerError): + """Failed to delete Foxx services.""" + + +class FoxxServiceGetError(ArangoServerError): + """Failed to retrieve Foxx service metadata.""" + + +class FoxxServiceListError(ArangoServerError): + """Failed to retrieve Foxx services.""" + + +class FoxxServiceReplaceError(ArangoServerError): + """Failed to replace Foxx service.""" + + +class FoxxServiceUpdateError(ArangoServerError): + """Failed to update Foxx service.""" + + +class FoxxSwaggerGetError(ArangoServerError): + """Failed to retrieve Foxx service swagger.""" + + +class FoxxTestRunError(ArangoServerError): + """Failed to run Foxx service tests.""" + + class GraphCreateError(ArangoServerError): """Failed to create the graph.""" @@ -351,18 +567,146 @@ class PermissionUpdateError(ArangoServerError): """Failed to update user permission.""" +class ReplicationApplierConfigError(ArangoServerError): + """Failed to retrieve replication applier configuration.""" + + +class ReplicationApplierStateError(ArangoServerError): + """Failed to retrieve replication applier state.""" + + +class ReplicationClusterInventoryError(ArangoServerError): + """Failed to retrieve overview of collection and indexes in a cluster.""" + + +class ReplicationDumpError(ArangoServerError): + """Failed to retrieve collection content.""" + + +class ReplicationInventoryError(ArangoServerError): + """Failed to retrieve inventory of collection and indexes.""" + + +class ReplicationLoggerStateError(ArangoServerError): + """Failed to retrieve logger state.""" + + +class ReplicationServerIDError(ArangoServerError): + """Failed to retrieve server ID.""" + + class SerializationError(ArangoClientError): """Failed to serialize the request.""" +class ServerApiCallsError(ArangoServerError): + """Failed to retrieve the list of recent API calls.""" + + +class ServerAvailableOptionsGetError(ArangoServerError): + """Failed to retrieve available server options.""" + + +class ServerCheckAvailabilityError(ArangoServerError): + """Failed to retrieve server availability mode.""" + + class ServerConnectionError(ArangoServerError): """Failed to connect to ArangoDB server.""" +class ServerCurrentOptionsGetError(ArangoServerError): + """Failed to retrieve currently-set server options.""" + + +class ServerEchoError(ArangoServerError): + """Failed to retrieve details on last request.""" + + +class ServerEncryptionError(ArangoServerError): + """Failed to reload user-defined encryption keys.""" + + +class ServerEngineError(ArangoServerError): + """Failed to retrieve database engine.""" + + +class ServerExecuteError(ArangoServerError): + """Failed to execute raw JavaScript command.""" + + +class 
ServerMetricsError(ArangoServerError): + """Failed to retrieve server metrics.""" + + +class ServerModeError(ArangoServerError): + """Failed to retrieve server mode.""" + + +class ServerModeSetError(ArangoServerError): + """Failed to set server mode.""" + + +class ServerLicenseGetError(ArangoServerError): + """Failed to retrieve server license.""" + + +class ServerLicenseSetError(ArangoServerError): + """Failed to set server license.""" + + +class ServerLogLevelError(ArangoServerError): + """Failed to retrieve server log levels.""" + + +class ServerLogLevelResetError(ArangoServerError): + """Failed to reset server log levels.""" + + +class ServerLogLevelSetError(ArangoServerError): + """Failed to set server log levels.""" + + +class ServerLogSettingError(ArangoServerError): + """Failed to retrieve server log settings.""" + + +class ServerLogSettingSetError(ArangoServerError): + """Failed to set server log settings.""" + + +class ServerReadLogError(ArangoServerError): + """Failed to retrieve global log.""" + + +class ServerReloadRoutingError(ArangoServerError): + """Failed to reload routing details.""" + + +class ServerShutdownError(ArangoServerError): + """Failed to initiate shutdown sequence.""" + + +class ServerShutdownProgressError(ArangoServerError): + """Failed to retrieve soft shutdown progress.""" + + class ServerStatusError(ArangoServerError): """Failed to retrieve server status.""" +class ServerTLSError(ArangoServerError): + """Failed to retrieve TLS data.""" + + +class ServerTLSReloadError(ArangoServerError): + """Failed to reload TLS.""" + + +class ServerTimeError(ArangoServerError): + """Failed to retrieve server system time.""" + + class ServerVersionError(ArangoServerError): """Failed to retrieve server version.""" @@ -371,6 +715,22 @@ class SortValidationError(ArangoClientError): """Invalid sort parameters.""" +class TaskCreateError(ArangoServerError): + """Failed to create server task.""" + + +class TaskDeleteError(ArangoServerError): + """Failed to delete server task.""" + + +class TaskGetError(ArangoServerError): + """Failed to retrieve server task details.""" + + +class TaskListError(ArangoServerError): + """Failed to retrieve server tasks.""" + + class TransactionAbortError(ArangoServerError): """Failed to abort transaction.""" @@ -429,3 +789,31 @@ class VertexCollectionDeleteError(ArangoServerError): class VertexCollectionListError(ArangoServerError): """Failed to retrieve vertex collections.""" + + +class ViewCreateError(ArangoServerError): + """Failed to create view.""" + + +class ViewDeleteError(ArangoServerError): + """Failed to delete view.""" + + +class ViewGetError(ArangoServerError): + """Failed to retrieve view details.""" + + +class ViewListError(ArangoServerError): + """Failed to retrieve views.""" + + +class ViewRenameError(ArangoServerError): + """Failed to rename view.""" + + +class ViewReplaceError(ArangoServerError): + """Failed to replace view.""" + + +class ViewUpdateError(ArangoServerError): + """Failed to update view.""" diff --git a/arangoasync/foxx.py b/arangoasync/foxx.py new file mode 100644 index 0000000..0b068da --- /dev/null +++ b/arangoasync/foxx.py @@ -0,0 +1,829 @@ +__all__ = ["Foxx"] + +from typing import Any, Optional + +from arangoasync.exceptions import ( + FoxxCommitError, + FoxxConfigGetError, + FoxxConfigReplaceError, + FoxxConfigUpdateError, + FoxxDependencyGetError, + FoxxDependencyReplaceError, + FoxxDependencyUpdateError, + FoxxDevModeDisableError, + FoxxDevModeEnableError, + FoxxDownloadError, + FoxxReadmeGetError, + 
FoxxScriptListError,
+    FoxxScriptRunError,
+    FoxxServiceCreateError,
+    FoxxServiceDeleteError,
+    FoxxServiceGetError,
+    FoxxServiceListError,
+    FoxxServiceReplaceError,
+    FoxxServiceUpdateError,
+    FoxxSwaggerGetError,
+    FoxxTestRunError,
+)
+from arangoasync.executor import ApiExecutor
+from arangoasync.request import Method, Request
+from arangoasync.response import Response
+from arangoasync.result import Result
+from arangoasync.serialization import Deserializer, Serializer
+from arangoasync.typings import Json, Jsons, Params, RequestHeaders
+
+
+class Foxx:
+    """Foxx API wrapper."""
+
+    def __init__(self, executor: ApiExecutor) -> None:
+        self._executor = executor
+
+    def __repr__(self) -> str:
+        return f""
+
+    @property
+    def serializer(self) -> Serializer[Json]:
+        """Return the serializer."""
+        return self._executor.serializer
+
+    @property
+    def deserializer(self) -> Deserializer[Json, Jsons]:
+        """Return the deserializer."""
+        return self._executor.deserializer
+
+    async def services(self, exclude_system: Optional[bool] = False) -> Result[Jsons]:
+        """List installed services.
+
+        Args:
+            exclude_system (bool | None): Exclude system services.
+
+        Returns:
+            list: List of installed services.
+
+        Raises:
+            FoxxServiceListError: If retrieval fails.
+
+        References:
+            - `list-the-installed-services `__
+        """  # noqa: E501
+        params: Params = {}
+        if exclude_system is not None:
+            params["excludeSystem"] = exclude_system
+
+        request = Request(
+            method=Method.GET,
+            endpoint="/_api/foxx",
+            params=params,
+        )
+
+        def response_handler(resp: Response) -> Jsons:
+            if not resp.is_success:
+                raise FoxxServiceListError(resp, request)
+            result: Jsons = self.deserializer.loads_many(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def service(self, mount: str) -> Result[Json]:
+        """Return service metadata.
+
+        Args:
+            mount (str): Service mount path (e.g. "/_admin/aardvark").
+
+        Returns:
+            dict: Service metadata.
+
+        Raises:
+            FoxxServiceGetError: If retrieval fails.
+
+        References:
+            - `get-the-service-description `__
+        """  # noqa: E501
+        params: Params = {"mount": mount}
+        request = Request(
+            method=Method.GET,
+            endpoint="/_api/foxx/service",
+            params=params,
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise FoxxServiceGetError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def create_service(
+        self,
+        mount: str,
+        service: Any,
+        headers: Optional[RequestHeaders] = None,
+        development: Optional[bool] = None,
+        setup: Optional[bool] = None,
+        legacy: Optional[bool] = None,
+    ) -> Result[Json]:
+        """Install the given new service at the given mount path.
+
+        Args:
+            mount (str): Mount path the service should be installed at.
+            service (Any): Service payload. Can be a JSON string, a file-like
+                object, or a multipart form.
+            headers (dict | None): Request headers.
+            development (bool | None): Whether to install the service in development mode.
+            setup (bool | None): Whether to run the service setup script.
+            legacy (bool | None): Whether to install in legacy mode.
+
+        Returns:
+            dict: Service metadata.
+
+        Raises:
+            FoxxServiceCreateError: If installation fails.
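+
+        Example (illustrative sketch; assumes the wrapper is exposed as
+        ``db.foxx`` and that ``service.zip`` is a valid service bundle):
+
+        .. code-block:: python
+
+            with open("service.zip", "rb") as f:
+                info = await db.foxx.create_service(
+                    mount="/my-service",  # hypothetical mount path
+                    service=f,
+                    headers={"content-type": "application/zip"},
+                )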
+
+        References:
+            - `install-a-new-service `__
+        """  # noqa: E501
+        params: Params = dict()
+        params["mount"] = mount
+        if development is not None:
+            params["development"] = development
+        if setup is not None:
+            params["setup"] = setup
+        if legacy is not None:
+            params["legacy"] = legacy
+
+        if isinstance(service, dict):
+            data = self.serializer.dumps(service)
+        else:
+            data = service
+
+        request = Request(
+            method=Method.POST,
+            endpoint="/_api/foxx",
+            params=params,
+            data=data,
+            headers=headers,
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise FoxxServiceCreateError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def delete_service(
+        self,
+        mount: str,
+        teardown: Optional[bool] = None,
+    ) -> None:
+        """Remove the service at the given mount path from the database and file system.
+
+        Args:
+            mount (str): Mount path of the service to uninstall.
+            teardown (bool | None): Whether to run the teardown script.
+
+        Raises:
+            FoxxServiceDeleteError: If the operation fails.
+
+        References:
+            - `uninstall-a-service `__
+        """  # noqa: E501
+        params: Params = dict()
+        params["mount"] = mount
+        if teardown is not None:
+            params["teardown"] = teardown
+
+        request = Request(
+            method=Method.DELETE,
+            endpoint="/_api/foxx/service",
+            params=params,
+        )
+
+        def response_handler(resp: Response) -> None:
+            if not resp.is_success:
+                raise FoxxServiceDeleteError(resp, request)
+
+        await self._executor.execute(request, response_handler)
+
+    async def replace_service(
+        self,
+        mount: str,
+        service: Any,
+        headers: Optional[RequestHeaders] = None,
+        teardown: Optional[bool] = None,
+        setup: Optional[bool] = None,
+        legacy: Optional[bool] = None,
+        force: Optional[bool] = None,
+    ) -> Result[Json]:
+        """Replace an existing Foxx service at the given mount path.
+
+        Args:
+            mount (str): Mount path of the service to replace.
+            service (Any): Service payload (JSON string, file-like object, or
+                multipart form).
+            headers (dict | None): Optional request headers.
+            teardown (bool | None): Whether to run the teardown script.
+            setup (bool | None): Whether to run the setup script.
+            legacy (bool | None): Whether to install in legacy mode.
+            force (bool | None): Set to `True` to force service install even if
+                no service is installed under the given mount.
+
+        Returns:
+            dict: Service metadata.
+
+        Raises:
+            FoxxServiceReplaceError: If replacement fails.
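+
+        Example (illustrative sketch; same assumptions as in
+        ``create_service``):
+
+        .. code-block:: python
+
+            with open("service.zip", "rb") as f:
+                info = await db.foxx.replace_service(
+                    mount="/my-service",
+                    service=f,
+                    headers={"content-type": "application/zip"},
+                    teardown=False,
+                )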
+ + References: + - `replace-a-service `__ + """ # noqa: E501 + params: Params = dict() + params["mount"] = mount + if teardown is not None: + params["teardown"] = teardown + if setup is not None: + params["setup"] = setup + if legacy is not None: + params["legacy"] = legacy + if force is not None: + params["force"] = force + + if isinstance(service, dict): + data = self.serializer.dumps(service) + else: + data = service + + request = Request( + method=Method.PUT, + endpoint="/_api/foxx/service", + params=params, + data=data, + headers=headers, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxServiceReplaceError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def update_service( + self, + mount: str, + service: Any, + headers: Optional[RequestHeaders] = None, + teardown: Optional[bool] = None, + setup: Optional[bool] = None, + legacy: Optional[bool] = None, + force: Optional[bool] = None, + ) -> Result[Json]: + """Upgrade a Foxx service at the given mount path. + + Args: + mount (str): Mount path of the service to upgrade. + service (Any): Service payload (JSON string, file-like object, or multipart form). + headers (dict | None): Optional request headers. + teardown (bool | None): Whether to run the teardown script. + setup (bool | None): Whether to run the setup script. + legacy (bool | None): Whether to upgrade in legacy mode. + force (bool | None): Set to `True` to force service install even if no service is installed under given mount. + + Returns: + dict: Service metadata. + + Raises: + FoxxServiceUpdateError: If upgrade fails. + + References: + - `upgrade-a-service `__ + """ # noqa: E501 + params: Params = dict() + params["mount"] = mount + if teardown is not None: + params["teardown"] = teardown + if setup is not None: + params["setup"] = setup + if legacy is not None: + params["legacy"] = legacy + if force is not None: + params["force"] = force + + if isinstance(service, dict): + data = self.serializer.dumps(service) + else: + data = service + + request = Request( + method=Method.PATCH, + endpoint="/_api/foxx/service", + params=params, + data=data, + headers=headers, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxServiceUpdateError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def config(self, mount: str) -> Result[Json]: + """Return service configuration. + + Args: + mount (str): Service mount path. + + Returns: + dict: Service configuration. + + Raises: + FoxxConfigGetError: If retrieval fails. + + References: + - `get-the-configuration-options `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_api/foxx/configuration", + params={"mount": mount}, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxConfigGetError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def update_config(self, mount: str, options: Json) -> Result[Json]: + """Update service configuration. + + Args: + mount (str): Service mount path. + options (dict): Configuration values. Omitted options are ignored. + + Returns: + dict: Updated configuration values. + + Raises: + FoxxConfigUpdateError: If update fails. 
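+
+        Example (illustrative sketch; the option name is hypothetical and
+        depends on the service manifest):
+
+        .. code-block:: python
+
+            cfg = await db.foxx.update_config(
+                mount="/my-service",
+                options={"currency": "EUR"},
+            )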
+ + References: + - `update-the-configuration-options `__ + """ # noqa: E501 + request = Request( + method=Method.PATCH, + endpoint="/_api/foxx/configuration", + params={"mount": mount}, + data=self.serializer.dumps(options), + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxConfigUpdateError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def replace_config(self, mount: str, options: Json) -> Result[Json]: + """Replace service configuration. + + Args: + mount (str): Service mount path. + options (dict): Configuration values. Omitted options are reset to their + default values or marked as un-configured. + + Returns: + dict: Replaced configuration values. + + Raises: + FoxxConfigReplaceError: If replace fails. + + References: + - `replace-the-configuration-options `__ + """ # noqa: E501 + request = Request( + method=Method.PUT, + endpoint="/_api/foxx/configuration", + params={"mount": mount}, + data=self.serializer.dumps(options), + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxConfigReplaceError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def dependencies(self, mount: str) -> Result[Json]: + """Return service dependencies. + + Args: + mount (str): Service mount path. + + Returns: + dict: Service dependencies settings. + + Raises: + FoxxDependencyGetError: If retrieval fails. + + References: + - `get-the-dependency-options `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_api/foxx/dependencies", + params={"mount": mount}, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxDependencyGetError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def update_dependencies(self, mount: str, options: Json) -> Result[Json]: + """Update service dependencies. + + Args: + mount (str): Service mount path. + options (dict): Dependencies settings. Omitted ones are ignored. + + Returns: + dict: Updated dependency settings. + + Raises: + FoxxDependencyUpdateError: If update fails. + + References: + - `update-the-dependency-options `__ + """ # noqa: E501 + request = Request( + method=Method.PATCH, + endpoint="/_api/foxx/dependencies", + params={"mount": mount}, + data=self.serializer.dumps(options), + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxDependencyUpdateError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def replace_dependencies(self, mount: str, options: Json) -> Result[Json]: + """Replace service dependencies. + + Args: + mount (str): Service mount path. + options (dict): Dependencies settings. Omitted ones are disabled. + + Returns: + dict: Replaced dependency settings. + + Raises: + FoxxDependencyReplaceError: If replace fails. 
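+
+        Example (illustrative sketch; ``mailer`` is a hypothetical dependency
+        name defined in the service manifest):
+
+        .. code-block:: python
+
+            deps = await db.foxx.replace_dependencies(
+                mount="/my-service",
+                options={"mailer": "/mailer-service"},
+            )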
+
+        References:
+            - `replace-the-dependency-options `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.PUT,
+            endpoint="/_api/foxx/dependencies",
+            params={"mount": mount},
+            data=self.serializer.dumps(options),
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise FoxxDependencyReplaceError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def scripts(self, mount: str) -> Result[Json]:
+        """List service scripts.
+
+        Args:
+            mount (str): Service mount path.
+
+        Returns:
+            dict: Service scripts.
+
+        Raises:
+            FoxxScriptListError: If retrieval fails.
+
+        References:
+            - `list-the-service-scripts `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.GET,
+            endpoint="/_api/foxx/scripts",
+            params={"mount": mount},
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise FoxxScriptListError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def run_script(
+        self, mount: str, name: str, arg: Optional[Json] = None
+    ) -> Result[Any]:
+        """Run a service script.
+
+        Args:
+            mount (str): Service mount path.
+            name (str): Script name.
+            arg (dict | None): Arbitrary value passed into the script as the first argument.
+
+        Returns:
+            Any: The exports of the script, if any.
+
+        Raises:
+            FoxxScriptRunError: If the script fails.
+
+        References:
+            - `run-a-service-script `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.POST,
+            endpoint=f"/_api/foxx/scripts/{name}",
+            params={"mount": mount},
+            data=self.serializer.dumps(arg) if arg is not None else None,
+        )
+
+        def response_handler(resp: Response) -> Any:
+            if not resp.is_success:
+                raise FoxxScriptRunError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def run_tests(
+        self,
+        mount: str,
+        reporter: Optional[str] = None,
+        idiomatic: Optional[bool] = None,
+        filter: Optional[str] = None,
+        output_format: Optional[str] = None,
+    ) -> Result[str]:
+        """Run service tests.
+
+        Args:
+            mount (str): Service mount path.
+            reporter (str | None): Test reporter. Allowed values are "default" (simple
+                list of test cases), "suite" (object of test cases nested in
+                suites), "stream" (raw stream of test results), "xunit" (XUnit or
+                JUnit compatible structure), or "tap" (raw TAP compatible stream).
+            idiomatic (bool | None): Use the matching format for the reporter, regardless
+                of the value of parameter **output_format**.
+            filter (str | None): Only run tests whose full name (test suite and
+                test case) matches the given string.
+            output_format (str | None): Used to further control the format. Allowed values
+                are "x-ldjson", "xml" and "text". When using the "stream" reporter,
+                setting this to "x-ldjson" returns a newline-delimited JSON stream.
+                When using the "tap" reporter, setting this to "text" returns a
+                plain-text TAP report. When using the "xunit" reporter, setting
+                this to "xml" returns XML instead of JSONML.
+
+        Returns:
+            str: Reporter output (e.g. raw JSON string, XML, plain text).
+
+        Raises:
+            FoxxTestRunError: If the test run fails.
+
+        References:
+            - `run-the-service-tests `__
+        """  # noqa: E501
+        params: Params = dict()
+        params["mount"] = mount
+        if reporter is not None:
+            params["reporter"] = reporter
+        if idiomatic is not None:
+            params["idiomatic"] = idiomatic
+        if filter is not None:
+            params["filter"] = filter
+
+        headers: RequestHeaders = {}
+        if output_format == "x-ldjson":
+            headers["accept"] = "application/x-ldjson"
+        elif output_format == "xml":
+            headers["accept"] = "application/xml"
+        elif output_format == "text":
+            headers["accept"] = "text/plain"
+
+        request = Request(
+            method=Method.POST,
+            endpoint="/_api/foxx/tests",
+            params=params,
+            headers=headers,
+        )
+
+        def response_handler(resp: Response) -> str:
+            if not resp.is_success:
+                raise FoxxTestRunError(resp, request)
+            return resp.raw_body.decode("utf-8")
+
+        return await self._executor.execute(request, response_handler)
+
+    async def enable_development(self, mount: str) -> Result[Json]:
+        """Put the service into development mode.
+
+        While the service is running in development mode, it is reloaded from
+        the file system, and its setup script (if any) is re-executed every
+        time the service handles a request.
+
+        In a cluster with multiple coordinators, changes to the filesystem on
+        one coordinator are not reflected across other coordinators.
+
+        Args:
+            mount (str): Service mount path.
+
+        Returns:
+            dict: Service metadata.
+
+        Raises:
+            FoxxDevModeEnableError: If the operation fails.
+
+        References:
+            - `enable-the-development-mode `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.POST,
+            endpoint="/_api/foxx/development",
+            params={"mount": mount},
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise FoxxDevModeEnableError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def disable_development(self, mount: str) -> Result[Json]:
+        """Put the service into production mode.
+
+        In a cluster with multiple coordinators, the services on all other
+        coordinators are replaced with the version on the calling coordinator.
+
+        Args:
+            mount (str): Service mount path.
+
+        Returns:
+            dict: Service metadata.
+
+        Raises:
+            FoxxDevModeDisableError: If the operation fails.
+
+        References:
+            - `disable-the-development-mode `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.DELETE,
+            endpoint="/_api/foxx/development",
+            params={"mount": mount},
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise FoxxDevModeDisableError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def readme(self, mount: str) -> Result[str]:
+        """Return the service readme.
+
+        Args:
+            mount (str): Service mount path.
+
+        Returns:
+            str: Service readme content.
+
+        Raises:
+            FoxxReadmeGetError: If retrieval fails.
+
+        References:
+            - `get-the-service-readme `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.GET,
+            endpoint="/_api/foxx/readme",
+            params={"mount": mount},
+        )
+
+        def response_handler(resp: Response) -> str:
+            if not resp.is_success:
+                raise FoxxReadmeGetError(resp, request)
+            return resp.raw_body.decode("utf-8")
+
+        return await self._executor.execute(request, response_handler)
+
+    async def swagger(self, mount: str) -> Result[Json]:
+        """Return the Swagger API description for the given service.
+
+        Args:
+            mount (str): Service mount path.
+ + Returns: + dict: Swagger API description. + + Raises: + FoxxSwaggerGetError: If retrieval fails. + + References: + - `get-the-swagger-description `__ + """ # noqa: E501 + request = Request( + method=Method.GET, endpoint="/_api/foxx/swagger", params={"mount": mount} + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxSwaggerGetError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def download(self, mount: str) -> Result[bytes]: + """Downloads a zip bundle of the service directory. + + When development mode is enabled, this always creates a new bundle. + Otherwise, the bundle will represent the version of a service that is + installed on that ArangoDB instance. + + Args: + mount (str): Service mount path. + + Returns: + bytes: Service bundle zip in raw bytes form. + + Raises: + FoxxDownloadError: If download fails. + + References: + - `download-a-service-bundle `__ + """ # noqa: E501 + request = Request( + method=Method.POST, endpoint="/_api/foxx/download", params={"mount": mount} + ) + + def response_handler(resp: Response) -> bytes: + if not resp.is_success: + raise FoxxDownloadError(resp, request) + return resp.raw_body + + return await self._executor.execute(request, response_handler) + + async def commit(self, replace: Optional[bool] = None) -> None: + """Commit local service state of the coordinator to the database. + + This can be used to resolve service conflicts between coordinators + that cannot be fixed automatically due to missing data. + + Args: + replace (bool | None): If set to `True`, any existing service files in the database + will be overwritten. + + Raises: + FoxxCommitError: If commit fails. + + References: + - `commit-the-local-service-state `__ + """ # noqa: E501 + params: Params = {} + if replace is not None: + params["replace"] = replace + + request = Request( + method=Method.POST, endpoint="/_api/foxx/commit", params=params + ) + + def response_handler(resp: Response) -> None: + if not resp.is_success: + raise FoxxCommitError(resp, request) + + await self._executor.execute(request, response_handler) diff --git a/arangoasync/graph.py b/arangoasync/graph.py index 059a53e..1fba982 100644 --- a/arangoasync/graph.py +++ b/arangoasync/graph.py @@ -93,7 +93,7 @@ async def properties(self) -> Result[GraphProperties]: GraphProperties: If the operation fails. References: - - `get-a-graph `__ + - `get-a-graph `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint=f"/_api/gharial/{self._name}") @@ -132,7 +132,7 @@ async def vertex_collections(self) -> Result[List[str]]: VertexCollectionListError: If the operation fails. References: - - `list-vertex-collections `__ + - `list-vertex-collections `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -191,7 +191,7 @@ async def create_vertex_collection( VertexCollectionCreateError: If the operation fails. References: - - `add-a-vertex-collection `__ + - `add-a-vertex-collection `__ """ # noqa: E501 data: Json = {"collection": name} @@ -228,7 +228,7 @@ async def delete_vertex_collection(self, name: str, purge: bool = False) -> None VertexCollectionDeleteError: If the operation fails. References: - - `remove-a-vertex-collection `__ + - `remove-a-vertex-collection `__ """ # noqa: E501 request = Request( method=Method.DELETE, @@ -300,7 +300,7 @@ async def vertex( DocumentParseError: If the document is malformed. 
References: - - `get-a-vertex `__ + - `get-a-vertex `__ """ # noqa: E501 col = Collection.get_col_name(vertex) return await self.vertex_collection(col).get( @@ -337,7 +337,7 @@ async def insert_vertex( DocumentParseError: If the document is malformed. References: - - `create-a-vertex `__ + - `create-a-vertex `__ """ # noqa: E501 return await self.vertex_collection(collection).insert( vertex, @@ -379,7 +379,7 @@ async def update_vertex( DocumentUpdateError: If update fails. References: - - `update-a-vertex `__ + - `update-a-vertex `__ """ # noqa: E501 col = Collection.get_col_name(cast(Json | str, vertex)) return await self.vertex_collection(col).update( @@ -425,7 +425,7 @@ async def replace_vertex( DocumentReplaceError: If replace fails. References: - - `replace-a-vertex `__ + - `replace-a-vertex `__ """ # noqa: E501 col = Collection.get_col_name(cast(Json | str, vertex)) return await self.vertex_collection(col).replace( @@ -468,7 +468,7 @@ async def delete_vertex( DocumentDeleteError: If deletion fails. References: - - `remove-a-vertex `__ + - `remove-a-vertex `__ """ # noqa: E501 col = Collection.get_col_name(cast(Json | str, vertex)) return await self.vertex_collection(col).delete( @@ -551,7 +551,7 @@ async def edge_collections(self) -> Result[List[str]]: EdgeCollectionListError: If the operation fails. References: - - `list-edge-collections `__ + - `list-edge-collections `__ """ # noqa: E501 request = Request( method=Method.GET, @@ -602,7 +602,7 @@ async def create_edge_definition( EdgeDefinitionCreateError: If the operation fails. References: - - `add-an-edge-definition `__ + - `add-an-edge-definition `__ """ # noqa: E501 data: Json = { "collection": edge_collection, @@ -659,7 +659,7 @@ async def replace_edge_definition( EdgeDefinitionReplaceError: If the operation fails. References: - - `replace-an-edge-definition `__ + - `replace-an-edge-definition `__ """ # noqa: E501 data: Json = { "collection": edge_collection, @@ -712,7 +712,7 @@ async def delete_edge_definition( EdgeDefinitionDeleteError: If the operation fails. References: - - `remove-an-edge-definition `__ + - `remove-an-edge-definition `__ """ # noqa: E501 params: Params = {} if drop_collections is not None: @@ -793,7 +793,7 @@ async def edge( DocumentParseError: If the document is malformed. References: - - `get-an-edge `__ + - `get-an-edge `__ """ # noqa: E501 col = Collection.get_col_name(edge) return await self.edge_collection(col).get( @@ -832,7 +832,7 @@ async def insert_edge( DocumentParseError: If the document is malformed. References: - - `create-an-edge `__ + - `create-an-edge `__ """ # noqa: E501 return await self.edge_collection(collection).insert( edge, @@ -875,7 +875,7 @@ async def update_edge( DocumentUpdateError: If update fails. References: - - `update-an-edge `__ + - `update-an-edge `__ """ # noqa: E501 col = Collection.get_col_name(cast(Json | str, edge)) return await self.edge_collection(col).update( @@ -923,7 +923,7 @@ async def replace_edge( DocumentReplaceError: If replace fails. References: - - `replace-an-edge `__ + - `replace-an-edge `__ """ # noqa: E501 col = Collection.get_col_name(cast(Json | str, edge)) return await self.edge_collection(col).replace( @@ -967,7 +967,7 @@ async def delete_edge( DocumentDeleteError: If deletion fails. References: - - `remove-an-edge `__ + - `remove-an-edge `__ """ # noqa: E501 col = Collection.get_col_name(cast(Json | str, edge)) return await self.edge_collection(col).delete( @@ -1001,7 +1001,7 @@ async def edges( EdgeListError: If retrieval fails. 
References: - - `get-inbound-and-outbound-edges `__ + - `get-inbound-and-outbound-edges `__ """ # noqa: E501 return await self.edge_collection(collection).edges( vertex, diff --git a/arangoasync/job.py b/arangoasync/job.py index 13794fe..bec3c6a 100644 --- a/arangoasync/job.py +++ b/arangoasync/job.py @@ -27,7 +27,7 @@ class AsyncJob(Generic[T]): response_handler: HTTP response handler References: - - `jobs `__ + - `jobs `__ """ # noqa: E501 def __init__( @@ -68,7 +68,7 @@ async def status(self) -> str: AsyncJobStatusError: If retrieval fails or the job is not found. References: - - `list-async-jobs-by-status-or-get-the-status-of-specific-job `__ + - `list-async-jobs-by-status-or-get-the-status-of-specific-job `__ """ # noqa: E501 request = Request(method=Method.GET, endpoint=f"/_api/job/{self._id}") response = await self._conn.send_request(request) @@ -101,7 +101,7 @@ async def result(self) -> T: is still pending. References: - - `get-the-results-of-an-async-job `__ + - `get-the-results-of-an-async-job `__ """ # noqa: E501 request = Request(method=Method.PUT, endpoint=f"/_api/job/{self._id}") response = await self._conn.send_request(request) @@ -142,7 +142,7 @@ async def cancel(self, ignore_missing: bool = False) -> bool: AsyncJobCancelError: If cancellation fails. References: - - `cancel-an-async-job `__ + - `cancel-an-async-job `__ """ # noqa: E501 request = Request(method=Method.PUT, endpoint=f"/_api/job/{self._id}/cancel") response = await self._conn.send_request(request) @@ -173,7 +173,7 @@ async def clear( AsyncJobClearError: If deletion fails. References: - - `delete-async-job-results `__ + - `delete-async-job-results `__ """ # noqa: E501 request = Request(method=Method.DELETE, endpoint=f"/_api/job/{self._id}") resp = await self._conn.send_request(request) diff --git a/arangoasync/replication.py b/arangoasync/replication.py new file mode 100644 index 0000000..e495e89 --- /dev/null +++ b/arangoasync/replication.py @@ -0,0 +1,270 @@ +__all__ = ["Replication"] + + +from typing import Optional + +from arangoasync.exceptions import ( + ReplicationApplierConfigError, + ReplicationApplierStateError, + ReplicationClusterInventoryError, + ReplicationDumpError, + ReplicationInventoryError, + ReplicationLoggerStateError, + ReplicationServerIDError, +) +from arangoasync.executor import ApiExecutor +from arangoasync.request import Method, Request +from arangoasync.response import Response +from arangoasync.result import Result +from arangoasync.serialization import Deserializer, Serializer +from arangoasync.typings import Json, Jsons, Params + + +class Replication: + """Replication API wrapper.""" + + def __init__(self, executor: ApiExecutor) -> None: + self._executor = executor + + @property + def serializer(self) -> Serializer[Json]: + """Return the serializer.""" + return self._executor.serializer + + @property + def deserializer(self) -> Deserializer[Json, Jsons]: + """Return the deserializer.""" + return self._executor.deserializer + + async def inventory( + self, + batch_id: str, + include_system: Optional[bool] = None, + all_databases: Optional[bool] = None, + collection: Optional[bool] = None, + db_server: Optional[str] = None, + ) -> Result[Json]: + """ + Return an overview of collections and indexes. + + Args: + batch_id (str): Batch ID. + include_system (bool | None): Include system collections. + all_databases (bool | None): Include all databases (only on "_system"). 
+ collection (bool | None): If this parameter is set, the + response will be restricted to a single collection (the one specified), + and no views will be returned. + db_server (str | None): On a Coordinator, this request must have a + DBserver query parameter + + Returns: + dict: Overview of collections and indexes. + + Raises: + ReplicationInventoryError: If retrieval fails. + + References: + - `get-a-replication-inventory `__ + """ # noqa: E501 + params: Params = dict() + params["batchId"] = batch_id + if include_system is not None: + params["includeSystem"] = include_system + if all_databases is not None: + params["global"] = all_databases + if collection is not None: + params["collection"] = collection + if db_server is not None: + params["DBServer"] = db_server + + request = Request( + method=Method.GET, + endpoint="/_api/replication/inventory", + params=params, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ReplicationInventoryError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def dump( + self, + collection: str, + batch_id: Optional[str] = None, + chunk_size: Optional[int] = None, + ) -> Result[bytes]: + """Return the events data of one collection. + + Args: + collection (str): ID of the collection to dump. + batch_id (str | None): Batch ID. + chunk_size (int | None): Size of the result in bytes. This value is honored + approximately only. + + Returns: + bytes: Collection events data. + + Raises: + ReplicationDumpError: If retrieval fails. + + References: + - `get-a-replication-dump `__ + """ # noqa: E501 + params: Params = dict() + params["collection"] = collection + if batch_id is not None: + params["batchId"] = batch_id + if chunk_size is not None: + params["chunkSize"] = chunk_size + + request = Request( + method=Method.GET, + endpoint="/_api/replication/dump", + params=params, + ) + + def response_handler(resp: Response) -> bytes: + if not resp.is_success: + raise ReplicationDumpError(resp, request) + return resp.raw_body + + return await self._executor.execute(request, response_handler) + + async def cluster_inventory( + self, include_system: Optional[bool] = None + ) -> Result[Json]: + """Return an overview of collections and indexes in a cluster. + + Args: + include_system (bool | None): Include system collections. + + Returns: + dict: Overview of collections and indexes in the cluster. + + Raises: + ReplicationClusterInventoryError: If retrieval fails. + + References: + - `get-the-cluster-collections-and-indexes `__ + """ # noqa: E501 + params: Params = {} + if include_system is not None: + params["includeSystem"] = include_system + + request = Request( + method=Method.GET, + endpoint="/_api/replication/clusterInventory", + params=params, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ReplicationClusterInventoryError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def logger_state(self) -> Result[Json]: + """Return the state of the replication logger. + + Returns: + dict: Logger state. + + Raises: + ReplicationLoggerStateError: If retrieval fails. 
+ + References: + - `get-the-replication-logger-state `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_api/replication/logger-state", + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ReplicationLoggerStateError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def applier_config(self) -> Result[Json]: + """Return the configuration of the replication applier. + + Returns: + dict: Configuration of the replication applier. + + Raises: + ReplicationApplierConfigError: If retrieval fails. + + References: + - `get-the-replication-applier-configuration `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_api/replication/applier-config", + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ReplicationApplierConfigError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def applier_state(self) -> Result[Json]: + """Return the state of the replication applier. + + Returns: + dict: State of the replication applier. + + Raises: + ReplicationApplierStateError: If retrieval fails. + + References: + - `get-the-replication-applier-state `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_api/replication/applier-state", + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ReplicationApplierStateError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def server_id(self) -> Result[str]: + """Return the current server's ID. + + Returns: + str: Server ID. + + Raises: + ReplicationServerIDError: If retrieval fails. + + References: + - `get-the-replication-server-id `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_api/replication/server-id", + ) + + def response_handler(resp: Response) -> str: + if not resp.is_success: + raise ReplicationServerIDError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return str(result["serverId"]) + + return await self._executor.execute(request, response_handler) diff --git a/arangoasync/request.py b/arangoasync/request.py index 951c9e9..9c43508 100644 --- a/arangoasync/request.py +++ b/arangoasync/request.py @@ -4,7 +4,7 @@ ] from enum import Enum, auto -from typing import Optional +from typing import Any, Optional from arangoasync.auth import Auth from arangoasync.typings import Params, RequestHeaders @@ -31,16 +31,18 @@ class Request: endpoint (str): API endpoint. headers (dict | None): Request headers. params (dict | None): URL parameters. - data (bytes | None): Request payload. + data (Any): Request payload. auth (Auth | None): Authentication. + prefix_needed (bool): Whether the request needs a prefix (e.g., database name). Attributes: method (Method): HTTP method. endpoint (str): API endpoint. headers (dict | None): Request headers. params (dict | None): URL parameters. - data (bytes | None): Request payload. + data (Any): Request payload. auth (Auth | None): Authentication. + prefix_needed (bool): Whether the request needs a prefix (e.g., database name). 
""" __slots__ = ( @@ -50,6 +52,7 @@ class Request: "params", "data", "auth", + "prefix_needed", ) def __init__( @@ -58,15 +61,17 @@ def __init__( endpoint: str, headers: Optional[RequestHeaders] = None, params: Optional[Params] = None, - data: Optional[bytes | str] = None, + data: Optional[Any] = None, auth: Optional[Auth] = None, + prefix_needed: bool = True, ) -> None: self.method: Method = method self.endpoint: str = endpoint self.headers: RequestHeaders = headers or dict() self.params: Params = params or dict() - self.data: Optional[bytes | str] = data + self.data: Optional[Any] = data self.auth: Optional[Auth] = auth + self.prefix_needed = prefix_needed def normalized_headers(self) -> RequestHeaders: """Normalize request headers. diff --git a/arangoasync/response.py b/arangoasync/response.py index 63b10fb..000def9 100644 --- a/arangoasync/response.py +++ b/arangoasync/response.py @@ -5,7 +5,7 @@ from typing import Optional from arangoasync.request import Method -from arangoasync.typings import ResponseHeaders +from arangoasync.typings import Json, ResponseHeaders class Response: @@ -63,3 +63,17 @@ def __init__( self.error_code: Optional[int] = None self.error_message: Optional[str] = None self.is_success: Optional[bool] = None + + @staticmethod + def format_body(body: Json) -> Json: + """Format the generic response body, stripping the error code and message. + + Args: + body (Json): The response body. + + Returns: + dict: The formatted response body. + """ + body.pop("error", None) + body.pop("code", None) + return body diff --git a/arangoasync/typings.py b/arangoasync/typings.py index 280e27e..cd1c472 100644 --- a/arangoasync/typings.py +++ b/arangoasync/typings.py @@ -223,7 +223,7 @@ class KeyOptions(JsonWrapper): } References: - - `create-a-collection `__ + - `create-a-collection `__ """ # noqa: E501 def __init__( @@ -310,7 +310,7 @@ class CollectionInfo(JsonWrapper): } References: - - `get-the-collection-information `__ + - `get-the-collection-information `__ """ # noqa: E501 def __init__(self, data: Json) -> None: @@ -384,7 +384,7 @@ class UserInfo(JsonWrapper): } References: - - `create-a-user `__ + - `create-a-user `__ """ # noqa: E501 def __init__( @@ -484,7 +484,7 @@ class ServerStatusInformation(JsonWrapper): } References: - - `get-server-status-information `__ + - `get-server-status-information `__ """ # noqa: E501 def __init__(self, data: Json) -> None: @@ -543,7 +543,7 @@ class DatabaseProperties(JsonWrapper): """Properties of the database. References: - - `get-information-about-the-current-database `__ + - `get-information-about-the-current-database `__ """ # noqa: E501 def __init__(self, data: Json, strip_result: bool = False) -> None: @@ -650,7 +650,7 @@ class CollectionProperties(JsonWrapper): } References: - - `get-the-properties-of-a-collection `__ + - `get-the-properties-of-a-collection `__ """ # noqa: E501 def __init__(self, data: Json) -> None: @@ -791,8 +791,6 @@ def compatibility_formatter(data: Json) -> Json: result["deleted"] = data["deleted"] if "syncByRevision" in data: result["sync_by_revision"] = data["syncByRevision"] - if "tempObjectId" in data: - result["temp_object_id"] = data["tempObjectId"] if "usesRevisionsAsDocumentIds" in data: result["rev_as_id"] = data["usesRevisionsAsDocumentIds"] if "isDisjoint" in data: @@ -819,6 +817,146 @@ def format(self, formatter: Optional[Formatter] = None) -> Json: return self.compatibility_formatter(self._data) +class CollectionStatistics(JsonWrapper): + """Statistical information about the collection. 
+ + Example: + .. code-block:: json + + { + "figures" : { + "indexes" : { + "count" : 1, + "size" : 1234 + }, + "documentsSize" : 5601, + "cacheInUse" : false, + "cacheSize" : 0, + "cacheUsage" : 0, + "engine" : { + "documents" : 1, + "indexes" : [ + { + "type" : "primary", + "id" : 0, + "count" : 1 + } + ] + } + }, + "writeConcern" : 1, + "waitForSync" : false, + "usesRevisionsAsDocumentIds" : true, + "syncByRevision" : true, + "statusString" : "loaded", + "id" : "69123", + "isSmartChild" : false, + "schema" : null, + "name" : "products", + "type" : 2, + "status" : 3, + "count" : 1, + "cacheEnabled" : false, + "isSystem" : false, + "internalValidatorType" : 0, + "globallyUniqueId" : "hB7C02EE43DCE/69123", + "keyOptions" : { + "allowUserKeys" : true, + "type" : "traditional", + "lastValue" : 69129 + }, + "computedValues" : null, + "objectId" : "69124" + } + + References: + - `get-the-collection-statistics `__ + """ # noqa: E501 + + def __init__(self, data: Json) -> None: + super().__init__(data) + + @property + def figures(self) -> Json: + return cast(Json, self._data.get("figures")) + + @property + def write_concern(self) -> Optional[int]: + return self._data.get("writeConcern") + + @property + def wait_for_sync(self) -> Optional[bool]: + return self._data.get("waitForSync") + + @property + def use_revisions_as_document_ids(self) -> Optional[bool]: + return self._data.get("usesRevisionsAsDocumentIds") + + @property + def sync_by_revision(self) -> Optional[bool]: + return self._data.get("syncByRevision") + + @property + def status_string(self) -> Optional[str]: + return self._data.get("statusString") + + @property + def id(self) -> str: + return self._data["id"] # type: ignore[no-any-return] + + @property + def is_smart_child(self) -> bool: + return self._data["isSmartChild"] # type: ignore[no-any-return] + + @property + def schema(self) -> Optional[Json]: + return self._data.get("schema") + + @property + def name(self) -> str: + return self._data["name"] # type: ignore[no-any-return] + + @property + def type(self) -> CollectionType: + return CollectionType.from_int(self._data["type"]) + + @property + def status(self) -> CollectionStatus: + return CollectionStatus.from_int(self._data["status"]) + + @property + def count(self) -> int: + return self._data["count"] # type: ignore[no-any-return] + + @property + def cache_enabled(self) -> Optional[bool]: + return self._data.get("cacheEnabled") + + @property + def is_system(self) -> bool: + return self._data["isSystem"] # type: ignore[no-any-return] + + @property + def internal_validator_type(self) -> Optional[int]: + return self._data.get("internalValidatorType") + + @property + def globally_unique_id(self) -> str: + return self._data["globallyUniqueId"] # type: ignore[no-any-return] + + @property + def key_options(self) -> KeyOptions: + return KeyOptions(self._data["keyOptions"]) + + @property + def computed_values(self) -> Optional[Json]: + return self._data.get("computedValues") + + @property + def object_id(self) -> str: + return self._data["objectId"] # type: ignore[no-any-return] + + class IndexProperties(JsonWrapper): """Properties of an index. 
@@ -838,7 +976,7 @@ class IndexProperties(JsonWrapper): } References: - - `get-an-index `__ + - `get-an-index `__ """ # noqa: E501 def __init__(self, data: Json) -> None: @@ -1115,7 +1253,7 @@ class QueryProperties(JsonWrapper): } References: - - `create-a-cursor `__ + - `create-a-cursor `__ """ # noqa: E501 def __init__( @@ -1276,7 +1414,7 @@ class QueryExecutionPlan(JsonWrapper): """The execution plan of an AQL query. References: - - `plan `__ + - `plan `__ """ # noqa: E501 def __init__(self, data: Json) -> None: @@ -1330,7 +1468,7 @@ class QueryExecutionProfile(JsonWrapper): } References: - - `profile `__ + - `profile `__ """ # noqa: E501 def __init__(self, data: Json) -> None: @@ -1398,7 +1536,7 @@ class QueryExecutionStats(JsonWrapper): } References: - - `stats `__ + - `stats `__ """ # noqa: E501 def __init__(self, data: Json) -> None: @@ -1477,7 +1615,7 @@ class QueryExecutionExtra(JsonWrapper): """Extra information about the query result. References: - - `extra `__ + - `extra `__ """ # noqa: E501 def __init__(self, data: Json) -> None: @@ -1521,7 +1659,7 @@ class QueryTrackingConfiguration(JsonWrapper): } References: - - `get-the-aql-query-tracking-configuration `__ + - `get-the-aql-query-tracking-configuration `__ """ # noqa: E501 def __init__(self, data: Json) -> None: @@ -1580,7 +1718,7 @@ class QueryExplainOptions(JsonWrapper): } References: - - `explain-an-aql-query `__ + - `explain-an-aql-query `__ """ # noqa: E501 def __init__( @@ -1626,8 +1764,8 @@ class QueryCacheProperties(JsonWrapper): } References: - - `get-the-aql-query-results-cache-configuration `__ - - `set-the-aql-query-results-cache-configuration `__ + - `get-the-aql-query-results-cache-configuration `__ + - `set-the-aql-query-results-cache-configuration `__ """ # noqa: E501 def __init__(self, data: Json) -> None: @@ -1680,9 +1818,9 @@ class GraphProperties(JsonWrapper): } References: - - `get-a-graph `__ - - `list-all-graphs `__ - - `create-a-graph `__ + - `get-a-graph `__ + - `list-all-graphs `__ + - `create-a-graph `__ """ # noqa: E501 def __init__(self, data: Json) -> None: @@ -1789,7 +1927,7 @@ class GraphOptions(JsonWrapper): graph. References: - - `create-a-graph `__ + - `create-a-graph `__ """ # noqa: E501 def __init__( @@ -1844,7 +1982,7 @@ class VertexCollectionOptions(JsonWrapper): be a string and a valid collection name. References: - - `add-a-vertex-collection `__ + - `add-a-vertex-collection `__ """ # noqa: E501 def __init__( @@ -1871,7 +2009,7 @@ class EdgeDefinitionOptions(JsonWrapper): be a string and a valid collection name. References: - - `add-an-edge-definition `__ + - `add-an-edge-definition `__ """ # noqa: E501 def __init__( @@ -1886,3 +2024,55 @@ def __init__( @property def satellites(self) -> Optional[List[str]]: return cast(Optional[List[str]], self._data.get("satellites")) + + +class AccessToken(JsonWrapper): + """User access token. + + Example: + .. 
code-block:: json
+
+            {
+                "id" : 1,
+                "name" : "Token for Service A",
+                "valid_until" : 1782864000,
+                "created_at" : 1765543306,
+                "fingerprint" : "v1...71227d",
+                "active" : true,
+                "token" : "v1.7b2265223a3137471227d"
+            }
+
+    References:
+        - `create-an-access-token `__
+    """  # noqa: E501
+
+    def __init__(self, data: Json) -> None:
+        super().__init__(data)
+
+    @property
+    def active(self) -> bool:
+        return cast(bool, self._data["active"])
+
+    @property
+    def created_at(self) -> int:
+        return cast(int, self._data["created_at"])
+
+    @property
+    def fingerprint(self) -> str:
+        return cast(str, self._data["fingerprint"])
+
+    @property
+    def id(self) -> int:
+        return cast(int, self._data["id"])
+
+    @property
+    def name(self) -> str:
+        return cast(str, self._data["name"])
+
+    @property
+    def token(self) -> str:
+        return cast(str, self._data["token"])
+
+    @property
+    def valid_until(self) -> int:
+        return cast(int, self._data["valid_until"])
diff --git a/arangoasync/version.py b/arangoasync/version.py
index 81f0fde..68cdeee 100644
--- a/arangoasync/version.py
+++ b/arangoasync/version.py
@@ -1 +1 @@
-__version__ = "0.0.4"
+__version__ = "1.0.5"
diff --git a/docs/admin.rst b/docs/admin.rst
new file mode 100644
index 0000000..6120567
--- /dev/null
+++ b/docs/admin.rst
@@ -0,0 +1,50 @@
+Server Administration
+---------------------
+
+ArangoDB provides operations for server administration and monitoring.
+Most of these operations can only be performed by admin users via the
+``_system`` database.
+
+**Example:**
+
+.. code-block:: python
+
+    from arangoasync import ArangoClient
+    from arangoasync.auth import Auth
+
+    # Initialize the client for ArangoDB.
+    async with ArangoClient(hosts="http://localhost:8529") as client:
+        auth = Auth(username="root", password="passwd")
+
+        # Connect to "_system" database as root user.
+        sys_db = await client.db("_system", auth=auth)
+
+        # Retrieve the database engine.
+        await sys_db.engine()
+
+        # Retrieve the server time.
+        time = await sys_db.time()
+
+        # Check server availability.
+        availability = await sys_db.check_availability()
+
+        # Retrieve support info.
+        info = await sys_db.support_info()
+
+        # Get the startup option configuration.
+        options = await sys_db.options()
+
+        # Get the available startup options.
+        options = await sys_db.options_available()
+
+        # Return whether or not the server is in read-only mode.
+        mode = await sys_db.mode()
+
+        # Get license information.
+        license = await sys_db.license()
+
+        # Execute JavaScript on the server.
+        result = await sys_db.execute("return 1")
+
+        # Get metrics in Prometheus format.
+        metrics = await sys_db.metrics()
diff --git a/docs/analyzer.rst b/docs/analyzer.rst
new file mode 100644
index 0000000..851ab02
--- /dev/null
+++ b/docs/analyzer.rst
@@ -0,0 +1,39 @@
+Analyzers
+---------
+
+For more information on analyzers, refer to `ArangoDB Manual`_.
+
+.. _ArangoDB Manual: https://docs.arango.ai
+
+**Example:**
+
+.. code-block:: python
+
+    from arangoasync import ArangoClient
+    from arangoasync.auth import Auth
+
+    # Initialize the client for ArangoDB.
+    async with ArangoClient(hosts="http://localhost:8529") as client:
+        auth = Auth(username="root", password="passwd")
+
+        # Connect to "test" database as root user.
+        db = await client.db("test", auth=auth)
+
+        # Create an analyzer.
+        await db.create_analyzer(
+            name='test_analyzer',
+            analyzer_type='identity',
+            properties={},
+            features=[]
+        )
+
+        # Retrieve the created analyzer.
+        analyzer = await db.analyzer('test_analyzer')
+
+        # Retrieve list of analyzers.
+        await db.analyzers()
+
+        # Delete an analyzer.
+        await db.delete_analyzer('test_analyzer', ignore_missing=True)
+
+Refer to :class:`arangoasync.database.StandardDatabase` class for API specification.
diff --git a/docs/aql.rst b/docs/aql.rst
index 69a9bf6..97d4f6c 100644
--- a/docs/aql.rst
+++ b/docs/aql.rst
@@ -7,7 +7,7 @@ operations such as creating or deleting :doc:`databases `,
 :doc:`collections ` or :doc:`indexes `. For more information, refer to
 `ArangoDB Manual`_.
 
-.. _ArangoDB Manual: https://docs.arangodb.com
+.. _ArangoDB Manual: https://docs.arango.ai
 
 AQL Queries
 ===========
diff --git a/docs/backup.rst b/docs/backup.rst
new file mode 100644
index 0000000..93085f0
--- /dev/null
+++ b/docs/backup.rst
@@ -0,0 +1,78 @@
+Backups
+-------
+
+Hot Backups are near-instantaneous, consistent snapshots of an entire ArangoDB deployment.
+This includes all databases, collections, indexes, Views, graphs, and users at any given time.
+For more information, refer to `ArangoDB Manual`_.
+
+.. _ArangoDB Manual: https://docs.arango.ai
+
+**Example:**
+
+.. code-block:: python
+
+    from arangoasync import ArangoClient
+    from arangoasync.auth import JwtToken
+
+    # Initialize the client for ArangoDB.
+    async with ArangoClient(hosts="http://localhost:8529") as client:
+        token = JwtToken.generate_token(LOGIN_SECRET)
+
+        # Connect to "_system" database as root user.
+        db = await client.db(
+            "_system", auth_method="superuser", token=token, verify=True
+        )
+
+        # Get the backup API wrapper.
+        backup = db.backup
+
+        # Create a backup.
+        result = await backup.create(
+            label="foo",
+            allow_inconsistent=True,
+            force=False,
+            timeout=1000
+        )
+        backup_id = result["id"]
+
+        # Retrieve details on all backups.
+        backups = await backup.get()
+
+        # Retrieve details on a specific backup.
+        details = await backup.get(backup_id=backup_id)
+
+        # Upload a backup to a remote repository.
+        result = await backup.upload(
+            backup_id=backup_id,
+            repository="local://tmp/backups",
+            config={"local": {"type": "local"}}
+        )
+        upload_id = result["uploadId"]
+
+        # Get the status of an upload.
+        status = await backup.upload(upload_id=upload_id)
+
+        # Abort an upload.
+        await backup.upload(upload_id=upload_id, abort=True)
+
+        # Download a backup from a remote repository.
+        result = await backup.download(
+            backup_id=backup_id,
+            repository="local://tmp/backups",
+            config={"local": {"type": "local"}}
+        )
+        download_id = result["downloadId"]
+
+        # Get the status of a download.
+        status = await backup.download(download_id=download_id)
+
+        # Abort a download.
+        await backup.download(download_id=download_id, abort=True)
+
+        # Restore from a backup.
+        await backup.restore(backup_id)
+
+        # Delete a backup.
+        await backup.delete(backup_id)
+
+See :class:`arangoasync.backup.Backup` for API specification.
diff --git a/docs/certificates.rst b/docs/certificates.rst
index c0665fa..f8fa1e5 100644
--- a/docs/certificates.rst
+++ b/docs/certificates.rst
@@ -108,3 +108,25 @@ Use a client certificate chain
 
 If you want to have fine-grained control over the HTTP connection, you should
 define your HTTP client as described in the :ref:`HTTP` section.
+
+Security features
+=================
+
+See the `ArangoDB Manual`_ for more information on security features.
+
+**Example:**
+
+.. code-block:: python
+
+    async with ArangoClient(hosts=url) as client:
+        db = await client.db(
+            sys_db_name, auth_method="superuser", token=token, verify=True
+        )
+
+        # Get TLS data.
+        tls = await db.tls()
+
+        # Reload TLS data.
+        tls = await db.reload_tls()
+
+.. _ArangoDB Manual: https://docs.arango.ai/arangodb/stable/develop/http-api/security/
diff --git a/docs/cluster.rst b/docs/cluster.rst
new file mode 100644
index 0000000..d5c4908
--- /dev/null
+++ b/docs/cluster.rst
@@ -0,0 +1,53 @@
+Clusters
+--------
+
+The cluster-specific API lets you get information about individual
+cluster nodes and the cluster as a whole, as well as monitor and
+administrate cluster deployments. For more information on the design
+and architecture, refer to `ArangoDB Manual`_.
+
+.. _ArangoDB Manual: https://docs.arango.ai
+
+.. code-block:: python
+
+    from arangoasync import ArangoClient
+    from arangoasync.auth import Auth
+
+    # Initialize the client for ArangoDB.
+    async with ArangoClient(hosts="http://localhost:8529") as client:
+        auth = Auth(username="root", password="passwd")
+
+        # Connect to "_system" database as root user.
+        db = await client.db("_system", auth=auth)
+        cluster = db.cluster
+
+        # Cluster health
+        health = await cluster.health()
+
+        # DB-Server statistics
+        db_server = "PRMR-2716c9d0-4b22-4c66-ba3d-f9cd3143e52b"
+        stats = await cluster.statistics(db_server)
+
+        # Cluster endpoints
+        endpoints = await cluster.endpoints()
+
+        # Cluster server ID and role
+        server_id = await cluster.server_id()
+        server_role = await cluster.server_role()
+
+        # Maintenance mode
+        await cluster.toggle_maintenance_mode("on")
+        await cluster.toggle_maintenance_mode("off")
+        await cluster.toggle_server_maintenance_mode(
+            db_server, "maintenance", timeout=30
+        )
+        status = await cluster.server_maintenance_mode(db_server)
+        await cluster.toggle_server_maintenance_mode(db_server, "normal")
+
+        # Rebalance
+        result = await cluster.calculate_imbalance()
+        result = await cluster.calculate_rebalance_plan()
+        result = await cluster.execute_rebalance_plan(moves=[])
+        result = await cluster.rebalance()
+
+See :class:`arangoasync.cluster.Cluster` for API specification.
diff --git a/docs/database.rst b/docs/database.rst
index 851cc9d..f4dc759 100644
--- a/docs/database.rst
+++ b/docs/database.rst
@@ -14,6 +14,7 @@ information.
 
     from arangoasync import ArangoClient
     from arangoasync.auth import Auth
+    from arangoasync.request import Method, Request
 
     # Initialize the client for ArangoDB.
     async with ArangoClient(hosts="http://localhost:8529") as client:
@@ -60,4 +61,10 @@ information.
     # Delete the database. Note that the new users will remain.
     await sys_db.delete_database("test")
 
+    # Example of a custom request.
+    request = Request(
+        method=Method.POST, endpoint="/_admin/execute", data="return 1".encode("utf-8")
+    )
+    response = await sys_db.request(request)
+
 See :class:`arangoasync.client.ArangoClient` and :class:`arangoasync.database.StandardDatabase` for API specification.
diff --git a/docs/document.rst b/docs/document.rst
index 571507e..09b87e0 100644
--- a/docs/document.rst
+++ b/docs/document.rst
@@ -23,7 +23,7 @@ For more information on documents and associated terminologies, refer to
 `ArangoDB Manual`_. Here is an example of a valid document in "students" collection:
 
-.. _ArangoDB Manual: https://docs.arangodb.com
+.. _ArangoDB Manual: https://docs.arango.ai
 
 .. code-block:: json
 
@@ -150,4 +150,87 @@ Standard documents are managed via collection API wrapper:
     # Delete one or more matching documents.
     await students.delete_match({"first": "Emma"})
 
+Importing documents in bulk is faster when using specialized methods. Suppose
+our data is in a file in JSON Lines (JSONL) format. Each line is expected
+to be one JSON object. Example of a "students.jsonl" file:
+
+.. 
code-block:: json + + {"_key":"john","name":"John Smith","age":35} + {"_key":"katie","name":"Katie Foster","age":28} + +To import this file into the "students" collection, we can use the `import_bulk` API: + +.. code-block:: python + + from arangoasync import ArangoClient + from arangoasync.auth import Auth + import aiofiles + + async with ArangoClient(hosts="http://localhost:8529") as client: + auth = Auth(username="root", password="passwd") + + # Connect to "test" database as root user. + db = await client.db("test", auth=auth) + + # Get the API wrapper for "students" collection. + students = db.collection("students") + + # Read the JSONL file asynchronously. + async with aiofiles.open("students.jsonl", mode="r") as f: + documents = await f.read() + + # Import documents in bulk. + result = await students.import_bulk(documents, doc_type="documents") + +You can manage documents via database API wrappers also, but only simple +operations (i.e. get, insert, update, replace, delete) are supported and you +must provide document IDs instead of keys: + +.. code-block:: python + + from arangoasync import ArangoClient + from arangoasync.auth import Auth + + # Initialize the client for ArangoDB. + async with ArangoClient(hosts="http://localhost:8529") as client: + auth = Auth(username="root", password="passwd") + + # Connect to "test" database as root user. + db = await client.db("test", auth=auth) + + # Create a new collection named "students" if it does not exist. + if not await db.has_collection("students"): + await db.create_collection("students") + + # Create some test documents to play around with. + # The documents must have the "_id" field instead. + lola = {"_id": "students/lola", "GPA": 3.5} + abby = {"_id": "students/abby", "GPA": 3.2} + john = {"_id": "students/john", "GPA": 3.6} + emma = {"_id": "students/emma", "GPA": 4.0} + + # Insert a new document. + metadata = await db.insert_document("students", lola) + assert metadata["_id"] == "students/lola" + assert metadata["_key"] == "lola" + + # Check if a document exists. + assert await db.has_document(lola) is True + + # Get a document (by ID or body with "_id" field). + await db.document("students/lola") + await db.document(abby) + + # Update a document. + lola["GPA"] = 3.6 + await db.update_document(lola) + + # Replace a document. + lola["GPA"] = 3.4 + await db.replace_document(lola) + + # Delete a document (by ID or body with "_id" field). + await db.delete_document("students/lola") + See :class:`arangoasync.database.StandardDatabase` and :class:`arangoasync.collection.StandardCollection` for API specification. diff --git a/docs/foxx.rst b/docs/foxx.rst new file mode 100644 index 0000000..91e3423 --- /dev/null +++ b/docs/foxx.rst @@ -0,0 +1,147 @@ +Foxx +---- + +**Foxx** is a microservice framework which lets you define custom HTTP endpoints +that extend ArangoDB's REST API. For more information, refer to `ArangoDB Manual`_. + +.. _ArangoDB Manual: https://docs.arango.ai + +**Example:** + +.. code-block:: python + + from arangoasync import ArangoClient + from arangoasync.auth import Auth + + # Initialize the client for ArangoDB. + async with ArangoClient(hosts="http://localhost:8529") as client: + auth = Auth(username="root", password="passwd") + + # Connect to "test" database as root user. + db = await client.db("test", auth=auth) + + # Get the Foxx API wrapper. + foxx = db.foxx + + # Define the test mount point. + service_mount = "/test_mount" + + # List services. + await foxx.services() + + # Create a service using a source file. 
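+        # ("/tests/static/service.zip" below is an illustrative server-side
+        # path, not a file on the client machine.)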
+ # In this case, the server must have access to the URL. + service = { + "source": "/tests/static/service.zip", + "configuration": {}, + "dependencies": {}, + } + await foxx.create_service( + mount=service_mount, + service=service, + development=True, + setup=True, + legacy=True + ) + + # Update (upgrade) a service. + await db.foxx.update_service( + mount=service_mount, + service=service, + teardown=True, + setup=True, + legacy=False + ) + + # Replace (overwrite) a service. + await db.foxx.replace_service( + mount=service_mount, + service=service, + teardown=True, + setup=True, + legacy=True, + force=False + ) + + # Get service details. + await foxx.service(service_mount) + + # Manage service configuration. + await foxx.config(service_mount) + await foxx.update_config(service_mount, options={}) + await foxx.replace_config(service_mount, options={}) + + # Manage service dependencies. + await foxx.dependencies(service_mount) + await foxx.update_dependencies(service_mount, options={}) + await foxx.replace_dependencies(service_mount, options={}) + + # Toggle development mode for a service. + await foxx.enable_development(service_mount) + await foxx.disable_development(service_mount) + + # Other miscellaneous functions. + await foxx.readme(service_mount) + await foxx.swagger(service_mount) + await foxx.download(service_mount) + await foxx.commit() + await foxx.scripts(service_mount) + await foxx.run_script(service_mount, "setup", {}) + await foxx.run_tests(service_mount, reporter="xunit", output_format="xml") + + # Delete a service. + await foxx.delete_service(service_mount) + +There are other ways to create, update, and replace services, such as +providing a file directly instead of a source URL. This is useful when you +want to deploy a service from a local file system without needing the +server to access the file directly. When using this method, you must provide +the appropriate content type in the headers, such as `application/zip` for ZIP files or +`multipart/form-data` for multipart uploads. The following example demonstrates how to do this: + +.. code-block:: python + + import aiofiles + import aiohttp + import json + from arangoasync import ArangoClient + from arangoasync.auth import Auth + + # Initialize the client for ArangoDB. + async with ArangoClient(hosts="http://localhost:8529") as client: + auth = Auth(username="root", password="passwd") + + # Connect to "test" database as root user. + db = await client.db("test", auth=auth) + + # Get the Foxx API wrapper. + foxx = db.foxx + + # Define the test mount points. + mount_point = "/test_mount" + + # Create the service using multipart/form-data. + service = aiohttp.FormData() + service.add_field( + "source", + open("./tests/static/service.zip", "rb"), + filename="service.zip", + content_type="application/zip", + ) + service.add_field("configuration", json.dumps({})) + service.add_field("dependencies", json.dumps({})) + service_info = await db.foxx.create_service( + mount=mount_point, service=service, headers={"content-type": "multipart/form-data"} + ) + + # Replace the service using raw data. + async with aiofiles.open("./tests/static/service.zip", mode="rb") as f: + service = await f.read() + service_info = await db.foxx.replace_service( + mount=mount_point, service=service, headers={"content-type": "application/zip"} + ) + + # Delete the service. + await db.foxx.delete_service(mount_point) + +See :class:`arangoasync.foxx.Foxx` for API specification. 
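+
+As a final sketch, note that ``run_tests`` returns the report as a raw string
+(JSON, XML, or plain text, depending on the reporter), so decoding is left to
+the caller. This assumes the mounted service defines tests:
+
+.. code-block:: python
+
+    import json
+
+    # The "default" reporter produces a JSON string.
+    report = await foxx.run_tests(service_mount, reporter="default")
+    results = json.loads(report)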
diff --git a/docs/graph.rst b/docs/graph.rst index 0f0bbbf..b2c2467 100644 --- a/docs/graph.rst +++ b/docs/graph.rst @@ -7,7 +7,7 @@ A **graph** consists of vertices and edges. Vertices are stored as documents in their relations are specified with :ref:`edge definitions `. For more information, refer to `ArangoDB Manual`_. -.. _ArangoDB Manual: https://docs.arangodb.com +.. _ArangoDB Manual: https://docs.arango.ai **Example:** diff --git a/docs/index.rst b/docs/index.rst index 180c0ed..b9ac826 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -7,7 +7,7 @@ python-arango-async Welcome to the documentation for python-arango-async_, a Python driver for ArangoDB_. -**Note: This project is still in active development, features might be added or removed.** +You can check out a demo app at python-arango-async-demo_. Requirements ============= @@ -43,7 +43,11 @@ Contents .. toctree:: :maxdepth: 1 + foxx transaction + view + analyzer + cluster **API Executions** @@ -57,6 +61,7 @@ Contents .. toctree:: :maxdepth: 1 + admin user **Miscellaneous** @@ -70,6 +75,8 @@ Contents certificates compression serialization + backup + task errors errno logging @@ -85,3 +92,4 @@ Contents .. _ArangoDB: https://www.arangodb.com .. _python-arango-async: https://github.com/arangodb/python-arango-async +.. _python-arango-async-demo: https://github.com/apetenchea/python-arango-async-demo diff --git a/docs/indexes.rst b/docs/indexes.rst index 911efaa..63e2359 100644 --- a/docs/indexes.rst +++ b/docs/indexes.rst @@ -7,7 +7,7 @@ cannot be deleted or modified. Every edge collection has additional indexes on fields ``_from`` and ``_to``. For more information on indexes, refer to `ArangoDB Manual`_. -.. _ArangoDB Manual: https://docs.arangodb.com +.. _ArangoDB Manual: https://docs.arango.ai **Example:** diff --git a/docs/migration.rst b/docs/migration.rst index f26e7d6..0353a0d 100644 --- a/docs/migration.rst +++ b/docs/migration.rst @@ -2,7 +2,7 @@ Coming from python-arango ------------------------- Generally, migrating from `python-arango`_ should be a smooth transition. For the most part, the API is similar, -but there are a few things to note._ +but there are a few things to note. Helpers ======= @@ -51,7 +51,7 @@ this is not always consistent. The asynchronous driver, however, tries to stick to a simple rule: -* If the API returns a camel case key, it will be returned as is. +* If the API returns a camel case key, it will be returned as is. The response is returned from the server as is. * Parameters passed from client to server use the snake case equivalent of the camel case keys required by the API (e.g. `userName` becomes `user_name`). This is done to ensure PEP8 compatibility. @@ -74,6 +74,13 @@ Serialization Check out the :ref:`Serialization` section to learn more about how to implement your own serializer/deserializer. The current driver makes use of generic types and allows for a higher degree of customization. +Replication +=========== + +Although a minimal replication API is available for observability purposes, its use is not recommended. +Most of these are internal APIs that are not meant to be used by the end user. If you need to make any changes +to replication, please do so from the cluster web interface. 
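+
+For observability, a minimal read-only sketch (this assumes the wrapper is
+exposed as ``db.replication``, mirroring the other API wrappers):
+
+.. code-block:: python
+
+    # Read-only inspection; nothing here reconfigures replication.
+    logger_state = await db.replication.logger_state()
+    server_id = await db.replication.server_id()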
+
 Mixing sync and async
 =====================
diff --git a/docs/overview.rst b/docs/overview.rst
index f723234..38ecfd7 100644
--- a/docs/overview.rst
+++ b/docs/overview.rst
@@ -64,7 +64,7 @@ You may also use the client without a context manager, but you must ensure to cl
 
 Another example with `graphs`_:
 
-.. _graphs: https://docs.arangodb.com/stable/graphs/
+.. _graphs: https://docs.arango.ai/arangodb/stable/graphs/
 
 .. code-block:: python
 
diff --git a/docs/specs.rst b/docs/specs.rst
index 9983716..e8c0a32 100644
--- a/docs/specs.rst
+++ b/docs/specs.rst
@@ -28,6 +28,15 @@ python-arango-async.
 .. automodule:: arangoasync.cursor
     :members:
 
+.. automodule:: arangoasync.backup
+    :members:
+
+.. automodule:: arangoasync.foxx
+    :members:
+
+.. automodule:: arangoasync.cluster
+    :members:
+
 .. automodule:: arangoasync.compression
     :members:
 
@@ -51,3 +60,6 @@ python-arango-async.
 .. automodule:: arangoasync.result
     :members:
+
+.. automodule:: arangoasync.replication
+    :members:
diff --git a/docs/task.rst b/docs/task.rst
new file mode 100644
index 0000000..2490507
--- /dev/null
+++ b/docs/task.rst
@@ -0,0 +1,51 @@
+Tasks
+-----
+
+ArangoDB can schedule user-defined JavaScript snippets as one-time or periodic
+(re-scheduled after each execution) tasks. Tasks are executed in the context of
+the database they are defined in.
+
+**Example:**
+
+.. code-block:: python
+
+    from arangoasync import ArangoClient
+    from arangoasync.auth import Auth
+
+    # Initialize the client for ArangoDB.
+    async with ArangoClient(hosts="http://localhost:8529") as client:
+        auth = Auth(username="root", password="passwd")
+
+        # Connect to "test" database as root user.
+        db = await client.db("test", auth=auth)
+
+        # Create a new task which simply prints parameters.
+        await db.create_task(
+            name="test_task",
+            command="""
+                var task = function(params){
+                    var db = require('@arangodb');
+                    db.print(params);
+                }
+                task(params);
+            """,
+            params={"foo": "bar"},
+            offset=300,
+            period=10,
+            task_id="001"
+        )
+
+        # List all active tasks.
+        tasks = await db.tasks()
+
+        # Retrieve details of a task by ID.
+        details = await db.task("001")
+
+        # Delete an existing task by ID.
+        await db.delete_task('001', ignore_missing=True)
+
+
+.. note::
+    When deleting a database, any tasks that were initialized under its context
+    remain active. It is therefore advisable to delete any running tasks before
+    deleting the database.
diff --git a/docs/view.rst b/docs/view.rst
new file mode 100644
index 0000000..5ab61e9
--- /dev/null
+++ b/docs/view.rst
@@ -0,0 +1,69 @@
+Views
+-----
+
+All types of views are supported. For more information on **view**
+management, refer to `ArangoDB Manual`_.
+
+.. _ArangoDB Manual: https://docs.arango.ai
+
+**Example:**
+
+.. code-block:: python
+
+    from arangoasync import ArangoClient
+    from arangoasync.auth import Auth
+
+    # Initialize the client for ArangoDB.
+    async with ArangoClient(hosts="http://localhost:8529") as client:
+        auth = Auth(username="root", password="passwd")
+
+        # Connect to "test" database as root user.
+        db = await client.db("test", auth=auth)
+
+        # Retrieve list of views.
+        await db.views()
+
+        # Create a view.
+        await db.create_view(
+            name="foo",
+            view_type="arangosearch",
+            properties={
+                "cleanupIntervalStep": 0,
+                "consolidationIntervalMsec": 0
+            }
+        )
+
+        # Rename a view (not supported in cluster deployments).
+        await db.rename_view("foo", "bar")
+
+        # Retrieve view properties.
+        await db.view("bar")
+
+        # Retrieve view summary.
+ await db.view_info("bar") + + # Partially update view properties. + await db.update_view( + name="bar", + properties={ + "cleanupIntervalStep": 1000, + "consolidationIntervalMsec": 200 + } + ) + + # Replace view properties. Unspecified ones are reset to default. + await db.replace_view( + name="bar", + properties={"cleanupIntervalStep": 2000} + ) + + # Delete a view. + await db.delete_view("bar") + +For more information on the content of view **properties**, +see `Search Alias Views`_ and `Arangosearch Views`_. + +.. _Search Alias Views: https://docs.arango.ai/arangodb/stable/develop/http-api/views/search-alias-views/ +.. _Arangosearch Views: https://docs.arango.ai/arangodb/stable/develop/http-api/views/arangosearch-views/ + +Refer to :class:`arangoasync.database.StandardDatabase` class for API specification. diff --git a/pyproject.toml b/pyproject.toml index c5c890f..b01c76f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -49,6 +49,7 @@ version = { attr = "arangoasync.version.__version__" } [project.optional-dependencies] dev = [ + "aiofiles>=24.1.0", "black>=24.2", "flake8>=7.0", "isort>=5.10", @@ -59,6 +60,7 @@ dev = [ "pytest-cov>=5.0", "sphinx>=7.3", "sphinx_rtd_theme>=2.0", + "allure-pytest>=2.15", "types-setuptools", ] diff --git a/starter.sh b/starter.sh old mode 100644 new mode 100755 index be1778a..3eef281 --- a/starter.sh +++ b/starter.sh @@ -6,7 +6,7 @@ # Usage: # ./starter.sh [single|cluster] [community|enterprise] [version] # Example: -# ./starter.sh cluster enterprise 3.11.4 +# ./starter.sh cluster enterprise 3.12.4 setup="${1:-single}" license="${2:-community}" diff --git a/tests/conftest.py b/tests/conftest.py index 98d75de..5025142 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -27,8 +27,10 @@ class GlobalData: graph_name: str = "test_graph" username: str = generate_username() cluster: bool = False - enterprise: bool = False - db_version: version = version.parse("0.0.0") + skip: list[str] = None + foxx_path: str = None + backup_path: str = None + db_version: version.Version = version.parse("0.0.0") global_data = GlobalData() @@ -39,7 +41,7 @@ def pytest_addoption(parser): "--host", action="store", default="127.0.0.1", help="ArangoDB host address" ) parser.addoption( - "--port", action="append", default=["8529"], help="ArangoDB coordinator ports" + "--port", action="append", default=None, help="ArangoDB coordinator ports" ) parser.addoption( "--root", action="store", default="root", help="ArangoDB root user" @@ -54,12 +56,36 @@ def pytest_addoption(parser): "--cluster", action="store_true", help="Run tests in a cluster setup" ) parser.addoption( - "--enterprise", action="store_true", help="Run tests in an enterprise setup" + "--foxx-path", + action="store", + default="/tests/static/service.zip", + help="Foxx tests service path", + ) + parser.addoption( + "--backup-path", + action="store", + default="local://tmp", + help="Backup tests repository path", + ) + parser.addoption( + "--skip", + action="store", + nargs="*", + choices=[ + "backup", # backup tests + "jwt-secret-keyfile", # server was not configured with a keyfile + "foxx", # foxx is not supported + "js-transactions", # javascript transactions are not supported + "task", # tasks API + "enterprise", # skip what used to be "enterprise-only" before 3.12 + ], + default=[], + help="Skip specific tests", ) def pytest_configure(config): - ports = config.getoption("port") + ports = config.getoption("port") or ["8529"] hosts = [f"http://{config.getoption('host')}:{p}" for p in ports] url = hosts[0] @@ -69,7 
+95,9 @@ def pytest_configure(config): global_data.secret = config.getoption("secret") global_data.token = JwtToken.generate_token(global_data.secret) global_data.cluster = config.getoption("cluster") - global_data.enterprise = config.getoption("enterprise") + global_data.skip = config.getoption("skip") + global_data.backup_path = config.getoption("backup_path") + global_data.foxx_path = config.getoption("foxx_path") global_data.graph_name = generate_graph_name() async def get_db_version(): @@ -112,8 +140,18 @@ def cluster(): @pytest.fixture -def enterprise(): - return global_data.enterprise +def backup_path(): + return global_data.backup_path + + +@pytest.fixture +def foxx_path(): + return global_data.foxx_path + + +@pytest.fixture +def skip_tests(): + return global_data.skip @pytest.fixture @@ -256,6 +294,19 @@ async def teardown(): verify=False, ) + # Remove all tasks + test_tasks = [ + task + for task in await sys_db.tasks() + if task["name"].startswith("test_task") + ] + await asyncio.gather( + *( + sys_db.delete_task(task["id"], ignore_missing=True) + for task in test_tasks + ) + ) + # Remove all test users. tst_users = [ user["user"] diff --git a/tests/helpers.py b/tests/helpers.py index 8e91c26..2bc04a5 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -44,3 +44,57 @@ def generate_string(): str: Random unique string. """ return uuid4().hex + + +def generate_view_name(): + """Generate and return a random view name. + + Returns: + str: Random view name. + """ + return f"test_view_{uuid4().hex}" + + +def generate_analyzer_name(): + """Generate and return a random analyzer name. + + Returns: + str: Random analyzer name. + """ + return f"test_analyzer_{uuid4().hex}" + + +def generate_task_name(): + """Generate and return a random task name. + + Returns: + str: Random task name. + """ + return f"test_task_{uuid4().hex}" + + +def generate_task_id(): + """Generate and return a random task ID. + + Returns: + str: Random task ID + """ + return f"test_task_id_{uuid4().hex}" + + +def generate_service_mount(): + """Generate and return a random service name. + + Returns: + str: Random service name. + """ + return f"/test_{uuid4().hex}" + + +def generate_token_name(): + """Generate and return a random token name. + + Returns: + str: Random token name. 
+ """ + return f"test_token_{uuid4().hex}" diff --git a/tests/static/cluster-3.11.conf b/tests/static/cluster-3.11.conf deleted file mode 100644 index 86f7855..0000000 --- a/tests/static/cluster-3.11.conf +++ /dev/null @@ -1,14 +0,0 @@ -[starter] -mode = cluster -local = true -address = 0.0.0.0 -port = 8528 - -[auth] -jwt-secret = /tests/static/keyfile - -[args] -all.database.password = passwd -all.database.extended-names = true -all.log.api-enabled = true -all.javascript.allow-admin-execute = true diff --git a/tests/static/service.zip b/tests/static/service.zip new file mode 100644 index 0000000..00bf513 Binary files /dev/null and b/tests/static/service.zip differ diff --git a/tests/static/single-3.11.conf b/tests/static/single-3.11.conf deleted file mode 100644 index df45cb7..0000000 --- a/tests/static/single-3.11.conf +++ /dev/null @@ -1,12 +0,0 @@ -[starter] -mode = single -address = 0.0.0.0 -port = 8528 - -[auth] -jwt-secret = /tests/static/keyfile - -[args] -all.database.password = passwd -all.database.extended-names = true -all.javascript.allow-admin-execute = true diff --git a/tests/test_analyzer.py b/tests/test_analyzer.py new file mode 100644 index 0000000..0557f64 --- /dev/null +++ b/tests/test_analyzer.py @@ -0,0 +1,91 @@ +import pytest +from packaging import version + +from arangoasync.exceptions import ( + AnalyzerCreateError, + AnalyzerDeleteError, + AnalyzerGetError, + AnalyzerListError, +) +from tests.helpers import generate_analyzer_name + + +@pytest.mark.asyncio +async def test_analyzer_management(db, bad_db, skip_tests, db_version): + analyzer_name = generate_analyzer_name() + full_analyzer_name = db.name + "::" + analyzer_name + bad_analyzer_name = generate_analyzer_name() + + # Test create identity analyzer + result = await db.create_analyzer(analyzer_name, "identity") + assert result["name"] == full_analyzer_name + assert result["type"] == "identity" + assert result["properties"] == {} + assert result["features"] == [] + + # Test create delimiter analyzer + result = await db.create_analyzer( + name=generate_analyzer_name(), + analyzer_type="delimiter", + properties={"delimiter": ","}, + ) + assert result["type"] == "delimiter" + assert result["properties"] == {"delimiter": ","} + assert result["features"] == [] + + # Test create duplicate with bad database + with pytest.raises(AnalyzerCreateError): + await bad_db.create_analyzer(analyzer_name, "identity") + + # Test get analyzer + result = await db.analyzer(analyzer_name) + assert result["name"] == full_analyzer_name + assert result["type"] == "identity" + assert result["properties"] == {} + assert result["features"] == [] + + # Test get missing analyzer + with pytest.raises(AnalyzerGetError): + await db.analyzer(bad_analyzer_name) + + # Test list analyzers + result = await db.analyzers() + assert full_analyzer_name in [a["name"] for a in result] + + # Test list analyzers with bad database + with pytest.raises(AnalyzerListError): + await bad_db.analyzers() + + # Test delete analyzer + assert await db.delete_analyzer(analyzer_name, force=True) is True + assert full_analyzer_name not in [a["name"] for a in await db.analyzers()] + + # Test delete missing analyzer + with pytest.raises(AnalyzerDeleteError): + await db.delete_analyzer(analyzer_name) + + # Test delete missing analyzer with ignore_missing set to True + assert await db.delete_analyzer(analyzer_name, ignore_missing=True) is False + + # Test create geo_s2 analyzer + if "enterprise" not in skip_tests: + analyzer_name = generate_analyzer_name() + result = await 
db.create_analyzer(analyzer_name, "geo_s2", properties={}) + assert result["type"] == "geo_s2" + assert await db.delete_analyzer(analyzer_name) + + if db_version >= version.parse("3.12.0"): + # Test delimiter analyzer with multiple delimiters + result = await db.create_analyzer( + name=generate_analyzer_name(), + analyzer_type="multi_delimiter", + properties={"delimiters": [",", "."]}, + ) + assert result["type"] == "multi_delimiter" + assert result["properties"] == {"delimiters": [",", "."]} + + # Test wildcard analyzer + analyzer_name = generate_analyzer_name() + result = await db.create_analyzer(analyzer_name, "wildcard", {"ngramSize": 4}) + assert result["type"] == "wildcard" + assert result["properties"] == {"ngramSize": 4} diff --git a/tests/test_aql.py b/tests/test_aql.py index ab5ba19..28fa91c 100644 --- a/tests/test_aql.py +++ b/tests/test_aql.py @@ -21,6 +21,7 @@ AQLQueryClearError, AQLQueryExecuteError, AQLQueryExplainError, + AQLQueryHistoryError, AQLQueryKillError, AQLQueryListError, AQLQueryRulesGetError, @@ -96,6 +97,8 @@ async def test_list_queries(superuser, db, bad_db): _ = await superuser.aql.slow_queries(all_queries=True) await aql.clear_slow_queries() await superuser.aql.clear_slow_queries(all_queries=True) + history = await superuser.aql.history() + assert isinstance(history, dict) with pytest.raises(AQLQueryListError): _ = await bad_db.aql.queries() @@ -109,6 +112,8 @@ async def test_list_queries(superuser, db, bad_db): _ = await aql.slow_queries(all_queries=True) with pytest.raises(AQLQueryClearError): await aql.clear_slow_queries(all_queries=True) + with pytest.raises(AQLQueryHistoryError): + _ = await bad_db.aql.history() long_running_task.cancel() @@ -274,17 +279,15 @@ async def test_cache_plan_management(db, bad_db, doc_col, docs, db_version): entries = await cache.plan_entries() assert isinstance(entries, list) assert len(entries) > 0 - with pytest.raises(AQLCacheEntriesError) as err: - _ = await bad_db.aql.cache.plan_entries() - assert err.value.error_code == FORBIDDEN + with pytest.raises(AQLCacheEntriesError): + await bad_db.aql.cache.plan_entries() # Clear the cache await cache.clear_plan() entries = await cache.plan_entries() assert len(entries) == 0 - with pytest.raises(AQLCacheClearError) as err: + with pytest.raises(AQLCacheClearError): await bad_db.aql.cache.clear_plan() - assert err.value.error_code == FORBIDDEN @pytest.mark.asyncio diff --git a/tests/test_backup.py b/tests/test_backup.py new file mode 100644 index 0000000..7e6e37e --- /dev/null +++ b/tests/test_backup.py @@ -0,0 +1,44 @@ +import pytest +from packaging import version + +from arangoasync.client import ArangoClient +from arangoasync.exceptions import BackupDeleteError, BackupRestoreError + + +@pytest.mark.asyncio +async def test_backup( + url, sys_db_name, bad_db, token, cluster, db_version, skip_tests, backup_path +): + if "enterprise" in skip_tests: + pytest.skip("Backup API is only available in ArangoDB Enterprise Edition") + if not cluster: + pytest.skip("For simplicity, the backup API is only tested in cluster setups") + if db_version < version.parse("3.12.0"): + pytest.skip( + "For simplicity, the backup API is only tested in the latest versions" + ) + if "backup" in skip_tests: + pytest.skip("Skipping backup tests") + + with pytest.raises(BackupRestoreError): + await bad_db.backup.restore("foobar") + with pytest.raises(BackupDeleteError): + await bad_db.backup.delete("foobar") + + async with ArangoClient(hosts=url) as client: + db = await client.db( + sys_db_name, 
auth_method="superuser", token=token, verify=True + ) + backup = db.backup + result = await backup.create() + backup_id = result["id"] + result = await backup.get() + assert "list" in result + result = await backup.restore(backup_id) + assert "previous" in result + config = {"local": {"type": "local"}} + result = await backup.upload(backup_id, repository=backup_path, config=config) + assert "uploadId" in result + result = await backup.download(backup_id, repository=backup_path, config=config) + assert "downloadId" in result + await backup.delete(backup_id) diff --git a/tests/test_client.py b/tests/test_client.py index 6210412..2218384 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -1,11 +1,20 @@ +import time + import pytest from arangoasync.auth import JwtToken from arangoasync.client import ArangoClient from arangoasync.compression import DefaultCompressionManager +from arangoasync.exceptions import ( + AccessTokenCreateError, + AccessTokenDeleteError, + AccessTokenListError, + ServerEncryptionError, +) from arangoasync.http import DefaultHTTPClient from arangoasync.resolver import DefaultHostResolver, RoundRobinHostResolver from arangoasync.version import __version__ +from tests.helpers import generate_token_name @pytest.mark.asyncio @@ -120,16 +129,30 @@ async def test_client_jwt_auth(url, sys_db_name, basic_auth_root): @pytest.mark.asyncio async def test_client_jwt_superuser_auth( - url, sys_db_name, basic_auth_root, token, enterprise + url, sys_db_name, basic_auth_root, token, skip_tests ): # successful authentication async with ArangoClient(hosts=url) as client: db = await client.db( sys_db_name, auth_method="superuser", token=token, verify=True ) - if enterprise: + if "enterprise" not in skip_tests: await db.jwt_secrets() - await db.reload_jwt_secrets() + if "jwt-secret-keyfile" not in skip_tests: + await db.reload_jwt_secrets() + + # Get TLS data + tls = await db.tls() + assert isinstance(tls, dict) + + # Reload TLS data + tls = await db.reload_tls() + assert isinstance(tls, dict) + + # Rotate + with pytest.raises(ServerEncryptionError): + # Not allowed on coordinators + await db.encryption() # token missing async with ArangoClient(hosts=url) as client: @@ -137,3 +160,49 @@ async def test_client_jwt_superuser_auth( await client.db( sys_db_name, auth_method="superuser", auth=basic_auth_root, verify=True ) + + +@pytest.mark.asyncio +async def test_client_access_token(url, sys_db_name, basic_auth_root, bad_db): + username = basic_auth_root.username + + async with ArangoClient(hosts=url) as client: + # First login with basic auth + db_auth_basic = await client.db( + sys_db_name, + auth_method="basic", + auth=basic_auth_root, + verify=True, + ) + + # Create an access token + token_name = generate_token_name() + token = await db_auth_basic.create_access_token( + user=username, name=token_name, valid_until=int(time.time() + 3600) + ) + assert token.active is True + + # Cannot create a token with the same name + with pytest.raises(AccessTokenCreateError): + await db_auth_basic.create_access_token( + user=username, name=token_name, valid_until=int(time.time() + 3600) + ) + + # Authenticate with the created token + access_token_db = await client.db( + sys_db_name, + auth_method="basic", + auth=token.token, + verify=True, + ) + + # List access tokens + tokens = await access_token_db.list_access_tokens(username) + assert isinstance(tokens, list) + with pytest.raises(AccessTokenListError): + await bad_db.list_access_tokens(username) + + # Clean up - delete the created token + 
await access_token_db.delete_access_token(username, token.id) + with pytest.raises(AccessTokenDeleteError): + await access_token_db.delete_access_token(username, token.id) diff --git a/tests/test_cluster.py b/tests/test_cluster.py new file mode 100644 index 0000000..9a68a6b --- /dev/null +++ b/tests/test_cluster.py @@ -0,0 +1,101 @@ +import pytest +from packaging import version + +from arangoasync.client import ArangoClient +from arangoasync.exceptions import ( + ClusterEndpointsError, + ClusterHealthError, + ClusterMaintenanceModeError, + ClusterRebalanceError, + ClusterServerIDError, + ClusterServerRoleError, + ClusterStatisticsError, +) + + +@pytest.mark.asyncio +async def test_cluster( + url, sys_db_name, bad_db, token, skip_tests, cluster, db_version +): + if not cluster: + pytest.skip("Cluster API is only tested in cluster setups") + if "enterprise" in skip_tests or db_version < version.parse("3.12.0"): + pytest.skip( + "For simplicity, the cluster API is only tested in the latest versions" + ) + + # Test errors + with pytest.raises(ClusterHealthError): + await bad_db.cluster.health() + with pytest.raises(ClusterStatisticsError): + await bad_db.cluster.statistics("foo") + with pytest.raises(ClusterEndpointsError): + await bad_db.cluster.endpoints() + with pytest.raises(ClusterServerIDError): + await bad_db.cluster.server_id() + with pytest.raises(ClusterServerRoleError): + await bad_db.cluster.server_role() + with pytest.raises(ClusterMaintenanceModeError): + await bad_db.cluster.toggle_maintenance_mode("on") + with pytest.raises(ClusterMaintenanceModeError): + await bad_db.cluster.toggle_server_maintenance_mode("PRMR0001", "normal") + with pytest.raises(ClusterMaintenanceModeError): + await bad_db.cluster.server_maintenance_mode("PRMR0001") + with pytest.raises(ClusterRebalanceError): + await bad_db.cluster.calculate_imbalance() + with pytest.raises(ClusterRebalanceError): + await bad_db.cluster.rebalance() + with pytest.raises(ClusterRebalanceError): + await bad_db.cluster.calculate_rebalance_plan() + with pytest.raises(ClusterRebalanceError): + await bad_db.cluster.execute_rebalance_plan(moves=[]) + + async with ArangoClient(hosts=url) as client: + db = await client.db( + sys_db_name, auth_method="superuser", token=token, verify=True + ) + cluster = db.cluster + + # Cluster health + health = await cluster.health() + assert "Health" in health + + # DB-Server statistics + db_server = None + for server in health["Health"]: + if server.startswith("PRMR"): + db_server = server + break + assert db_server is not None, f"No DB server found in {health}" + stats = await cluster.statistics(db_server) + assert "enabled" in stats + + # Cluster endpoints + endpoints = await cluster.endpoints() + assert len(endpoints) > 0 + + # Cluster server ID and role + server_id = await cluster.server_id() + assert isinstance(server_id, str) + server_role = await cluster.server_role() + assert isinstance(server_role, str) + + # Maintenance mode + await cluster.toggle_maintenance_mode("on") + await cluster.toggle_maintenance_mode("off") + await cluster.toggle_server_maintenance_mode( + db_server, "maintenance", timeout=30 + ) + status = await cluster.server_maintenance_mode(db_server) + assert isinstance(status, dict) + await cluster.toggle_server_maintenance_mode(db_server, "normal") + + # Rebalance + result = await cluster.calculate_imbalance() + assert isinstance(result, dict) + result = await cluster.calculate_rebalance_plan() + assert isinstance(result, dict) + result = await 
cluster.execute_rebalance_plan(moves=[])
+        assert result == 200
+        result = await cluster.rebalance()
+        assert isinstance(result, dict)
diff --git a/tests/test_collection.py b/tests/test_collection.py
index d9214dd..2dc4c42 100644
--- a/tests/test_collection.py
+++ b/tests/test_collection.py
@@ -4,15 +4,26 @@
 from arangoasync.errno import DATA_SOURCE_NOT_FOUND, INDEX_NOT_FOUND
 from arangoasync.exceptions import (
+    CollectionChecksumError,
+    CollectionCompactError,
+    CollectionConfigureError,
     CollectionPropertiesError,
+    CollectionRecalculateCountError,
+    CollectionRenameError,
+    CollectionResponsibleShardError,
+    CollectionRevisionError,
+    CollectionShardsError,
+    CollectionStatisticsError,
     CollectionTruncateError,
     DocumentCountError,
+    DocumentInsertError,
     IndexCreateError,
     IndexDeleteError,
     IndexGetError,
     IndexListError,
     IndexLoadError,
 )
+from tests.helpers import generate_col_name
 
 
 def test_collection_attributes(db, doc_col):
@@ -22,7 +33,9 @@
 
 
 @pytest.mark.asyncio
-async def test_collection_misc_methods(doc_col, bad_col):
+async def test_collection_misc_methods(doc_col, bad_col, docs, cluster):
+    doc = await doc_col.insert(docs[0])
+
     # Properties
     properties = await doc_col.properties()
     assert properties.name == doc_col.name
@@ -31,6 +44,75 @@
     with pytest.raises(CollectionPropertiesError):
         await bad_col.properties()
 
+    # Configure
+    wfs = not properties.wait_for_sync
+    new_properties = await doc_col.configure(wait_for_sync=wfs)
+    assert new_properties.wait_for_sync == wfs
+    with pytest.raises(CollectionConfigureError):
+        await bad_col.configure(wait_for_sync=wfs)
+
+    # Statistics
+    statistics = await doc_col.statistics()
+    assert statistics.name == doc_col.name
+    assert "figures" in statistics
+    with pytest.raises(CollectionStatisticsError):
+        await bad_col.statistics()
+
+    # Shards
+    if cluster:
+        shard = await doc_col.responsible_shard(doc)
+        assert isinstance(shard, str)
+        with pytest.raises(CollectionResponsibleShardError):
+            await bad_col.responsible_shard(doc)
+        shards = await doc_col.shards(details=True)
+        assert isinstance(shards, dict)
+        with pytest.raises(CollectionShardsError):
+            await bad_col.shards()
+
+    # Revision
+    revision = await doc_col.revision()
+    assert isinstance(revision, str)
+    with pytest.raises(CollectionRevisionError):
+        await bad_col.revision()
+
+    # Checksum
+    checksum = await doc_col.checksum(with_rev=True, with_data=True)
+    assert isinstance(checksum, str)
+    with pytest.raises(CollectionChecksumError):
+        await bad_col.checksum()
+
+    # Recalculate count
+    with pytest.raises(CollectionRecalculateCountError):
+        await bad_col.recalculate_count()
+    await doc_col.recalculate_count()
+
+    # Compact
+    with pytest.raises(CollectionCompactError):
+        await bad_col.compact()
+    res = await doc_col.compact()
+    assert res.name == doc_col.name
+
+
+@pytest.mark.asyncio
+async def test_collection_rename(cluster, db, bad_col, docs):
+    if cluster:
+        pytest.skip("Renaming collections is not supported in cluster deployments.")
+
+    with pytest.raises(CollectionRenameError):
+        await bad_col.rename("new_name")
+
+    col_name = generate_col_name()
+    new_name = generate_col_name()
+    try:
+        await db.create_collection(col_name)
+        col = db.collection(col_name)
+        await col.rename(new_name)
+        assert col.name == new_name
+        doc = await col.insert(docs[0])
+        assert col.get_col_name(doc) == new_name
+    finally:
+        await db.delete_collection(new_name, ignore_missing=True)
+
 
 @pytest.mark.asyncio
 async def
test_collection_index(doc_col, bad_col, cluster): @@ -182,3 +264,20 @@ async def test_collection_truncate_count(docs, doc_col, bad_col): await doc_col.truncate(wait_for_sync=True, compact=True) cnt = await doc_col.count() assert cnt == 0 + + +@pytest.mark.asyncio +async def test_collection_import_bulk(doc_col, bad_col, docs): + documents = "\n".join(doc_col.serializer.dumps(doc) for doc in docs) + + # Test errors + with pytest.raises(DocumentInsertError): + await bad_col.import_bulk(documents, doc_type="documents") + + # Insert documents in bulk + result = await doc_col.import_bulk(documents, doc_type="documents") + + # Verify the documents were inserted + count = await doc_col.count() + assert count == len(docs) + assert result["created"] == count diff --git a/tests/test_connection.py b/tests/test_connection.py index 568815c..e053e58 100644 --- a/tests/test_connection.py +++ b/tests/test_connection.py @@ -223,6 +223,12 @@ async def test_JwtConnection_ping_success( status_code = await connection1.ping() assert status_code == 200 + # Refresh the token + await connection3.refresh_token() + status_code = await connection1.ping() + assert status_code == 200 + assert connection3.token != connection1.token + @pytest.mark.asyncio async def test_JwtSuperuserConnection_ping_success( diff --git a/tests/test_database.py b/tests/test_database.py index eb7daa3..519d0ce 100644 --- a/tests/test_database.py +++ b/tests/test_database.py @@ -1,27 +1,66 @@ import asyncio +import datetime +import json import pytest +from packaging import version +from arangoasync.client import ArangoClient from arangoasync.collection import StandardCollection from arangoasync.exceptions import ( CollectionCreateError, CollectionDeleteError, + CollectionKeyGeneratorsError, CollectionListError, + DatabaseCompactError, DatabaseCreateError, DatabaseDeleteError, DatabaseListError, DatabasePropertiesError, + DatabaseSupportInfoError, JWTSecretListError, JWTSecretReloadError, + ReplicationApplierConfigError, + ReplicationApplierStateError, + ReplicationClusterInventoryError, + ReplicationDumpError, + ReplicationInventoryError, + ReplicationLoggerStateError, + ReplicationServerIDError, + ServerApiCallsError, + ServerAvailableOptionsGetError, + ServerCheckAvailabilityError, + ServerCurrentOptionsGetError, + ServerEchoError, + ServerEngineError, + ServerExecuteError, + ServerLicenseGetError, + ServerLicenseSetError, + ServerLogLevelError, + ServerLogLevelResetError, + ServerLogLevelSetError, + ServerLogSettingError, + ServerLogSettingSetError, + ServerMetricsError, + ServerModeError, + ServerModeSetError, + ServerReadLogError, + ServerReloadRoutingError, + ServerShutdownError, + ServerShutdownProgressError, ServerStatusError, + ServerTimeError, ServerVersionError, ) +from arangoasync.request import Method, Request from arangoasync.typings import CollectionType, KeyOptions, UserInfo from tests.helpers import generate_col_name, generate_db_name, generate_username @pytest.mark.asyncio -async def test_database_misc_methods(sys_db, db, bad_db, cluster): +async def test_database_misc_methods( + sys_db, db, bad_db, cluster, db_version, url, sys_db_name, token, skip_tests +): # Status status = await sys_db.status() assert status["server"] == "arango" @@ -50,11 +89,172 @@ async def test_database_misc_methods(sys_db, db, bad_db, cluster): await bad_db.reload_jwt_secrets() # Version - version = await sys_db.version() - assert version["version"].startswith("3.") + v = await sys_db.version() + assert v["version"].startswith("3.") with 
pytest.raises(ServerVersionError): await bad_db.version() + # key generators + if db_version >= version.parse("3.12.0"): + key_generators = await db.key_generators() + assert isinstance(key_generators, list) + with pytest.raises(CollectionKeyGeneratorsError): + await bad_db.key_generators() + + # Administration + with pytest.raises(ServerEngineError): + await bad_db.engine() + result = await db.engine() + assert isinstance(result, dict) + + with pytest.raises(ServerTimeError): + await bad_db.time() + time = await db.time() + assert isinstance(time, datetime.datetime) + + with pytest.raises(ServerCheckAvailabilityError): + await bad_db.check_availability() + assert isinstance(await db.check_availability(), str) + + with pytest.raises(DatabaseSupportInfoError): + await bad_db.support_info() + info = await sys_db.support_info() + assert isinstance(info, dict) + + if db_version >= version.parse("3.12.0"): + with pytest.raises(ServerCurrentOptionsGetError): + await bad_db.options() + options = await sys_db.options() + assert isinstance(options, dict) + with pytest.raises(ServerAvailableOptionsGetError): + await bad_db.options_available() + options_available = await sys_db.options_available() + assert isinstance(options_available, dict) + + with pytest.raises(ServerModeError): + await bad_db.mode() + mode = await sys_db.mode() + assert isinstance(mode, str) + with pytest.raises(ServerModeSetError): + await bad_db.set_mode("foo") + mode = await sys_db.set_mode("default") + assert isinstance(mode, str) + + with pytest.raises(ServerLicenseGetError): + await bad_db.license() + license = await sys_db.license() + assert isinstance(license, dict) + with pytest.raises(ServerLicenseSetError): + await sys_db.set_license('"abc"') + + with pytest.raises(ServerShutdownError): + await bad_db.shutdown() + with pytest.raises(ServerShutdownProgressError): + await bad_db.shutdown_progress() + + with pytest.raises(ServerReloadRoutingError): + await bad_db.reload_routing() + await sys_db.reload_routing() + + with pytest.raises(ServerEchoError): + await bad_db.echo() + result = await sys_db.echo() + assert isinstance(result, dict) + + with pytest.raises(ServerExecuteError): + await bad_db.execute("return 1") + result = await sys_db.execute("return 1") + assert result == 1 + + with pytest.raises(DatabaseCompactError): + await bad_db.compact() + async with ArangoClient(hosts=url) as client: + db = await client.db( + sys_db_name, auth_method="superuser", token=token, verify=True + ) + await db.compact() + + # Custom Request + request = Request( + method=Method.POST, endpoint="/_admin/execute", data="return 1".encode("utf-8") + ) + response = await sys_db.request(request) + assert json.loads(response.raw_body) == 1 + + if "enterprise" not in skip_tests and db_version >= version.parse("3.12.0"): + # API calls + with pytest.raises(ServerApiCallsError): + await bad_db.api_calls() + result = await sys_db.api_calls() + assert isinstance(result, dict) + + +@pytest.mark.asyncio +async def test_metrics(db, bad_db): + with pytest.raises(ServerMetricsError): + await bad_db.metrics() + metrics = await db.metrics() + assert isinstance(metrics, str) + + +@pytest.mark.asyncio +async def test_replication(db, bad_db, cluster): + with pytest.raises(ReplicationInventoryError): + await bad_db.replication.inventory("id") + with pytest.raises(ReplicationDumpError): + await bad_db.replication.dump("test_collection") + if cluster: + with pytest.raises(ReplicationClusterInventoryError): + await bad_db.replication.cluster_inventory() + result = 
await db.replication.cluster_inventory() + assert isinstance(result, dict) + if not cluster: + with pytest.raises(ReplicationLoggerStateError): + await bad_db.replication.logger_state() + result = await db.replication.logger_state() + assert isinstance(result, dict) + with pytest.raises(ReplicationApplierConfigError): + await bad_db.replication.applier_config() + result = await db.replication.applier_config() + assert isinstance(result, dict) + with pytest.raises(ReplicationApplierStateError): + await bad_db.replication.applier_state() + result = await db.replication.applier_state() + assert isinstance(result, dict) + with pytest.raises(ReplicationServerIDError): + await bad_db.replication.server_id() + result = await db.replication.server_id() + assert isinstance(result, str) + + +@pytest.mark.asyncio +async def test_logs(sys_db, bad_db): + with pytest.raises(ServerReadLogError): + await bad_db.read_log_entries() + result = await sys_db.read_log_entries() + assert isinstance(result, dict) + with pytest.raises(ServerLogLevelError): + await bad_db.log_levels() + result = await sys_db.log_levels() + assert isinstance(result, dict) + with pytest.raises(ServerLogLevelSetError): + await bad_db.set_log_levels() + new_levels = {"agency": "DEBUG", "engines": "INFO", "threads": "WARNING"} + result = await sys_db.set_log_levels(**new_levels) + assert isinstance(result, dict) + with pytest.raises(ServerLogLevelResetError): + await bad_db.reset_log_levels() + result = await sys_db.reset_log_levels() + assert isinstance(result, dict) + with pytest.raises(ServerLogSettingError): + await bad_db.log_settings() + result = await sys_db.log_settings() + assert isinstance(result, dict) + with pytest.raises(ServerLogSettingSetError): + await bad_db.set_log_settings() + result = await sys_db.set_log_settings() + assert isinstance(result, dict) + @pytest.mark.asyncio async def test_create_drop_database( diff --git a/tests/test_document.py b/tests/test_document.py index fbfd2b3..741ec34 100644 --- a/tests/test_document.py +++ b/tests/test_document.py @@ -566,3 +566,51 @@ async def test_document_delete_match(doc_col, bad_col, docs): await doc_col.insert_many(docs) count = await doc_col.delete_match({"text": "no_matching"}) assert count == 0 + + +@pytest.mark.asyncio +async def test_document_db_operations(db, bad_db, doc_col, docs): + # Insert a document through the collection API + doc = await doc_col.insert(docs[0]) + + # Check if the document exists in the database + assert await db.has_document(doc) is True + assert await db.has_document({"_id": "missing_col/missing_doc"}) is False + assert await db.has_document("missing_doc") is False + with pytest.raises(DocumentGetError): + await bad_db.has_document(doc) + + # Get the document + doc2 = await db.document(doc["_id"]) + assert doc2["_id"] == doc["_id"] + with pytest.raises(DocumentGetError): + await bad_db.document(doc["_id"]) + + # Insert a new document + doc = await db.insert_document(doc_col.name, docs[1]) + assert doc["_id"] == f"{doc_col.name}/{doc['_key']}" + with pytest.raises(DocumentInsertError): + await bad_db.insert_document(doc_col.name, docs[2]) + + # Update the document + doc["val"] = 100 + updated_doc = await db.update_document(doc, return_new=True) + assert updated_doc["_id"] == doc["_id"] + assert updated_doc["new"]["val"] == 100 + with pytest.raises(DocumentUpdateError): + await bad_db.update_document(doc) + + # Replace the document + doc["val"] = 200 + replaced_doc = await db.replace_document(doc, return_new=True) + assert replaced_doc["_id"] 
== doc["_id"] + assert replaced_doc["new"]["val"] == 200 + with pytest.raises(DocumentReplaceError): + await bad_db.replace_document(doc) + + # Delete the document + deleted_doc = await db.delete_document(doc["_id"], return_old=True) + assert deleted_doc["_id"] == doc["_id"] + assert deleted_doc["old"]["val"] == 200 + with pytest.raises(DocumentDeleteError): + await bad_db.delete_document(doc) diff --git a/tests/test_foxx.py b/tests/test_foxx.py new file mode 100644 index 0000000..e972dc2 --- /dev/null +++ b/tests/test_foxx.py @@ -0,0 +1,247 @@ +import asyncio +import json + +import aiofiles +import aiohttp +import pytest + +from arangoasync.exceptions import ( + FoxxCommitError, + FoxxConfigGetError, + FoxxConfigReplaceError, + FoxxConfigUpdateError, + FoxxDependencyGetError, + FoxxDependencyReplaceError, + FoxxDependencyUpdateError, + FoxxDevModeDisableError, + FoxxDevModeEnableError, + FoxxDownloadError, + FoxxReadmeGetError, + FoxxScriptListError, + FoxxScriptRunError, + FoxxServiceCreateError, + FoxxServiceDeleteError, + FoxxServiceGetError, + FoxxServiceListError, + FoxxServiceReplaceError, + FoxxServiceUpdateError, + FoxxSwaggerGetError, + FoxxTestRunError, +) +from tests.helpers import generate_service_mount + +service_name = "test" + + +@pytest.mark.asyncio +async def test_foxx(db, bad_db, skip_tests, foxx_path): + if "foxx" in skip_tests: + pytest.skip("Skipping Foxx tests") + + # Test errors + with pytest.raises(FoxxServiceGetError): + await bad_db.foxx.service(service_name) + with pytest.raises(FoxxServiceListError): + await bad_db.foxx.services() + with pytest.raises(FoxxServiceCreateError): + await bad_db.foxx.create_service( + mount=generate_service_mount(), + service={}, + headers={"content-type": "application/zip"}, + ) + with pytest.raises(FoxxServiceDeleteError): + await bad_db.foxx.delete_service(service_name) + with pytest.raises(FoxxServiceReplaceError): + await bad_db.foxx.replace_service( + mount=generate_service_mount(), + service={}, + ) + with pytest.raises(FoxxServiceUpdateError): + await bad_db.foxx.update_service(mount=generate_service_mount(), service={}) + with pytest.raises(FoxxConfigGetError): + await bad_db.foxx.config("foo") + with pytest.raises(FoxxConfigReplaceError): + await bad_db.foxx.replace_config(mount="foo", options={}) + with pytest.raises(FoxxConfigUpdateError): + await bad_db.foxx.update_config(mount="foo", options={}) + with pytest.raises(FoxxDependencyGetError): + await bad_db.foxx.dependencies("foo") + with pytest.raises(FoxxDependencyReplaceError): + await bad_db.foxx.replace_dependencies(mount="foo", options={}) + with pytest.raises(FoxxDependencyUpdateError): + await bad_db.foxx.update_dependencies(mount="foo", options={}) + with pytest.raises(FoxxDevModeEnableError): + await bad_db.foxx.enable_development("foo") + with pytest.raises(FoxxDevModeDisableError): + await bad_db.foxx.disable_development("foo") + with pytest.raises(FoxxReadmeGetError): + await bad_db.foxx.readme("foo") + with pytest.raises(FoxxSwaggerGetError): + await bad_db.foxx.swagger("foo") + with pytest.raises(FoxxDownloadError): + await bad_db.foxx.download("foo") + with pytest.raises(FoxxCommitError): + await bad_db.foxx.commit() + + services = await db.foxx.services() + assert isinstance(services, list) + + # Service as a path + mount1 = generate_service_mount() + service1 = { + "source": foxx_path, + "configuration": {"LOG_LEVEL": "info"}, + "dependencies": {}, + } + service_info = await db.foxx.create_service(mount=mount1, service=service1) + assert 
service_info["mount"] == mount1
+
+    # Service as a FormData
+    mount2 = generate_service_mount()
+    service2 = aiohttp.FormData()
+    service2.add_field(
+        "source",
+        open(f".{foxx_path}", "rb"),
+        filename="service.zip",
+        content_type="application/zip",
+    )
+    service2.add_field("configuration", json.dumps({"LOG_LEVEL": "info"}))
+    service2.add_field("dependencies", json.dumps({}))
+    service_info = await db.foxx.create_service(
+        mount=mount2, service=service2, headers={"content-type": "multipart/form-data"}
+    )
+    assert service_info["mount"] == mount2
+
+    # Service as raw data
+    mount3 = generate_service_mount()
+    async with aiofiles.open(f".{foxx_path}", mode="rb") as f:
+        service3 = await f.read()
+    service_info = await db.foxx.create_service(
+        mount=mount3, service=service3, headers={"content-type": "application/zip"}
+    )
+    assert service_info["mount"] == mount3
+
+    # Delete service
+    await db.foxx.delete_service(mount3)
+
+    # Replace service
+    service4 = {
+        "source": foxx_path,
+        "configuration": {"LOG_LEVEL": "info"},
+        "dependencies": {},
+    }
+    service_info = await db.foxx.replace_service(mount=mount2, service=service4)
+    assert service_info["mount"] == mount2
+
+    async with aiofiles.open(f".{foxx_path}", mode="rb") as f:
+        service5 = await f.read()
+    service_info = await db.foxx.replace_service(
+        mount=mount1, service=service5, headers={"content-type": "application/zip"}
+    )
+    assert service_info["mount"] == mount1
+
+    # Update service
+    service6 = {
+        "source": foxx_path,
+        "configuration": {"LOG_LEVEL": "debug"},
+        "dependencies": {},
+    }
+    service_info = await db.foxx.update_service(mount=mount1, service=service6)
+    assert service_info["mount"] == mount1
+
+    services = await db.foxx.services(exclude_system=True)
+    assert len(services) == 2
+
+    # Configuration
+    config = await db.foxx.config(mount1)
+    assert isinstance(config, dict)
+    config = await db.foxx.replace_config(mount=mount1, options={})
+    assert isinstance(config, dict)
+    config = await db.foxx.update_config(mount=mount1, options={})
+    assert isinstance(config, dict)
+
+    # Dependencies
+    config = await db.foxx.dependencies(mount1)
+    assert isinstance(config, dict)
+    config = await db.foxx.replace_dependencies(mount=mount1, options={})
+    assert isinstance(config, dict)
+    config = await db.foxx.update_dependencies(mount=mount1, options={})
+    assert isinstance(config, dict)
+
+    # Scripts
+    scripts = await db.foxx.scripts(mount1)
+    assert "setup" in scripts
+    assert "teardown" in scripts
+
+    # List missing service scripts
+    with pytest.raises(FoxxScriptListError):
+        await db.foxx.scripts("invalid_mount")
+
+    # Run service script
+    assert await db.foxx.run_script(mount1, "setup", []) == {}
+    assert await db.foxx.run_script(mount2, "teardown", []) == {}
+
+    # Run missing service script
+    with pytest.raises(FoxxScriptRunError):
+        await db.foxx.run_script(mount1, "invalid", ())
+
+    # Run tests on service
+    result = await db.foxx.run_tests(
+        mount=mount1, reporter="suite", idiomatic=True, filter="science"
+    )
+    result = json.loads(result)
+    assert "stats" in result
+    assert "tests" in result
+    assert "suites" in result
+
+    result = await db.foxx.run_tests(
+        mount=mount2, reporter="stream", output_format="x-ldjson"
+    )
+    for result_part in result.split("\r\n"):
+        if len(result_part) == 0:
+            continue
+        assert result_part.startswith("[")
+        assert result_part.endswith("]")
+
+    result = await db.foxx.run_tests(
+        mount=mount1, reporter="stream", output_format="text"
+    )
+    assert result.startswith("[")
+    assert
result.endswith("]") or result.endswith("\r\n") + + result = await db.foxx.run_tests( + mount=mount2, reporter="xunit", output_format="xml" + ) + assert result.startswith("[") + assert result.endswith("]") or result.endswith("\r\n") + + # Run tests on missing service + with pytest.raises(FoxxTestRunError): + await db.foxx.run_tests("foo") + + # Development mode + result = await db.foxx.enable_development(mount1) + assert result["mount"] == mount1 + result = await db.foxx.disable_development(mount1) + assert result["mount"] == mount1 + + # Readme + result = await db.foxx.readme(mount1) + assert isinstance(result, str) + + # Swagger + result = await db.foxx.swagger(mount1) + assert isinstance(result, dict) + + # Download service + result = await db.foxx.download(mount1) + assert isinstance(result, bytes) + + # Commit + await db.foxx.commit(replace=True) + + # Delete remaining services + await asyncio.gather( + db.foxx.delete_service(mount1), + db.foxx.delete_service(mount2), + ) diff --git a/tests/test_graph.py b/tests/test_graph.py index 6d5fcbe..5d70255 100644 --- a/tests/test_graph.py +++ b/tests/test_graph.py @@ -56,10 +56,10 @@ async def test_graph_basic(db, bad_db): @pytest.mark.asyncio -async def test_graph_properties(db, bad_graph, cluster, enterprise): +async def test_graph_properties(db, bad_graph, cluster, skip_tests): # Create a graph name = generate_graph_name() - is_smart = cluster and enterprise + is_smart = cluster and "enterprise" not in skip_tests options = GraphOptions(number_of_shards=3) graph = await db.create_graph(name, is_smart=is_smart, options=options) diff --git a/tests/test_task.py b/tests/test_task.py new file mode 100644 index 0000000..008e25d --- /dev/null +++ b/tests/test_task.py @@ -0,0 +1,82 @@ +import pytest + +from arangoasync.exceptions import ( + TaskCreateError, + TaskDeleteError, + TaskGetError, + TaskListError, +) +from tests.helpers import generate_task_id, generate_task_name + + +@pytest.mark.asyncio +async def test_task_management(sys_db, bad_db, skip_tests): + # This test intentionally uses the system database because cleaning up tasks is + # easier there. 
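+    # (The shared teardown fixture in conftest.py also deletes any leftover
+    # "test_task" tasks from the system database.)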
+ + if "task" in skip_tests: + pytest.skip("Skipping task tests") + + test_command = 'require("@arangodb").print(params);' + + # Test errors + with pytest.raises(TaskCreateError): + await bad_db.create_task(command=test_command) + with pytest.raises(TaskGetError): + await bad_db.task("non_existent_task_id") + with pytest.raises(TaskListError): + await bad_db.tasks() + with pytest.raises(TaskDeleteError): + await bad_db.delete_task("non_existent_task_id") + + # Create a task with a random ID + task_name = generate_task_name() + new_task = await sys_db.create_task( + name=task_name, + command=test_command, + params={"foo": 1, "bar": 2}, + offset=1, + ) + assert new_task["name"] == task_name + task_id = new_task["id"] + assert await sys_db.task(task_id) == new_task + + # Delete task + assert await sys_db.delete_task(task_id) is True + + # Create a task with a specific ID + task_name = generate_task_name() + task_id = generate_task_id() + new_task = await sys_db.create_task( + name=task_name, + command=test_command, + params={"foo": 1, "bar": 2}, + offset=1, + period=10, + task_id=task_id, + ) + assert new_task["name"] == task_name + assert new_task["id"] == task_id + + # Try to create a duplicate task + with pytest.raises(TaskCreateError): + await sys_db.create_task( + name=task_name, + command=test_command, + params={"foo": 1, "bar": 2}, + task_id=task_id, + ) + + # Test get missing task + with pytest.raises(TaskGetError): + await sys_db.task(generate_task_id()) + + # Test list tasks + tasks = await sys_db.tasks() + assert len(tasks) == 1 + + # Delete tasks + assert await sys_db.delete_task(task_id) is True + assert await sys_db.delete_task(task_id, ignore_missing=True) is False + with pytest.raises(TaskDeleteError): + await sys_db.delete_task(task_id) diff --git a/tests/test_transaction.py b/tests/test_transaction.py index f7d7f76..1a7363c 100644 --- a/tests/test_transaction.py +++ b/tests/test_transaction.py @@ -14,7 +14,10 @@ @pytest.mark.asyncio -async def test_transaction_execute_raw(db, doc_col, docs): +async def test_transaction_execute_raw(db, doc_col, docs, skip_tests): + if "js-transactions" in skip_tests: + pytest.skip("Skipping JS transaction tests") + # Test a valid JS transaction doc = docs[0] key = doc["_key"] diff --git a/tests/test_typings.py b/tests/test_typings.py index fd04fa1..48e9eb0 100644 --- a/tests/test_typings.py +++ b/tests/test_typings.py @@ -1,7 +1,9 @@ import pytest from arangoasync.typings import ( + AccessToken, CollectionInfo, + CollectionStatistics, CollectionStatus, CollectionType, EdgeDefinitionOptions, @@ -386,3 +388,87 @@ def test_EdgeDefinitionOptions(): ) assert options.satellites == ["col1", "col2"] + + +def test_CollectionStatistics(): + data = { + "figures": { + "indexes": {"count": 1, "size": 1234}, + "documentsSize": 5601, + "cacheInUse": False, + "cacheSize": 0, + "cacheUsage": 0, + }, + "writeConcern": 1, + "waitForSync": False, + "usesRevisionsAsDocumentIds": True, + "syncByRevision": True, + "statusString": "loaded", + "id": "69123", + "isSmartChild": False, + "schema": None, + "name": "products", + "type": 2, + "status": 3, + "count": 1, + "cacheEnabled": False, + "isSystem": False, + "internalValidatorType": 0, + "globallyUniqueId": "hB7C02EE43DCE/69123", + "keyOptions": { + "allowUserKeys": True, + "type": "traditional", + "lastValue": 69129, + }, + "computedValues": None, + "objectId": "69124", + } + + stats = CollectionStatistics(data) + + assert stats.figures == data["figures"] + assert stats.write_concern == 1 + assert 
stats.wait_for_sync is False + assert stats.use_revisions_as_document_ids is True + assert stats.sync_by_revision is True + assert stats.status_string == "loaded" + assert stats.id == "69123" + assert stats.is_smart_child is False + assert stats.schema is None + assert stats.name == "products" + assert stats.type == CollectionType.DOCUMENT + assert stats.status == CollectionStatus.LOADED + assert stats.count == 1 + assert stats.cache_enabled is False + assert stats.is_system is False + assert stats.internal_validator_type == 0 + assert stats.globally_unique_id == "hB7C02EE43DCE/69123" + assert isinstance(stats.key_options, KeyOptions) + assert stats.key_options["type"] == "traditional" + assert stats.computed_values is None + assert stats.object_id == "69124" + + +def test_AccessToken(): + data = { + "active": True, + "created_at": 1720000000, + "fingerprint": "abc123fingerprint", + "id": 42, + "name": "ci-token", + "token": "v2.local.eyJhbGciOi...", + "valid_until": 1720003600, + } + + access_token = AccessToken(data) + + assert access_token.active is True + assert access_token.created_at == 1720000000 + assert access_token.fingerprint == "abc123fingerprint" + assert access_token.id == 42 + assert access_token.name == "ci-token" + assert access_token.token == "v2.local.eyJhbGciOi..." + assert access_token.valid_until == 1720003600 + + # JsonWrapper behavior + assert access_token.to_dict() == data diff --git a/tests/test_view.py b/tests/test_view.py new file mode 100644 index 0000000..80b2388 --- /dev/null +++ b/tests/test_view.py @@ -0,0 +1,137 @@ +import pytest + +from arangoasync import errno +from arangoasync.exceptions import ( + ViewCreateError, + ViewDeleteError, + ViewGetError, + ViewListError, + ViewRenameError, + ViewReplaceError, + ViewUpdateError, +) +from tests.helpers import generate_view_name + + +@pytest.mark.asyncio +async def test_view_management(db, bad_db, doc_col, cluster): + # Create a view + view_name = generate_view_name() + bad_view_name = generate_view_name() + view_type = "arangosearch" + + result = await db.create_view( + view_name, + view_type, + {"consolidationIntervalMsec": 50000, "links": {doc_col.name: {}}}, + ) + assert "id" in result + assert result["name"] == view_name + assert result["type"] == view_type + assert result["consolidationIntervalMsec"] == 50000 + assert doc_col.name in result["links"] + + # Create view with bad database + with pytest.raises(ViewCreateError): + await bad_db.create_view( + view_name, + view_type, + {"consolidationIntervalMsec": 50000, "links": {doc_col.name: {}}}, + ) + + view_id = result["id"] + + # Test create duplicate view + with pytest.raises(ViewCreateError) as err: + await db.create_view(view_name, view_type, {"consolidationIntervalMsec": 50000}) + assert err.value.error_code == errno.DUPLICATE_NAME + + # Test get view (properties) + view = await db.view(view_name) + assert view["id"] == view_id + assert view["name"] == view_name + assert view["type"] == view_type + assert view["consolidationIntervalMsec"] == 50000 + + # Test get missing view + with pytest.raises(ViewGetError) as err: + await db.view(bad_view_name) + assert err.value.error_code == errno.DATA_SOURCE_NOT_FOUND + + # Test get view info + view_info = await db.view_info(view_name) + assert view_info["id"] == view_id + assert view_info["name"] == view_name + assert view_info["type"] == view_type + assert "consolidationIntervalMsec" not in view_info + with pytest.raises(ViewGetError) as err: + await db.view_info(bad_view_name) + assert err.value.error_code == 
errno.DATA_SOURCE_NOT_FOUND + + # Test list views + result = await db.views() + assert len(result) == 1 + view = result[0] + assert view["id"] == view_id + assert view["name"] == view_name + assert view["type"] == view_type + + # Test list views with bad database + with pytest.raises(ViewListError) as err: + await bad_db.views() + assert err.value.error_code == errno.FORBIDDEN + + # Test replace view + view = await db.replace_view(view_name, {"consolidationIntervalMsec": 40000}) + assert view["id"] == view_id + assert view["name"] == view_name + assert view["type"] == view_type + assert view["consolidationIntervalMsec"] == 40000 + + # Test replace view with bad database + with pytest.raises(ViewReplaceError) as err: + await bad_db.replace_view(view_name, {"consolidationIntervalMsec": 7000}) + assert err.value.error_code == errno.FORBIDDEN + + # Test update view + view = await db.update_view(view_name, {"consolidationIntervalMsec": 70000}) + assert view["id"] == view_id + assert view["name"] == view_name + assert view["type"] == view_type + assert view["consolidationIntervalMsec"] == 70000 + + # Test update view with bad database + with pytest.raises(ViewUpdateError) as err: + await bad_db.update_view(view_name, {"consolidationIntervalMsec": 80000}) + assert err.value.error_code == errno.FORBIDDEN + + # Test rename view + new_view_name = generate_view_name() + if cluster: + with pytest.raises(ViewRenameError): + await db.rename_view(view_name, new_view_name) + new_view_name = view_name + else: + await db.rename_view(view_name, new_view_name) + result = await db.views() + assert len(result) == 1 + view = result[0] + assert view["id"] == view_id + assert view["name"] == new_view_name + + # Test rename missing view + with pytest.raises(ViewRenameError) as err: + await db.rename_view(bad_view_name, view_name) + assert err.value.error_code == errno.DATA_SOURCE_NOT_FOUND + + # Test delete view + assert await db.delete_view(new_view_name) is True + assert len(await db.views()) == 0 + + # Test delete missing view + with pytest.raises(ViewDeleteError) as err: + await db.delete_view(new_view_name) + assert err.value.error_code == errno.DATA_SOURCE_NOT_FOUND + + # Test delete missing view with ignore_missing set to True + assert await db.delete_view(view_name, ignore_missing=True) is False