diff --git a/.circleci/config.yml b/.circleci/config.yml index fb1bc8e..b71ba0b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -20,8 +20,8 @@ workflows: parameters: python_version: ["3.10", "3.11", "3.12"] arangodb_config: ["single", "cluster"] - arangodb_license: ["community", "enterprise"] - arangodb_version: ["3.11", "3.12"] + arangodb_license: ["enterprise"] + arangodb_version: ["3.12"] jobs: lint: @@ -86,8 +86,8 @@ jobs: args+=("--cluster" "--port=8539" "--port=8549") fi - if [ << parameters.arangodb_license >> = "enterprise" ]; then - args+=("--enterprise") + if [ << parameters.arangodb_license >> != "enterprise" ]; then + args+=("--skip enterprise") fi echo "Running pytest with args: ${args[@]}" diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 375d8b0..66044c4 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -10,7 +10,7 @@ pre-commit install # Install git pre-commit hooks Run unit tests with coverage: ```shell -pytest --cov=arango --cov-report=html # Open htmlcov/index.html in your browser +pytest --enterprise --cluster --cov=arango --cov-report=html # Open htmlcov/index.html in your browser ``` To start an ArangoDB instance locally, run: diff --git a/README.md b/README.md index ab24eae..b80d633 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,7 @@ database natively supporting documents, graphs and search. This is the _asyncio_ alternative of the [python-arango](https://github.com/arangodb/python-arango) driver. -**Note: This project is still in active development, features might be added or removed.** +Check out a demo app at [python-arango-async-demo](https://github.com/apetenchea/python-arango-async-demo). ## Requirements diff --git a/arangoasync/aql.py b/arangoasync/aql.py index b81cade..1fad880 100644 --- a/arangoasync/aql.py +++ b/arangoasync/aql.py @@ -16,6 +16,7 @@ AQLQueryClearError, AQLQueryExecuteError, AQLQueryExplainError, + AQLQueryHistoryError, AQLQueryKillError, AQLQueryListError, AQLQueryRulesGetError, @@ -426,6 +427,25 @@ def response_handler(resp: Response) -> QueryTrackingConfiguration: return await self._executor.execute(request, response_handler) + async def history(self) -> Result[Json]: + """Return recently executed AQL queries (admin only). + + Returns: + dict: AQL query history. + + Raises: + AQLQueryHistoryError: If retrieval fails. + """ + request = Request(method=Method.GET, endpoint="/_admin/server/aql-queries") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise AQLQueryHistoryError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return cast(Json, result["result"]) + + return await self._executor.execute(request, response_handler) + async def queries(self, all_queries: bool = False) -> Result[Jsons]: """Return a list of currently running queries.
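The new `history()` wrapper is easiest to see in context. A minimal usage sketch, assuming `db` is an already-connected `StandardDatabase` handle for a user with admin rights; everything besides the `db.aql.history()` call itself is illustrative:

```python
from arangoasync.database import StandardDatabase


async def show_recent_queries(db: StandardDatabase) -> None:
    # GET /_admin/server/aql-queries (admin only)
    history = await db.aql.history()
    print(history)
```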
diff --git a/arangoasync/auth.py b/arangoasync/auth.py index 96e9b1b..a4df28f 100644 --- a/arangoasync/auth.py +++ b/arangoasync/auth.py @@ -20,8 +20,8 @@ class Auth: encoding (str): Encoding for the password (default: utf-8) """ - username: str - password: str + username: str = "" + password: str = "" encoding: str = "utf-8" diff --git a/arangoasync/backup.py b/arangoasync/backup.py new file mode 100644 index 0000000..75a26a6 --- /dev/null +++ b/arangoasync/backup.py @@ -0,0 +1,295 @@ +__all__ = ["Backup"] + +from numbers import Number +from typing import Optional, cast + +from arangoasync.exceptions import ( + BackupCreateError, + BackupDeleteError, + BackupDownloadError, + BackupGetError, + BackupRestoreError, + BackupUploadError, +) +from arangoasync.executor import ApiExecutor +from arangoasync.request import Method, Request +from arangoasync.response import Response +from arangoasync.result import Result +from arangoasync.serialization import Deserializer, Serializer +from arangoasync.typings import Json, Jsons + + +class Backup: + """Backup API wrapper.""" + + def __init__(self, executor: ApiExecutor) -> None: + self._executor = executor + + @property + def serializer(self) -> Serializer[Json]: + """Return the serializer.""" + return self._executor.serializer + + @property + def deserializer(self) -> Deserializer[Json, Jsons]: + """Return the deserializer.""" + return self._executor.deserializer + + async def get(self, backup_id: Optional[str] = None) -> Result[Json]: + """Return backup details. + + Args: + backup_id (str | None): If set, the returned list is restricted to the + backup with the given id. + + Returns: + dict: Backup details. + + Raises: + BackupGetError: If the operation fails. + + References: + - `list-backups `__ + """ # noqa: E501 + data: Json = {} + if backup_id is not None: + data["id"] = backup_id + + request = Request( + method=Method.POST, + endpoint="/_admin/backup/list", + data=self.serializer.dumps(data) if data else None, + prefix_needed=False, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise BackupGetError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return cast(Json, result["result"]) + + return await self._executor.execute(request, response_handler) + + async def create( + self, + label: Optional[str] = None, + allow_inconsistent: Optional[bool] = None, + force: Optional[bool] = None, + timeout: Optional[Number] = None, + ) -> Result[Json]: + """Create a backup when the global write lock can be obtained. + + Args: + label (str | None): Label for this backup. If not specified, a UUID is used. + allow_inconsistent (bool | None): Allow inconsistent backup when the global + transaction lock cannot be acquired before timeout. + force (bool | None): Forcefully abort all running transactions to ensure a + consistent backup when the global transaction lock cannot be + acquired before timeout. Default (and highly recommended) value + is `False`. + timeout (float | None): The time in seconds that the operation tries to + get a consistent snapshot. + + Returns: + dict: Backup information. + + Raises: + BackupCreateError: If the backup creation fails. 
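For orientation, here is how the listing call above might be driven. A sketch only: it assumes a connected `db` handle against an Enterprise Edition server (the backup API is Enterprise-only), and the backup id shown is a placeholder:

```python
from arangoasync.database import StandardDatabase


async def list_backups(db: StandardDatabase) -> None:
    # All backups known to the server:
    everything = await db.backup.get()
    print(everything)
    # Restrict the listing to one backup (placeholder id):
    single = await db.backup.get(backup_id="2024-01-01T00.00.00Z_example")
    print(single)
```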
+ + References: + - `create-backup `__ + """ # noqa: E501 + data: Json = {} + if label is not None: + data["label"] = label + if allow_inconsistent is not None: + data["allowInconsistent"] = allow_inconsistent + if force is not None: + data["force"] = force + if timeout is not None: + data["timeout"] = timeout + + request = Request( + method=Method.POST, + endpoint="/_admin/backup/create", + data=self.serializer.dumps(data), + prefix_needed=False, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise BackupCreateError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return cast(Json, result["result"]) + + return await self._executor.execute(request, response_handler) + + async def restore(self, backup_id: str) -> Result[Json]: + """Restore a local backup. + + Args: + backup_id (str): Backup ID. + + Returns: + dict: Result of the restore operation. + + Raises: + BackupRestoreError: If the restore operation fails. + + References: + - `restore-backup `__ + """ # noqa: E501 + data: Json = {"id": backup_id} + request = Request( + method=Method.POST, + endpoint="/_admin/backup/restore", + data=self.serializer.dumps(data), + prefix_needed=False, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise BackupRestoreError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return cast(Json, result["result"]) + + return await self._executor.execute(request, response_handler) + + async def delete(self, backup_id: str) -> None: + """Delete a backup. + + Args: + backup_id (str): Backup ID. + + Raises: + BackupDeleteError: If the delete operation fails. + + References: + - `delete-backup `__ + """ # noqa: E501 + data: Json = {"id": backup_id} + request = Request( + method=Method.POST, + endpoint="/_admin/backup/delete", + data=self.serializer.dumps(data), + prefix_needed=False, + ) + + def response_handler(resp: Response) -> None: + if not resp.is_success: + raise BackupDeleteError(resp, request) + + await self._executor.execute(request, response_handler) + + async def upload( + self, + backup_id: Optional[str] = None, + repository: Optional[str] = None, + abort: Optional[bool] = None, + config: Optional[Json] = None, + upload_id: Optional[str] = None, + ) -> Result[Json]: + """Manage backup uploads. + + Args: + backup_id (str | None): Backup ID used for scheduling an upload. Mutually + exclusive with parameter **upload_id**. + repository (str | None): Remote repository URL (e.g. "local://tmp/backups"). + abort (bool | None): If set to `True`, the running upload is aborted. Used with + parameter **upload_id**. + config (dict | None): Remote repository configuration. Required for scheduling + an upload and mutually exclusive with parameter **upload_id**. + upload_id (str | None): Upload ID. Mutually exclusive with parameters + **backup_id**, **repository**, and **config**. + + Returns: + dict: Upload details. + + Raises: + BackupUploadError: If the upload operation fails.
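A sketch of the create-then-upload flow, assuming a connected `db` handle. The repository URL and the `config` payload are placeholders whose exact layout depends on the remote (rclone) backend, and the `info["id"]` lookup assumes the create response carries the new backup id under `"id"`:

```python
from arangoasync.database import StandardDatabase


async def nightly_backup(db: StandardDatabase) -> None:
    info = await db.backup.create(label="nightly", timeout=120)
    job = await db.backup.upload(
        backup_id=info["id"],
        repository="local://tmp/backups",
        config={"local": {"type": "local"}},  # placeholder remote config
    )
    print(job)
```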
+ + References: + - `upload-a-backup-to-a-remote-repository `__ + """ # noqa: E501 + data: Json = {} + if upload_id is not None: + data["uploadId"] = upload_id + if backup_id is not None: + data["id"] = backup_id + if repository is not None: + data["remoteRepository"] = repository + if abort is not None: + data["abort"] = abort + if config is not None: + data["config"] = config + + request = Request( + method=Method.POST, + endpoint="/_admin/backup/upload", + data=self.serializer.dumps(data), + prefix_needed=False, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise BackupUploadError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return cast(Json, result["result"]) + + return await self._executor.execute(request, response_handler) + + async def download( + self, + backup_id: Optional[str] = None, + repository: Optional[str] = None, + abort: Optional[bool] = None, + config: Optional[Json] = None, + download_id: Optional[str] = None, + ) -> Result[Json]: + """Manage backup downloads. + + Args: + backup_id (str | None): Backup ID used for scheduling a download. Mutually + exclusive with parameter **download_id**. + repository (str | None): Remote repository URL (e.g. "local://tmp/backups"). + abort (bool | None): If set to `True`, the running download is aborted. + config (dict | None): Remote repository configuration. Required for scheduling + a download and mutually exclusive with parameter **download_id**. + download_id (str | None): Download ID. Mutually exclusive with parameters + **backup_id**, **repository**, and **config**. + + Returns: + dict: Download details. + + Raises: + BackupDownloadError: If the download operation fails. + + References: + - `download-a-backup-from-a-remote-repository `__ + """ # noqa: E501 + data: Json = {} + if download_id is not None: + data["downloadId"] = download_id + if backup_id is not None: + data["id"] = backup_id + if repository is not None: + data["remoteRepository"] = repository + if abort is not None: + data["abort"] = abort + if config is not None: + data["config"] = config + + request = Request( + method=Method.POST, + endpoint="/_admin/backup/download", + data=self.serializer.dumps(data), + prefix_needed=False, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise BackupDownloadError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return cast(Json, result["result"]) + + return await self._executor.execute(request, response_handler) diff --git a/arangoasync/client.py b/arangoasync/client.py index 235cfae..b2eed10 100644 --- a/arangoasync/client.py +++ b/arangoasync/client.py @@ -147,7 +147,7 @@ async def db( self, name: str, auth_method: str = "basic", - auth: Optional[Auth] = None, + auth: Optional[Auth | str] = None, token: Optional[JwtToken] = None, verify: bool = False, compression: Optional[CompressionManager] = None, @@ -169,7 +169,8 @@ async def db( and client are synchronized. - "superuser": Superuser JWT authentication. The `token` parameter is required. The `auth` parameter is ignored. - auth (Auth | None): Login information. + auth (Auth | str | None): Login information (username and password) or + access token. token (JwtToken | None): JWT token. verify (bool): Verify the connection by sending a test request.
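The widened `auth` parameter means a raw access-token string can now stand in for an `Auth` object; per the `isinstance` branch in the next hunk, the string is wrapped as `Auth(username="", password=token)`. A sketch, assuming the usual `ArangoClient` entry point and a placeholder host:

```python
from arangoasync import ArangoClient


async def properties_via_token(token: str):
    async with ArangoClient(hosts="http://localhost:8529") as client:
        # Equivalent to passing Auth(username="", password=token).
        db = await client.db("_system", auth=token, verify=True)
        return await db.properties()
```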
compression (CompressionManager | None): If set, supersedes the @@ -188,6 +189,9 @@ async def db( """ connection: Connection + if isinstance(auth, str): + auth = Auth(password=auth) + if auth_method == "basic": if auth is None: raise ValueError("Basic authentication requires the `auth` parameter") diff --git a/arangoasync/cluster.py b/arangoasync/cluster.py new file mode 100644 index 0000000..ce33b92 --- /dev/null +++ b/arangoasync/cluster.py @@ -0,0 +1,451 @@ +__all__ = ["Cluster"] + +from typing import List, Optional, cast + +from arangoasync.exceptions import ( + ClusterEndpointsError, + ClusterHealthError, + ClusterMaintenanceModeError, + ClusterRebalanceError, + ClusterServerIDError, + ClusterServerRoleError, + ClusterStatisticsError, +) +from arangoasync.executor import ApiExecutor +from arangoasync.request import Method, Request +from arangoasync.response import Response +from arangoasync.result import Result +from arangoasync.serialization import Deserializer, Serializer +from arangoasync.typings import Json, Jsons, Params + + +class Cluster: + """Cluster-specific endpoints.""" + + def __init__(self, executor: ApiExecutor) -> None: + self._executor = executor + + @property + def serializer(self) -> Serializer[Json]: + """Return the serializer.""" + return self._executor.serializer + + @property + def deserializer(self) -> Deserializer[Json, Jsons]: + """Return the deserializer.""" + return self._executor.deserializer + + async def health(self) -> Result[Json]: + """Queries the health of the cluster. + + Returns: + dict: Health status of the cluster. + + Raises: + ClusterHealthError: If retrieval fails. + + References: + - `get-the-cluster-health `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_admin/cluster/health", + prefix_needed=False, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ClusterHealthError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return Response.format_body(result) + + return await self._executor.execute(request, response_handler) + + async def statistics(self, db_server: str) -> Result[Json]: + """Queries the statistics of the given DB-Server. + + Args: + db_server (str): The ID of the DB-Server. + + Returns: + dict: Statistics of the DB-Server. + + Raises: + ClusterStatisticsError: If retrieval fails. + + References: + - `get-the-statistics-of-a-db-server `__ + """ # noqa: E501 + params: Params = {"DBserver": db_server} + + request = Request( + method=Method.GET, + endpoint="/_admin/cluster/statistics", + prefix_needed=False, + params=params, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ClusterStatisticsError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return Response.format_body(result) + + return await self._executor.execute(request, response_handler) + + async def endpoints(self) -> Result[List[str]]: + """Fetch all coordinator endpoints. + + Returns: + list: List of coordinator endpoints. + + Raises: + ClusterEndpointsError: If retrieval fails. 
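A usage sketch for the health call, assuming a connected `db` handle on a cluster deployment; the `"Health"`/`"Status"` keys reflect the documented response shape and are worth double-checking against the target server version:

```python
from arangoasync.database import StandardDatabase


async def print_cluster_health(db: StandardDatabase) -> None:
    health = await db.cluster.health()
    # Per-server entries are grouped under the "Health" key.
    for server_id, info in health.get("Health", {}).items():
        print(server_id, info.get("Status"))
```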
+ + References: + - `list-all-coordinator-endpoints `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_api/cluster/endpoints", + prefix_needed=False, + ) + + def response_handler(resp: Response) -> List[str]: + if not resp.is_success: + raise ClusterEndpointsError(resp, request) + body: Json = self.deserializer.loads(resp.raw_body) + return [item["endpoint"] for item in body["endpoints"]] + + return await self._executor.execute(request, response_handler) + + async def server_id(self) -> Result[str]: + """Get the ID of the current server. + + Returns: + str: Server ID. + + Raises: + ClusterServerIDError: If retrieval fails. + + References: + - `get-the-server-id `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_admin/server/id", + prefix_needed=False, + ) + + def response_handler(resp: Response) -> str: + if not resp.is_success: + raise ClusterServerIDError(resp, request) + return str(self.deserializer.loads(resp.raw_body)["id"]) + + return await self._executor.execute(request, response_handler) + + async def server_role(self) -> Result[str]: + """Get the role of the current server. + + Returns: + str: Server role. Possible values: "SINGLE", "COORDINATOR", "PRIMARY", "SECONDARY", "AGENT", "UNDEFINED". + + Raises: + ClusterServerRoleError: If retrieval fails. + + References: + - `get-the-server-role `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_admin/server/role", + prefix_needed=False, + ) + + def response_handler(resp: Response) -> str: + if not resp.is_success: + raise ClusterServerRoleError(resp, request) + return str(self.deserializer.loads(resp.raw_body)["role"]) + + return await self._executor.execute(request, response_handler) + + async def toggle_maintenance_mode(self, mode: str) -> Result[Json]: + """Enable or disable the cluster supervision (agency) maintenance mode. + + Args: + mode (str): Maintenance mode. Allowed values are "on" or "off". + + Returns: + dict: Result of the operation. + + Raises: + ClusterMaintenanceModeError: If the toggle operation fails. + + References: + - `toggle-cluster-maintenance-mode `__ + """ # noqa: E501 + request = Request( + method=Method.PUT, + endpoint="/_admin/cluster/maintenance", + prefix_needed=False, + data=f'"{mode}"', + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ClusterMaintenanceModeError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return Response.format_body(result) + + return await self._executor.execute(request, response_handler) + + async def server_maintenance_mode(self, server_id: str) -> Result[Json]: + """Check whether the specified DB-Server is in maintenance mode and until when. + + Args: + server_id (str): Server ID. + + Returns: + dict: Maintenance status for the given server. + + Raises: + ClusterMaintenanceModeError: If retrieval fails.
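A sketch combining the role check with the supervision maintenance toggle; assumptions: a connected `db` handle and a coordinator to talk to:

```python
from arangoasync.database import StandardDatabase


async def with_supervision_paused(db: StandardDatabase) -> None:
    if await db.cluster.server_role() == "COORDINATOR":
        await db.cluster.toggle_maintenance_mode("on")
        try:
            ...  # perform rolling maintenance here
        finally:
            await db.cluster.toggle_maintenance_mode("off")
```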
+ + References: + - `get-the-maintenance-status-of-a-db-server `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint=f"/_admin/cluster/maintenance/{server_id}", + prefix_needed=False, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ClusterMaintenanceModeError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return Response.format_body(result) + + return await self._executor.execute(request, response_handler) + + async def toggle_server_maintenance_mode( + self, server_id: str, mode: str, timeout: Optional[int] = None + ) -> None: + """Enable or disable the maintenance mode for the given server. + + Args: + server_id (str): Server ID. + mode (str): Maintenance mode. Allowed values are "normal" and "maintenance". + timeout (int | None): After how many seconds the maintenance mode shall automatically end. + + Raises: + ClusterMaintenanceModeError: If the operation fails. + + References: + - `set-the-maintenance-status-of-a-db-server `__ + """ # noqa: E501 + data: Json = {"mode": mode} + if timeout is not None: + data["timeout"] = timeout + + request = Request( + method=Method.PUT, + endpoint=f"/_admin/cluster/maintenance/{server_id}", + prefix_needed=False, + data=self.serializer.dumps(data), + ) + + def response_handler(resp: Response) -> None: + if not resp.is_success: + raise ClusterMaintenanceModeError(resp, request) + + await self._executor.execute(request, response_handler) + + async def calculate_imbalance(self) -> Result[Json]: + """Computes the current cluster imbalance and returns the result. + + Returns: + dict: Cluster imbalance information. + + Raises: + ClusterRebalanceError: If retrieval fails. + + References: + - `get-the-current-cluster-imbalance `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_admin/cluster/rebalance") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ClusterRebalanceError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return Response.format_body(result) + + return await self._executor.execute(request, response_handler) + + async def calculate_rebalance_plan( + self, + databases_excluded: Optional[List[str]] = None, + exclude_system_collections: Optional[bool] = None, + leader_changes: Optional[bool] = None, + maximum_number_of_moves: Optional[int] = None, + move_followers: Optional[bool] = None, + move_leaders: Optional[bool] = None, + pi_factor: Optional[float] = None, + version: int = 1, + ) -> Result[Json]: + """Compute a set of move shard operations to improve balance. + + Args: + databases_excluded (list | None): List of database names to be excluded from + the analysis. + exclude_system_collections (bool | None): Ignore system collections in the + rebalance plan. + leader_changes (bool | None): Allow leader changes without moving data. + maximum_number_of_moves (int | None): Maximum number of moves to be computed. + move_followers (bool | None): Allow moving shard followers. + move_leaders (bool | None): Allow moving shard leaders. + pi_factor (float | None): A weighting factor that should remain untouched. + version (int): Must be set to 1. + + Returns: + dict: Cluster rebalance plan. + + Raises: + ClusterRebalanceError: If retrieval fails. 
+ + References: + - `compute-a-set-of-move-shard-operations-to-improve-balance `__ + """ # noqa: E501 + data: Json = dict(version=version) + if databases_excluded is not None: + data["databasesExcluded"] = databases_excluded + if exclude_system_collections is not None: + data["excludeSystemCollections"] = exclude_system_collections + if leader_changes is not None: + data["leaderChanges"] = leader_changes + if maximum_number_of_moves is not None: + data["maximumNumberOfMoves"] = maximum_number_of_moves + if move_followers is not None: + data["moveFollowers"] = move_followers + if move_leaders is not None: + data["moveLeaders"] = move_leaders + if pi_factor is not None: + data["piFactor"] = pi_factor + + request = Request( + method=Method.POST, + endpoint="/_admin/cluster/rebalance", + prefix_needed=False, + data=self.serializer.dumps(data), + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ClusterRebalanceError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return cast(Json, result["result"]) + + return await self._executor.execute(request, response_handler) + + async def rebalance( + self, + databases_excluded: Optional[List[str]] = None, + exclude_system_collections: Optional[bool] = None, + leader_changes: Optional[bool] = None, + maximum_number_of_moves: Optional[int] = None, + move_followers: Optional[bool] = None, + move_leaders: Optional[bool] = None, + pi_factor: Optional[float] = None, + version: int = 1, + ) -> Result[Json]: + """Compute and execute a set of move shard operations to improve balance. + + Args: + databases_excluded (list | None): List of database names to be excluded from + the analysis. + exclude_system_collections (bool | None): Ignore system collections in the + rebalance plan. + leader_changes (bool | None): Allow leader changes without moving data. + maximum_number_of_moves (int | None): Maximum number of moves to be computed. + move_followers (bool | None): Allow moving shard followers. + move_leaders (bool | None): Allow moving shard leaders. + pi_factor (float | None): A weighting factor that should remain untouched. + version (int): Must be set to 1. + + Returns: + dict: Cluster rebalance plan. + + Raises: + ClusterRebalanceError: If retrieval fails. 
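A dry-run sketch for the plan computation above, with a connected `db` handle assumed; the `"moves"` key follows the documented rebalance response shape:

```python
from arangoasync.database import StandardDatabase


async def preview_rebalance(db: StandardDatabase) -> None:
    plan = await db.cluster.calculate_rebalance_plan(
        maximum_number_of_moves=100,
        exclude_system_collections=True,
    )
    # The computed operations can later be fed to execute_rebalance_plan().
    print(plan.get("moves"))
```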
+ + References: + - `compute-and-execute-a-set-of-move-shard-operations-to-improve-balance `__ + """ # noqa: E501 + data: Json = dict(version=version) + if databases_excluded is not None: + data["databasesExcluded"] = databases_excluded + if exclude_system_collections is not None: + data["excludeSystemCollections"] = exclude_system_collections + if leader_changes is not None: + data["leaderChanges"] = leader_changes + if maximum_number_of_moves is not None: + data["maximumNumberOfMoves"] = maximum_number_of_moves + if move_followers is not None: + data["moveFollowers"] = move_followers + if move_leaders is not None: + data["moveLeaders"] = move_leaders + if pi_factor is not None: + data["piFactor"] = pi_factor + + request = Request( + method=Method.PUT, + endpoint="/_admin/cluster/rebalance", + prefix_needed=False, + data=self.serializer.dumps(data), + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ClusterRebalanceError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return cast(Json, result["result"]) + + return await self._executor.execute(request, response_handler) + + async def execute_rebalance_plan( + self, + moves: List[Json], + version: int = 1, + ) -> Result[int]: + """Execute a set of move shard operations. + + Args: + moves (list): List of move shard operations to be executed. + version (int): Must be set to 1. + + Returns: + int: The response code, indicating whether the move operations have been accepted and scheduled for execution. + + Raises: + ClusterRebalanceError: If the execution fails. + + References: + - `execute-a-set-of-move-shard-operations `__ + """ # noqa: E501 + data: Json = dict(version=version, moves=moves) + + request = Request( + method=Method.POST, + endpoint="/_admin/cluster/rebalance/execute", + data=self.serializer.dumps(data), + prefix_needed=False, + ) + + def response_handler(resp: Response) -> int: + if not resp.is_success: + raise ClusterRebalanceError(resp, request) + result: int = self.deserializer.loads(resp.raw_body)["code"] + return result + + return await self._executor.execute(request, response_handler) diff --git a/arangoasync/collection.py b/arangoasync/collection.py index 810ee06..52a9d9e 100644 --- a/arangoasync/collection.py +++ b/arangoasync/collection.py @@ -16,7 +16,16 @@ HTTP_PRECONDITION_FAILED, ) from arangoasync.exceptions import ( + CollectionChecksumError, + CollectionCompactError, + CollectionConfigureError, CollectionPropertiesError, + CollectionRecalculateCountError, + CollectionRenameError, + CollectionResponsibleShardError, + CollectionRevisionError, + CollectionShardsError, + CollectionStatisticsError, CollectionTruncateError, DocumentCountError, DocumentDeleteError, @@ -40,7 +49,9 @@ from arangoasync.result import Result from arangoasync.serialization import Deserializer, Serializer from arangoasync.typings import ( + CollectionInfo, CollectionProperties, + CollectionStatistics, IndexProperties, Json, Jsons, @@ -481,6 +492,26 @@ def response_handler(resp: Response) -> bool: return await self._executor.execute(request, response_handler) + async def recalculate_count(self) -> None: + """Recalculate the document count. + + Raises: + CollectionRecalculateCountError: If recalculation fails.
+ + References: + - `recalculate-the-document-count-of-a-collection `__ + """ # noqa: E501 + request = Request( + method=Method.PUT, + endpoint=f"/_api/collection/{self.name}/recalculateCount", + ) + + def response_handler(resp: Response) -> None: + if not resp.is_success: + raise CollectionRecalculateCountError(resp, request) + + await self._executor.execute(request, response_handler) + async def properties(self) -> Result[CollectionProperties]: """Return the full properties of the current collection. @@ -501,7 +532,129 @@ async def properties(self) -> Result[CollectionProperties]: def response_handler(resp: Response) -> CollectionProperties: if not resp.is_success: raise CollectionPropertiesError(resp, request) - return CollectionProperties(self._executor.deserialize(resp.raw_body)) + return CollectionProperties(self.deserializer.loads(resp.raw_body)) + + return await self._executor.execute(request, response_handler) + + async def configure( + self, + cache_enabled: Optional[bool] = None, + computed_values: Optional[Jsons] = None, + replication_factor: Optional[int | str] = None, + schema: Optional[Json] = None, + wait_for_sync: Optional[bool] = None, + write_concern: Optional[int] = None, + ) -> Result[CollectionProperties]: + """Change the properties of a collection. + + Only the provided attributes are updated. + + Args: + cache_enabled (bool | None): Whether the in-memory hash cache + for documents should be enabled for this collection. + computed_values (list | None): An optional list of objects, each + representing a computed value. + replication_factor (int | str | None): In a cluster, this attribute determines + how many copies of each shard are kept on different DB-Servers. + For SatelliteCollections, it needs to be the string "satellite". + schema (dict | None): The configuration of the collection-level schema + validation for documents. + wait_for_sync (bool | None): If set to `True`, the data is synchronized + to disk before returning from a document create, update, replace or + removal operation. + write_concern (int | None): Determines how many copies of each shard are + required to be in sync on the different DB-Servers. + + Returns: + CollectionProperties: Properties. + + Raises: + CollectionConfigureError: If configuration fails. + + References: + - `change-the-properties-of-a-collection `__ + """ # noqa: E501 + data: Json = {} + if cache_enabled is not None: + data["cacheEnabled"] = cache_enabled + if computed_values is not None: + data["computedValues"] = computed_values + if replication_factor is not None: + data["replicationFactor"] = replication_factor + if schema is not None: + data["schema"] = schema + if wait_for_sync is not None: + data["waitForSync"] = wait_for_sync + if write_concern is not None: + data["writeConcern"] = write_concern + request = Request( + method=Method.PUT, + endpoint=f"/_api/collection/{self.name}/properties", + data=self.serializer.dumps(data), + ) + + def response_handler(resp: Response) -> CollectionProperties: + if not resp.is_success: + raise CollectionConfigureError(resp, request) + return CollectionProperties(self.deserializer.loads(resp.raw_body)) + + return await self._executor.execute(request, response_handler) + + async def rename(self, new_name: str) -> None: + """Rename the collection. + + Renames may not be reflected immediately in async execution, batch + execution or transactions. It is recommended to initialize new API + wrappers after a rename. + + Note: + Renaming collections is not supported in cluster deployments.
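A usage sketch for `configure()`, assuming a connected `db` handle and a hypothetical `students` collection:

```python
from arangoasync.database import StandardDatabase


async def tighten_durability(db: StandardDatabase) -> None:
    col = db.collection("students")  # hypothetical collection name
    # Only the attributes passed here are updated on the server.
    props = await col.configure(wait_for_sync=True, cache_enabled=False)
    print(props)
```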
+ + Args: + new_name (str): New collection name. + + Raises: + CollectionRenameError: If rename fails. + + References: + - `rename-a-collection `__ + """ # noqa: E501 + data: Json = {"name": new_name} + request = Request( + method=Method.PUT, + endpoint=f"/_api/collection/{self.name}/rename", + data=self.serializer.dumps(data), + ) + + def response_handler(resp: Response) -> None: + if not resp.is_success: + raise CollectionRenameError(resp, request) + self._name = new_name + self._id_prefix = f"{new_name}/" + + await self._executor.execute(request, response_handler) + + async def compact(self) -> Result[CollectionInfo]: + """Compact a collection. + + Returns: + CollectionInfo: Collection information. + + Raises: + CollectionCompactError: If compaction fails. + + References: + - `compact-a-collection `__ + """ # noqa: E501 + request = Request( + method=Method.PUT, + endpoint=f"/_api/collection/{self.name}/compact", + ) + + def response_handler(resp: Response) -> CollectionInfo: + if not resp.is_success: + raise CollectionCompactError(resp, request) + return CollectionInfo(self.deserializer.loads(resp.raw_body)) return await self._executor.execute(request, response_handler) @@ -552,7 +705,10 @@ async def count(self) -> Result[int]: Raises: DocumentCountError: If retrieval fails. - """ + + References: + - `get-the-document-count-of-a-collection `__ + """ # noqa: E501 request = Request( method=Method.GET, endpoint=f"/_api/collection/{self.name}/count" ) @@ -565,6 +721,158 @@ def response_handler(resp: Response) -> int: return await self._executor.execute(request, response_handler) + async def statistics(self) -> Result[CollectionStatistics]: + """Get additional statistical information about the collection. + + Returns: + CollectionStatistics: Collection statistics. + + Raises: + CollectionStatisticsError: If retrieval fails. + + References: + - `get-the-collection-statistics `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint=f"/_api/collection/{self.name}/figures", + ) + + def response_handler(resp: Response) -> CollectionStatistics: + if not resp.is_success: + raise CollectionStatisticsError(resp, request) + return CollectionStatistics(self.deserializer.loads(resp.raw_body)) + + return await self._executor.execute(request, response_handler) + + async def responsible_shard(self, document: Json) -> Result[str]: + """Return the ID of the shard responsible for given document. + + If the document does not exist, return the shard that would be + responsible. + + Args: + document (dict): Document body with "_key" field. + + Returns: + str: Shard ID. + + Raises: + CollectionResponsibleShardError: If retrieval fails. + + References: + - `get-the-responsible-shard-for-a-document `__ + """ # noqa: E501 + request = Request( + method=Method.PUT, + endpoint=f"/_api/collection/{self.name}/responsibleShard", + data=self.serializer.dumps(document), + ) + + def response_handler(resp: Response) -> str: + if resp.is_success: + body = self.deserializer.loads(resp.raw_body) + return cast(str, body["shardId"]) + raise CollectionResponsibleShardError(resp, request) + + return await self._executor.execute(request, response_handler) + + async def shards(self, details: Optional[bool] = None) -> Result[Json]: + """Return collection shards and properties. + + Available only in a cluster setup. + + Args: + details (bool | None): If set to `True`, include responsible + servers for these shards. + + Returns: + dict: Collection shards. + + Raises: + CollectionShardsError: If retrieval fails. 
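The shard helpers compose naturally. A sketch, assuming a cluster deployment, a connected `db` handle, and a hypothetical `students` collection with a document key `alice`:

```python
from arangoasync.database import StandardDatabase


async def inspect_sharding(db: StandardDatabase) -> None:
    col = db.collection("students")  # hypothetical collection name
    # Which shard does (or would) hold this key?
    print(await col.responsible_shard({"_key": "alice"}))
    # All shards, including the servers responsible for them:
    print(await col.shards(details=True))
```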
+ + References: + - `get-the-shard-ids-of-a-collection `__ + """ # noqa: E501 + params: Params = {} + if details is not None: + params["details"] = details + + request = Request( + method=Method.GET, + endpoint=f"/_api/collection/{self.name}/shards", + params=params, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise CollectionShardsError(resp, request) + return cast(Json, self.deserializer.loads(resp.raw_body)["shards"]) + + return await self._executor.execute(request, response_handler) + + async def revision(self) -> Result[str]: + """Return collection revision. + + Returns: + str: Collection revision. + + Raises: + CollectionRevisionError: If retrieval fails. + + References: + - `get-the-collection-revision-id `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint=f"/_api/collection/{self.name}/revision", + ) + + def response_handler(resp: Response) -> str: + if not resp.is_success: + raise CollectionRevisionError(resp, request) + return cast(str, self.deserializer.loads(resp.raw_body)["revision"]) + + return await self._executor.execute(request, response_handler) + + async def checksum( + self, with_rev: Optional[bool] = None, with_data: Optional[bool] = None + ) -> Result[str]: + """Calculate collection checksum. + + Args: + with_rev (bool | None): Include document revisions in checksum calculation. + with_data (bool | None): Include document data in checksum calculation. + + Returns: + str: Collection checksum. + + Raises: + CollectionChecksumError: If retrieval fails. + + References: + - `get-the-collection-checksum `__ + """ # noqa: E501 + params: Params = {} + if with_rev is not None: + params["withRevision"] = with_rev + if with_data is not None: + params["withData"] = with_data + + request = Request( + method=Method.GET, + endpoint=f"/_api/collection/{self.name}/checksum", + params=params, + ) + + def response_handler(resp: Response) -> str: + if not resp.is_success: + raise CollectionChecksumError(resp, request) + return cast(str, self.deserializer.loads(resp.raw_body)["checksum"]) + + return await self._executor.execute(request, response_handler) + async def has( self, document: str | Json, @@ -1270,6 +1578,108 @@ def response_handler( return await self._executor.execute(request, response_handler) + async def import_bulk( + self, + documents: bytes | str, + doc_type: Optional[str] = None, + complete: Optional[bool] = True, + details: Optional[bool] = True, + from_prefix: Optional[str] = None, + to_prefix: Optional[str] = None, + overwrite: Optional[bool] = None, + overwrite_collection_prefix: Optional[bool] = None, + on_duplicate: Optional[str] = None, + wait_for_sync: Optional[bool] = None, + ignore_missing: Optional[bool] = None, + ) -> Result[Json]: + """Load JSON data in bulk into ArangoDB. + + Args: + documents (bytes | str): String representation of the JSON data to import. + doc_type (str | None): Determines how the body of the request is interpreted. + Possible values: "", "documents", "array", "auto". + complete (bool | None): If set to `True`, the whole import fails if any error occurs. + Otherwise, the import continues even if some documents are invalid and cannot + be imported, skipping the problematic documents. + details (bool | None): If set to `True`, the result includes a `details` + attribute with information about documents that could not be imported. + from_prefix (str | None): String prefix prepended to the value of "_from" + field in each edge document inserted. 
For example, prefix "foo" + prepended to "_from": "bar" will result in "_from": "foo/bar". + Applies only to edge collections. + to_prefix (str | None): String prefix prepended to the value of "_to" + field in each edge document inserted. For example, prefix "foo" + prepended to "_to": "bar" will result in "_to": "foo/bar". + Applies only to edge collections. + overwrite (bool | None): If set to `True`, all existing documents are removed + prior to the import. Indexes are still preserved. + overwrite_collection_prefix (bool | None): Force the `fromPrefix` and + `toPrefix`, possibly replacing existing collection name prefixes. + on_duplicate (str | None): Action to take on unique key constraint violations + (for documents with "_key" fields). Allowed values are "error" (do + not import the new documents and count them as errors), "update" + (update the existing documents while preserving any fields missing + in the new ones), "replace" (replace the existing documents with + new ones), and "ignore" (do not import the new documents and count + them as ignored, as opposed to counting them as errors). Options + "update" and "replace" may fail on secondary unique key constraint + violations. + wait_for_sync (bool | None): Block until operation is synchronized to disk. + ignore_missing (bool | None): When importing JSON arrays of tabular data + (type parameter is omitted), the first line of the request body defines + the attribute keys and the subsequent lines the attribute values for each + document. Subsequent lines with a different number of elements than the + first line are not imported by default. You can enable this option to + import them anyway. For the missing elements, the document attributes + are omitted. Excess elements are ignored. + + Returns: + dict: Result of the import operation. + + Raises: + DocumentInsertError: If import fails. + + References: + - `import-json-data-as-documents `__ + """ # noqa: E501 + params: Params = dict() + params["collection"] = self.name + if doc_type is not None: + params["type"] = doc_type + if complete is not None: + params["complete"] = complete + if details is not None: + params["details"] = details + if from_prefix is not None: + params["fromPrefix"] = from_prefix + if to_prefix is not None: + params["toPrefix"] = to_prefix + if overwrite is not None: + params["overwrite"] = overwrite + if overwrite_collection_prefix is not None: + params["overwriteCollectionPrefix"] = overwrite_collection_prefix + if on_duplicate is not None: + params["onDuplicate"] = on_duplicate + if wait_for_sync is not None: + params["waitForSync"] = wait_for_sync + if ignore_missing is not None: + params["ignoreMissing"] = ignore_missing + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise DocumentInsertError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + request = Request( + method=Method.POST, + endpoint="/_api/import", + data=documents, + params=params, + ) + + return await self._executor.execute(request, response_handler) + class StandardCollection(Collection[T, U, V]): """Standard collection API wrapper. 
@@ -1444,9 +1854,9 @@ async def insert( def response_handler(resp: Response) -> bool | Json: if resp.is_success: - if silent is True: + if silent: return True - return self._executor.deserialize(resp.raw_body) + return self.deserializer.loads(resp.raw_body) msg: Optional[str] = None if resp.status_code == HTTP_BAD_PARAMETER: msg = ( @@ -1551,7 +1961,7 @@ def response_handler(resp: Response) -> bool | Json: if resp.is_success: if silent is True: return True - return self._executor.deserialize(resp.raw_body) + return self.deserializer.loads(resp.raw_body) msg: Optional[str] = None if resp.status_code == HTTP_PRECONDITION_FAILED: raise DocumentRevisionError(resp, request) @@ -1641,7 +2051,7 @@ def response_handler(resp: Response) -> bool | Json: if resp.is_success: if silent is True: return True - return self._executor.deserialize(resp.raw_body) + return self.deserializer.loads(resp.raw_body) msg: Optional[str] = None if resp.status_code == HTTP_PRECONDITION_FAILED: raise DocumentRevisionError(resp, request) @@ -1726,7 +2136,7 @@ def response_handler(resp: Response) -> bool | Json: if resp.is_success: if silent is True: return True - return self._executor.deserialize(resp.raw_body) + return self.deserializer.loads(resp.raw_body) msg: Optional[str] = None if resp.status_code == HTTP_PRECONDITION_FAILED: raise DocumentRevisionError(resp, request) diff --git a/arangoasync/connection.py b/arangoasync/connection.py index f404248..5fa6363 100644 --- a/arangoasync/connection.py +++ b/arangoasync/connection.py @@ -160,7 +160,10 @@ def compress_request(self, request: Request) -> bool: return result - async def process_request(self, request: Request) -> Response: + async def process_request( + self, + request: Request, + ) -> Response: """Process request, potentially trying multiple hosts. Args: @@ -173,7 +176,8 @@ async def process_request(self, request: Request) -> Response: ConnectionAbortedError: If it can't connect to host(s) within limit. 
""" - request.endpoint = f"{self._db_endpoint}{request.endpoint}" + if request.prefix_needed: + request.endpoint = f"{self._db_endpoint}{request.endpoint}" host_index = self._host_resolver.get_host_index() for tries in range(self._host_resolver.max_tries): try: @@ -373,6 +377,7 @@ async def refresh_token(self) -> None: method=Method.POST, endpoint="/_open/auth", data=auth.encode("utf-8"), + prefix_needed=False, ) try: diff --git a/arangoasync/database.py b/arangoasync/database.py index dbcc319..2cbbc68 100644 --- a/arangoasync/database.py +++ b/arangoasync/database.py @@ -6,14 +6,20 @@ ] -from typing import Any, List, Optional, Sequence, TypeVar, cast +from datetime import datetime +from typing import Any, Dict, List, Optional, Sequence, TypeVar, cast from warnings import warn from arangoasync.aql import AQL +from arangoasync.backup import Backup +from arangoasync.cluster import Cluster from arangoasync.collection import Collection, StandardCollection from arangoasync.connection import Connection from arangoasync.errno import HTTP_FORBIDDEN, HTTP_NOT_FOUND from arangoasync.exceptions import ( + AccessTokenCreateError, + AccessTokenDeleteError, + AccessTokenListError, AnalyzerCreateError, AnalyzerDeleteError, AnalyzerGetError, @@ -22,11 +28,14 @@ AsyncJobListError, CollectionCreateError, CollectionDeleteError, + CollectionKeyGeneratorsError, CollectionListError, + DatabaseCompactError, DatabaseCreateError, DatabaseDeleteError, DatabaseListError, DatabasePropertiesError, + DatabaseSupportInfoError, GraphCreateError, GraphDeleteError, GraphListError, @@ -36,8 +45,37 @@ PermissionListError, PermissionResetError, PermissionUpdateError, + ServerApiCallsError, + ServerAvailableOptionsGetError, + ServerCheckAvailabilityError, + ServerCurrentOptionsGetError, + ServerEchoError, + ServerEncryptionError, + ServerEngineError, + ServerExecuteError, + ServerLicenseGetError, + ServerLicenseSetError, + ServerLogLevelError, + ServerLogLevelResetError, + ServerLogLevelSetError, + ServerLogSettingError, + ServerLogSettingSetError, + ServerMetricsError, + ServerModeError, + ServerModeSetError, + ServerReadLogError, + ServerReloadRoutingError, + ServerShutdownError, + ServerShutdownProgressError, ServerStatusError, + ServerTimeError, + ServerTLSError, + ServerTLSReloadError, ServerVersionError, + TaskCreateError, + TaskDeleteError, + TaskGetError, + TaskListError, TransactionAbortError, TransactionCommitError, TransactionExecuteError, @@ -64,12 +102,15 @@ DefaultApiExecutor, TransactionApiExecutor, ) +from arangoasync.foxx import Foxx from arangoasync.graph import Graph +from arangoasync.replication import Replication from arangoasync.request import Method, Request from arangoasync.response import Response from arangoasync.result import Result from arangoasync.serialization import Deserializer, Serializer from arangoasync.typings import ( + AccessToken, CollectionInfo, CollectionType, DatabaseProperties, @@ -171,6 +212,42 @@ def aql(self) -> AQL: """ return AQL(self._executor) + @property + def backup(self) -> Backup: + """Return Backup API wrapper. + + Returns: + arangoasync.backup.Backup: Backup API wrapper. + """ + return Backup(self._executor) + + @property + def cluster(self) -> Cluster: + """Return Cluster API wrapper. + + Returns: + arangoasync.cluster.Cluster: Cluster API wrapper. + """ + return Cluster(self._executor) + + @property + def foxx(self) -> Foxx: + """Return Foxx API wrapper. + + Returns: + arangoasync.foxx.Foxx: Foxx API wrapper. 
+ """ + return Foxx(self._executor) + + @property + def replication(self) -> Replication: + """Return Replication API wrapper. + + Returns: + Replication API wrapper. + """ + return Replication(self._executor) + async def properties(self) -> Result[DatabaseProperties]: """Return database properties. @@ -695,6 +772,29 @@ def response_handler(resp: Response) -> bool: return await self._executor.execute(request, response_handler) + async def key_generators(self) -> Result[List[str]]: + """Returns the available key generators for collections. + + Returns: + list: List of available key generators. + + Raises: + CollectionKeyGeneratorsError: If retrieval fails. + + References: + - `get-the-available-key-generators `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_api/key-generators") + + def response_handler(resp: Response) -> List[str]: + if not resp.is_success: + raise CollectionKeyGeneratorsError(resp, request) + return cast( + List[str], self.deserializer.loads(resp.raw_body)["keyGenerators"] + ) + + return await self._executor.execute(request, response_handler) + async def has_document( self, document: str | Json, @@ -2022,7 +2122,9 @@ async def reload_jwt_secrets(self) -> Result[Json]: References: - `hot-reload-the-jwt-secrets-from-disk `__ """ # noqa: 501 - request = Request(method=Method.POST, endpoint="/_admin/server/jwt") + request = Request( + method=Method.POST, endpoint="/_admin/server/jwt", prefix_needed=False + ) def response_handler(resp: Response) -> Json: if not resp.is_success: @@ -2032,6 +2134,171 @@ def response_handler(resp: Response) -> Json: return await self._executor.execute(request, response_handler) + async def create_access_token( + self, + user: str, + name: str, + valid_until: int, + ) -> Result[AccessToken]: + """Create an access token for the given user. + + Args: + user (str): The name of the user. + name (str): A name for the access token to make identification easier, + like a short description. + valid_until (int): A Unix timestamp in seconds to set the expiration date and time. + + Returns: + AccessToken: Information about the created access token, including the token itself. + + Raises: + AccessTokenCreateError: If the operation fails. + + References: + - `create-an-access-token `__ + """ # noqa: E501 + data: Json = { + "name": name, + "valid_until": valid_until, + } + + request = Request( + method=Method.POST, + endpoint=f"/_api/token/{user}", + data=self.serializer.dumps(data), + ) + + def response_handler(resp: Response) -> AccessToken: + if not resp.is_success: + raise AccessTokenCreateError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return AccessToken(result) + + return await self._executor.execute(request, response_handler) + + async def delete_access_token(self, user: str, token_id: int) -> None: + """List all access tokens for the given user. + + Args: + user (str): The name of the user. + token_id (int): The ID of the access token to delete. + + Raises: + AccessTokenDeleteError: If the operation fails. + + References: + - `delete-an-access-token `__ + """ # noqa: E501 + request = Request( + method=Method.DELETE, endpoint=f"/_api/token/{user}/{token_id}" + ) + + def response_handler(resp: Response) -> None: + if not resp.is_success: + raise AccessTokenDeleteError(resp, request) + + await self._executor.execute(request, response_handler) + + async def list_access_tokens(self, user: str) -> Result[Jsons]: + """List all access tokens for the given user. + + Args: + user (str): The name of the user. 
+ + Returns: + list: List of access tokens for the user. + + Raises: + AccessTokenListError: If the operation fails. + + References: + - `list-all-access-tokens `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint=f"/_api/token/{user}") + + def response_handler(resp: Response) -> Jsons: + if not resp.is_success: + raise AccessTokenListError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return cast(Jsons, result["tokens"]) + + return await self._executor.execute(request, response_handler) + + async def tls(self) -> Result[Json]: + """Return TLS data (keyfile, clientCA). + + This API requires authentication. + + Returns: + dict: dict containing the following components: + - keyfile: Information about the key file. + - clientCA: Information about the Certificate Authority (CA) for client certificate verification. + + Raises: + ServerTLSError: If the operation fails. + + References: + - `get-the-tls-data `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_admin/server/tls") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerTLSError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body)["result"] + return result + + return await self._executor.execute(request, response_handler) + + async def reload_tls(self) -> Result[Json]: + """Reload TLS data (keyfile, clientCA). + + This is a protected API and can only be executed with superuser rights. + + Returns: + dict: New TLS data. + + Raises: + ServerTLSReloadError: If the operation fails. + + References: + - `reload-the-tls-data `__ + """ # noqa: E501 + request = Request(method=Method.POST, endpoint="/_admin/server/tls") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerTLSReloadError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body)["result"] + return result + + return await self._executor.execute(request, response_handler) + + async def encryption(self) -> Result[Json]: + """Rotate the user-supplied keys for encryption. + + This is a protected API and can only be executed with superuser rights. + This API is not available on Coordinator nodes. + + Returns: + dict: Encryption keys. + + Raises: + ServerEncryptionError: If the operation fails. + + References: + - `rotate-the-encryption-keys `__ + """ # noqa: E501 + request = Request(method=Method.POST, endpoint="/_admin/server/encryption") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerEncryptionError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body)["result"] + return result + + return await self._executor.execute(request, response_handler) + async def list_transactions(self) -> Result[Jsons]: """List all currently running stream transactions. @@ -2157,6 +2424,903 @@ def response_handler(resp: Response) -> Json: return await self._executor.execute(request, response_handler) + async def tasks(self) -> Result[Jsons]: + """Fetches all existing tasks from the server. + + Returns: + list: List of currently active server tasks. + + Raises: + TaskListError: If the list cannot be retrieved. 
+ + References: + - `list-all-tasks `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_api/tasks") + + def response_handler(resp: Response) -> Jsons: + if not resp.is_success: + raise TaskListError(resp, request) + result: Jsons = self.deserializer.loads_many(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def task(self, task_id: str) -> Result[Json]: + """Return the details of an active server task. + + Args: + task_id (str): Server task ID. + + Returns: + dict: Details of the server task. + + Raises: + TaskGetError: If the task details cannot be retrieved. + + References: + - `get-a-task `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint=f"/_api/tasks/{task_id}") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise TaskGetError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def create_task( + self, + command: str, + task_id: Optional[str] = None, + name: Optional[str] = None, + offset: Optional[int] = None, + params: Optional[Json] = None, + period: Optional[int] = None, + ) -> Result[Json]: + """Create a new task. + + Args: + command (str): The JavaScript code to be executed. + task_id (str | None): Optional task ID. If not provided, the server will + generate a unique ID. + name (str | None): The name of the task. + offset (int | None): The offset in seconds after which the task should + start executing. + params (dict | None): Parameters to be passed to the command. + period (int | None): The number of seconds between the executions. + + Returns: + dict: Details of the created task. + + Raises: + TaskCreateError: If the task cannot be created. + + References: + - `create-a-task `__ + - `create-a-task-with-id `__ + """ # noqa: E501 + data: Json = {"command": command} + if name is not None: + data["name"] = name + if offset is not None: + data["offset"] = offset + if params is not None: + data["params"] = params + if period is not None: + data["period"] = period + + if task_id is None: + request = Request( + method=Method.POST, + endpoint="/_api/tasks", + data=self.serializer.dumps(data), + ) + else: + request = Request( + method=Method.PUT, + endpoint=f"/_api/tasks/{task_id}", + data=self.serializer.dumps(data), + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise TaskCreateError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def delete_task( + self, + task_id: str, + ignore_missing: bool = False, + ) -> Result[bool]: + """Delete a server task. + + Args: + task_id (str): Task ID. + ignore_missing (bool): If `True`, do not raise an exception if the + task does not exist. + + Returns: + bool: `True` if the task was deleted successfully, `False` if the + task was not found and **ignore_missing** was set to `True`. + + Raises: + TaskDeleteError: If the operation fails.
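A task lifecycle sketch, assuming a connected `db` handle; the `task["id"]` lookup assumes the create response carries the generated task id under `"id"`:

```python
from arangoasync.database import StandardDatabase


async def heartbeat_task(db: StandardDatabase) -> None:
    task = await db.create_task(
        command="console.log('tick');",  # server-side JavaScript
        name="heartbeat",
        period=60,  # repeat every 60 seconds
    )
    print(await db.task(task["id"]))
    await db.delete_task(task["id"], ignore_missing=True)
```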
+ + References: + - `delete-a-task `__ + """ # noqa: E501 + request = Request(method=Method.DELETE, endpoint=f"/_api/tasks/{task_id}") + + def response_handler(resp: Response) -> bool: + if resp.is_success: + return True + if resp.status_code == HTTP_NOT_FOUND and ignore_missing: + return False + raise TaskDeleteError(resp, request) + + return await self._executor.execute(request, response_handler) + + async def engine(self) -> Result[Json]: + """Returns the storage engine the server is configured to use. + + Returns: + dict: Database engine details. + + Raises: + ServerEngineError: If the operation fails. + + References: + - `get-the-storage-engine-type `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_api/engine") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerEngineError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def time(self) -> Result[datetime]: + """Return server system time. + + Returns: + datetime.datetime: Server system time. + + Raises: + ServerTimeError: If the operation fails. + + References: + - `get-the-system-time `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_admin/time") + + def response_handler(resp: Response) -> datetime: + if not resp.is_success: + raise ServerTimeError(resp, request) + return datetime.fromtimestamp( + self.deserializer.loads(resp.raw_body)["time"] + ) + + return await self._executor.execute(request, response_handler) + + async def check_availability(self) -> Result[str]: + """Return ArangoDB server availability mode. + + Returns: + str: Server availability mode, either "readonly" or "default". + + Raises: + ServerCheckAvailabilityError: If the operation fails. + + References: + - `check-server-availability `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_admin/server/availability", + ) + + def response_handler(resp: Response) -> str: + if not resp.is_success: + raise ServerCheckAvailabilityError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return str(result["mode"]) + + return await self._executor.execute(request, response_handler) + + async def support_info(self) -> Result[Json]: + """Retrieves deployment information for support purposes. + + Note: + As this API may reveal sensitive data about the deployment, it can only be accessed from inside the _system database. + + Returns: + dict: Deployment information + + Raises: + DatabaseSupportInfoError: If the operation fails. + + References: + - `get-information-about-the-deployment `__ + """ # noqa: E501 + request = Request(method=Method.GET, endpoint="/_admin/support-info") + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise DatabaseSupportInfoError(resp, request) + + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def options(self) -> Result[Json]: + """Return the currently-set server options. + + Returns: + dict: Server options. + + Raises: + ServerCurrentOptionsGetError: If the operation fails. 
+
+        References:
+            - `get-the-startup-option-configuration `__
+        """  # noqa: E501
+        request = Request(method=Method.GET, endpoint="/_admin/options")
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerCurrentOptionsGetError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def options_available(self) -> Result[Json]:
+        """Return a description of all available server options.
+
+        Returns:
+            dict: Server options description.
+
+        Raises:
+            ServerAvailableOptionsGetError: If the operation fails.
+
+        References:
+            - `get-the-available-startup-options `__
+        """  # noqa: E501
+        request = Request(method=Method.GET, endpoint="/_admin/options-description")
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerAvailableOptionsGetError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def mode(self) -> Result[str]:
+        """Return the server mode ("default" or "readonly").
+
+        Returns:
+            str: Server mode, either "default" or "readonly".
+
+        Raises:
+            ServerModeError: If the operation fails.
+
+        References:
+            - `return-whether-or-not-a-server-is-in-read-only-mode `__
+        """  # noqa: E501
+        request = Request(method=Method.GET, endpoint="/_admin/server/mode")
+
+        def response_handler(resp: Response) -> str:
+            if not resp.is_success:
+                raise ServerModeError(resp, request)
+            return str(self.deserializer.loads(resp.raw_body)["mode"])
+
+        return await self._executor.execute(request, response_handler)
+
+    async def set_mode(self, mode: str) -> Result[str]:
+        """Set the server mode to read-only or default.
+
+        Args:
+            mode (str): Server mode. Possible values are "default" or "readonly".
+
+        Returns:
+            str: New server mode.
+
+        Raises:
+            ServerModeSetError: If the operation fails.
+
+        References:
+            - `set-the-server-mode-to-read-only-or-default `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.PUT,
+            endpoint="/_admin/server/mode",
+            data=self.serializer.dumps({"mode": mode}),
+        )
+
+        def response_handler(resp: Response) -> str:
+            if not resp.is_success:
+                raise ServerModeSetError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return str(result["mode"])
+
+        return await self._executor.execute(request, response_handler)
+
+    async def license(self) -> Result[Json]:
+        """View the license information and status of an Enterprise Edition instance.
+
+        Returns:
+            dict: Server license information.
+
+        Raises:
+            ServerLicenseGetError: If the operation fails.
+
+        References:
+            - `get-information-about-the-current-license `__
+        """  # noqa: E501
+        request = Request(method=Method.GET, endpoint="/_admin/license")
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerLicenseGetError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def set_license(self, license: str, force: Optional[bool] = False) -> None:
+        """Set a new license for an Enterprise Edition instance.
+
+        Args:
+            license (str): Base64-encoded license string, wrapped in double-quotes.
+            force (bool | None): Set to `True` to change the license even if it
+                expires sooner than the current one.
+
+        Raises:
+            ServerLicenseSetError: If the operation fails.
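+
+        Example:
+            A minimal usage sketch; the license below is a placeholder, not a
+            real key:
+
+            .. code-block:: python
+
+                # The payload is the Base64-encoded license, wrapped in double quotes.
+                await db.set_license('"<base64-encoded-license>"')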
+
+        References:
+            - `set-a-new-license `__
+        """  # noqa: E501
+        params: Params = {}
+        if force is not None:
+            params["force"] = force
+
+        request = Request(
+            method=Method.PUT,
+            endpoint="/_admin/license",
+            params=params,
+            data=license,
+        )
+
+        def response_handler(resp: Response) -> None:
+            if not resp.is_success:
+                raise ServerLicenseSetError(resp, request)
+
+        await self._executor.execute(request, response_handler)
+
+    async def shutdown(self, soft: Optional[bool] = None) -> None:
+        """Initiate server shutdown sequence.
+
+        Args:
+            soft (bool | None): If set to `True`, this initiates a soft shutdown.
+
+        Raises:
+            ServerShutdownError: If the operation fails.
+
+        References:
+            - `start-the-shutdown-sequence `__
+        """  # noqa: E501
+        params: Params = {}
+        if soft is not None:
+            params["soft"] = soft
+
+        request = Request(
+            method=Method.DELETE,
+            endpoint="/_admin/shutdown",
+            params=params,
+        )
+
+        def response_handler(resp: Response) -> None:
+            if not resp.is_success:
+                raise ServerShutdownError(resp, request)
+
+        await self._executor.execute(request, response_handler)
+
+    async def shutdown_progress(self) -> Result[Json]:
+        """Query the soft shutdown progress.
+
+        Returns:
+            dict: Information about the shutdown progress.
+
+        Raises:
+            ServerShutdownProgressError: If the operation fails.
+
+        References:
+            - `query-the-soft-shutdown-progress `__
+        """  # noqa: E501
+        request = Request(method=Method.GET, endpoint="/_admin/shutdown")
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerShutdownProgressError(resp, request)
+
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def compact(
+        self,
+        change_level: Optional[bool] = None,
+        compact_bottom_most_level: Optional[bool] = None,
+    ) -> None:
+        """Compact all databases. This method requires superuser access.
+
+        Note:
+            This command can cause a full rewrite of all data in all databases,
+            which may take very long for large databases.
+
+        Args:
+            change_level (bool | None): Whether or not compacted data should be
+                moved to the minimum possible level. Default value is `False`.
+            compact_bottom_most_level (bool | None): Whether or not to compact
+                the bottom-most level of data. Default value is `False`.
+
+        Raises:
+            DatabaseCompactError: If the operation fails.
+
+        References:
+            - `compact-all-databases `__
+        """  # noqa: E501
+        data: Json = {}
+        if change_level is not None:
+            data["changeLevel"] = change_level
+        if compact_bottom_most_level is not None:
+            data["compactBottomMostLevel"] = compact_bottom_most_level
+
+        request = Request(
+            method=Method.PUT,
+            endpoint="/_admin/compact",
+            data=self.serializer.dumps(data),
+        )
+
+        def response_handler(resp: Response) -> None:
+            if not resp.is_success:
+                raise DatabaseCompactError(resp, request)
+
+        await self._executor.execute(request, response_handler)
+
+    async def reload_routing(self) -> None:
+        """Reload the routing information.
+
+        Raises:
+            ServerReloadRoutingError: If the operation fails.
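+
+        Example:
+            A minimal usage sketch; there is no return value on success:
+
+            .. code-block:: python
+
+                await db.reload_routing()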
+
+        References:
+            - `reload-the-routing-table `__
+        """  # noqa: E501
+        request = Request(method=Method.POST, endpoint="/_admin/routing/reload")
+
+        def response_handler(resp: Response) -> None:
+            if not resp.is_success:
+                raise ServerReloadRoutingError(resp, request)
+
+        await self._executor.execute(request, response_handler)
+
+    async def echo(self, body: Optional[Json] = None) -> Result[Json]:
+        """Return an object with the server's request information.
+
+        Args:
+            body (dict | None): Optional body of the request.
+
+        Returns:
+            dict: Details of the request.
+
+        Raises:
+            ServerEchoError: If the operation fails.
+
+        References:
+            - `echo-a-request `__
+        """  # noqa: E501
+        data = body if body is not None else {}
+        request = Request(method=Method.POST, endpoint="/_admin/echo", data=data)
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerEchoError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def execute(self, command: str) -> Result[Any]:
+        """Execute a raw JavaScript command on the server.
+
+        Args:
+            command (str): JavaScript command to execute.
+
+        Returns:
+            Return value of **command**, if any.
+
+        Raises:
+            ServerExecuteError: If the execution fails.
+
+        References:
+            - `execute-a-script `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.POST, endpoint="/_admin/execute", data=command.encode("utf-8")
+        )
+
+        def response_handler(resp: Response) -> Any:
+            if not resp.is_success:
+                raise ServerExecuteError(resp, request)
+            return self.deserializer.loads(resp.raw_body)
+
+        return await self._executor.execute(request, response_handler)
+
+    async def request(self, request: Request) -> Result[Response]:
+        """Execute a custom request.
+
+        Args:
+            request (Request): Request object to be executed.
+
+        Returns:
+            Response: Response object containing the result of the request.
+        """
+
+        def response_handler(resp: Response) -> Response:
+            return resp
+
+        return await self._executor.execute(request, response_handler)
+
+    async def metrics(self, server_id: Optional[str] = None) -> Result[str]:
+        """Return server metrics in Prometheus format.
+
+        Args:
+            server_id (str | None): Returns metrics of the specified server.
+                If no serverId is given, the receiving server responds.
+
+        Returns:
+            str: Server metrics in Prometheus format.
+
+        Raises:
+            ServerMetricsError: If the operation fails.
+
+        References:
+            - `metrics-api-v2 `__
+        """  # noqa: E501
+        params: Params = {}
+        if server_id is not None:
+            params["serverId"] = server_id
+
+        request = Request(
+            method=Method.GET,
+            endpoint="/_admin/metrics/v2",
+            params=params,
+        )
+
+        def response_handler(resp: Response) -> str:
+            if not resp.is_success:
+                raise ServerMetricsError(resp, request)
+            return resp.raw_body.decode("utf-8")
+
+        return await self._executor.execute(request, response_handler)
+
+    async def read_log_entries(
+        self,
+        upto: Optional[int | str] = None,
+        level: Optional[str] = None,
+        start: Optional[int] = None,
+        size: Optional[int] = None,
+        offset: Optional[int] = None,
+        search: Optional[str] = None,
+        sort: Optional[str] = None,
+        server_id: Optional[str] = None,
+    ) -> Result[Json]:
+        """Read the global log from the server.
+
+        Args:
+            upto (int | str | None): Return the log entries up to the given level
+                (mutually exclusive with parameter **level**). Allowed values are
+                "fatal", "error", "warning", "info" (default), "debug" and "trace".
+            level (str | None): Return the log entries of only the given level
+                (mutually exclusive with **upto**).
+            start (int | None): Return the log entries whose ID is greater than or
+                equal to the given value.
+            size (int | None): Restrict the size of the result to the given value.
+                This can be used for pagination.
+            offset (int | None): Number of entries to skip (e.g. for pagination).
+            search (str | None): Return only the log entries containing the given text.
+            sort (str | None): Sort the log entries according to the given fashion,
+                which can be "asc" or "desc".
+            server_id (str | None): Returns all log entries of the specified server.
+                If no serverId is given, the receiving server responds.
+
+        Returns:
+            dict: Server log entries.
+
+        Raises:
+            ServerReadLogError: If the operation fails.
+
+        References:
+            - `get-the-global-server-logs `__
+        """  # noqa: E501
+        params: Params = {}
+        if upto is not None:
+            params["upto"] = upto
+        if level is not None:
+            params["level"] = level
+        if start is not None:
+            params["start"] = start
+        if size is not None:
+            params["size"] = size
+        if offset is not None:
+            params["offset"] = offset
+        if search is not None:
+            params["search"] = search
+        if sort is not None:
+            params["sort"] = sort
+        if server_id is not None:
+            params["serverId"] = server_id
+
+        request = Request(
+            method=Method.GET,
+            endpoint="/_admin/log/entries",
+            params=params,
+            prefix_needed=False,
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerReadLogError(resp, request)
+
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def log_levels(
+        self, server_id: Optional[str] = None, with_appenders: Optional[bool] = None
+    ) -> Result[Json]:
+        """Return current logging levels.
+
+        Args:
+            server_id (str | None): Forward the request to the specified server.
+            with_appenders (bool | None): Include appenders in the response.
+
+        Returns:
+            dict: Current logging levels.
+
+        Raises:
+            ServerLogLevelError: If the operation fails.
+
+        References:
+            - `get-the-server-log-levels `__
+        """  # noqa: E501
+        params: Params = {}
+        if server_id is not None:
+            params["serverId"] = server_id
+        if with_appenders is not None:
+            params["withAppenders"] = with_appenders
+
+        request = Request(
+            method=Method.GET,
+            endpoint="/_admin/log/level",
+            params=params,
+            prefix_needed=False,
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerLogLevelError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def set_log_levels(
+        self,
+        server_id: Optional[str] = None,
+        with_appenders: Optional[bool] = None,
+        **kwargs: Dict[str, Any],
+    ) -> Result[Json]:
+        """Set the logging levels.
+
+        This method takes arbitrary keyword arguments where the keys are the
+        logger names and the values are the logging levels. For example:
+
+        .. code-block:: python
+
+            await db.set_log_levels(
+                agency='DEBUG',
+                collector='INFO',
+                threads='WARNING'
+            )
+
+        Keys that are not valid logger names are ignored.
+
+        Args:
+            server_id (str | None): Forward the request to a specific server.
+            with_appenders (bool | None): Include appenders in the response.
+            kwargs (dict): Logging levels to be set.
+
+        Returns:
+            dict: New logging levels.
+
+        Raises:
+            ServerLogLevelSetError: If the operation fails.
+
+        References:
+            - `set-the-server-log-levels `__
+        """  # noqa: E501
+        params: Params = {}
+        if server_id is not None:
+            params["serverId"] = server_id
+        if with_appenders is not None:
+            params["withAppenders"] = with_appenders
+
+        request = Request(
+            method=Method.PUT,
+            endpoint="/_admin/log/level",
+            params=params,
+            data=self.serializer.dumps(kwargs),
+            prefix_needed=False,
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerLogLevelSetError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def reset_log_levels(self, server_id: Optional[str] = None) -> Result[Json]:
+        """Reset the logging levels.
+
+        Revert the server's log level settings to the values they had at startup,
+        as determined by the startup options specified on the command-line,
+        a configuration file, and the factory defaults.
+
+        Args:
+            server_id (str | None): Forward the request to a specific server.
+
+        Returns:
+            dict: New logging levels.
+
+        Raises:
+            ServerLogLevelResetError: If the operation fails.
+
+        References:
+            - `reset-the-server-log-levels `__
+        """  # noqa: E501
+        params: Params = {}
+        if server_id is not None:
+            params["serverId"] = server_id
+
+        request = Request(
+            method=Method.DELETE,
+            endpoint="/_admin/log/level",
+            params=params,
+            prefix_needed=False,
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerLogLevelResetError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def log_settings(self) -> Result[Json]:
+        """Get the structured log settings.
+
+        Returns:
+            dict: Current structured log settings.
+
+        Raises:
+            ServerLogSettingError: If the operation fails.
+
+        References:
+            - `get-the-structured-log-settings `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.GET,
+            endpoint="/_admin/log/structured",
+            prefix_needed=False,
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerLogSettingError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def set_log_settings(self, **kwargs: Dict[str, Any]) -> Result[Json]:
+        """Set the structured log settings.
+
+        This method takes arbitrary keyword arguments where the keys are the
+        structured log parameters and the values are true or false, for either
+        enabling or disabling the parameters.
+
+        .. code-block:: python
+
+            await db.set_log_settings(
+                database=True,
+                url=True,
+                username=False,
+            )
+
+        Args:
+            kwargs (dict): Structured log parameters to be set.
+
+        Returns:
+            dict: New structured log settings.
+
+        Raises:
+            ServerLogSettingSetError: If the operation fails.
+
+        References:
+            - `set-the-structured-log-settings `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.PUT,
+            endpoint="/_admin/log/structured",
+            data=self.serializer.dumps(kwargs),
+            prefix_needed=False,
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerLogSettingSetError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def api_calls(self) -> Result[Json]:
+        """Get a list of the most recent requests with a timestamp and the endpoint.
+ + Returns: + dict: API calls made to the server. + + Raises: + ServerApiCallsError: If the operation fails. + + References: + - `get-recent-api-calls `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_admin/server/api-calls", + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ServerApiCallsError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body)["result"] + return result + + return await self._executor.execute(request, response_handler) + class StandardDatabase(Database): """Standard database API wrapper. diff --git a/arangoasync/exceptions.py b/arangoasync/exceptions.py index e052fd4..58a9505 100644 --- a/arangoasync/exceptions.py +++ b/arangoasync/exceptions.py @@ -111,6 +111,10 @@ class AQLQueryExplainError(ArangoServerError): """Failed to parse and explain query.""" +class AQLQueryHistoryError(ArangoServerError): + """Failed to retrieve running AQL queries.""" + + class AQLQueryKillError(ArangoServerError): """Failed to kill the query.""" @@ -135,6 +139,18 @@ class AQLQueryValidateError(ArangoServerError): """Failed to parse and validate query.""" +class AccessTokenCreateError(ArangoServerError): + """Failed to create an access token.""" + + +class AccessTokenDeleteError(ArangoServerError): + """Failed to delete an access token.""" + + +class AccessTokenListError(ArangoServerError): + """Failed to retrieve access tokens.""" + + class AnalyzerCreateError(ArangoServerError): """Failed to create analyzer.""" @@ -179,14 +195,54 @@ class AuthHeaderError(ArangoClientError): """The authentication header could not be determined.""" +class BackupCreateError(ArangoServerError): + """Failed to create a backup.""" + + +class BackupDeleteError(ArangoServerError): + """Failed to delete a backup.""" + + +class BackupDownloadError(ArangoServerError): + """Failed to download a backup from remote repository.""" + + +class BackupGetError(ArangoServerError): + """Failed to retrieve backup details.""" + + +class BackupRestoreError(ArangoServerError): + """Failed to restore from backup.""" + + +class BackupUploadError(ArangoServerError): + """Failed to upload a backup to remote repository.""" + + class CollectionCreateError(ArangoServerError): """Failed to create collection.""" +class CollectionChecksumError(ArangoServerError): + """Failed to retrieve collection checksum.""" + + +class CollectionConfigureError(ArangoServerError): + """Failed to configure collection properties.""" + + +class CollectionCompactError(ArangoServerError): + """Failed to compact collection.""" + + class CollectionDeleteError(ArangoServerError): """Failed to delete collection.""" +class CollectionKeyGeneratorsError(ArangoServerError): + """Failed to retrieve key generators.""" + + class CollectionListError(ArangoServerError): """Failed to retrieve collections.""" @@ -195,6 +251,34 @@ class CollectionPropertiesError(ArangoServerError): """Failed to retrieve collection properties.""" +class CollectionRecalculateCountError(ArangoServerError): + """Failed to recalculate document count.""" + + +class CollectionRenameError(ArangoServerError): + """Failed to rename collection.""" + + +class CollectionResponsibleShardError(ArangoServerError): + """Failed to retrieve responsible shard.""" + + +class CollectionRevisionError(ArangoServerError): + """Failed to retrieve collection revision.""" + + +class CollectionShardsError(ArangoServerError): + """Failed to retrieve collection shards.""" + + +class CollectionStatisticsError(ArangoServerError): + """Failed 
to retrieve collection statistics.""" + + +class CollectionTruncateError(ArangoServerError): + """Failed to truncate collection.""" + + class ClientConnectionAbortedError(ArangoClientError): """The connection was aborted.""" @@ -203,8 +287,32 @@ class ClientConnectionError(ArangoClientError): """The request was unable to reach the server.""" -class CollectionTruncateError(ArangoServerError): - """Failed to truncate collection.""" +class ClusterEndpointsError(ArangoServerError): + """Failed to retrieve coordinator endpoints.""" + + +class ClusterHealthError(ArangoServerError): + """Failed to retrieve cluster health.""" + + +class ClusterMaintenanceModeError(ArangoServerError): + """Failed to enable/disable cluster supervision maintenance mode.""" + + +class ClusterRebalanceError(ArangoServerError): + """Failed to execute cluster rebalancing operation.""" + + +class ClusterServerRoleError(ArangoServerError): + """Failed to retrieve server role in a cluster.""" + + +class ClusterServerIDError(ArangoServerError): + """Failed to retrieve server ID.""" + + +class ClusterStatisticsError(ArangoServerError): + """Failed to retrieve DB-Server statistics.""" class CursorCloseError(ArangoServerError): @@ -227,6 +335,10 @@ class CursorStateError(ArangoClientError): """The cursor object was in a bad state.""" +class DatabaseCompactError(ArangoServerError): + """Failed to compact databases.""" + + class DatabaseCreateError(ArangoServerError): """Failed to create database.""" @@ -243,6 +355,10 @@ class DatabasePropertiesError(ArangoServerError): """Failed to retrieve database properties.""" +class DatabaseSupportInfoError(ArangoServerError): + """Failed to retrieve support info for deployment.""" + + class DeserializationError(ArangoClientError): """Failed to deserialize the server response.""" @@ -303,6 +419,90 @@ class EdgeListError(ArangoServerError): """Failed to retrieve edges coming in and out of a vertex.""" +class FoxxConfigGetError(ArangoServerError): + """Failed to retrieve Foxx service configuration.""" + + +class FoxxConfigReplaceError(ArangoServerError): + """Failed to replace Foxx service configuration.""" + + +class FoxxConfigUpdateError(ArangoServerError): + """Failed to update Foxx service configuration.""" + + +class FoxxCommitError(ArangoServerError): + """Failed to commit local Foxx service state.""" + + +class FoxxDependencyGetError(ArangoServerError): + """Failed to retrieve Foxx service dependencies.""" + + +class FoxxDependencyReplaceError(ArangoServerError): + """Failed to replace Foxx service dependencies.""" + + +class FoxxDependencyUpdateError(ArangoServerError): + """Failed to update Foxx service dependencies.""" + + +class FoxxScriptListError(ArangoServerError): + """Failed to retrieve Foxx service scripts.""" + + +class FoxxDevModeEnableError(ArangoServerError): + """Failed to enable development mode for Foxx service.""" + + +class FoxxDevModeDisableError(ArangoServerError): + """Failed to disable development mode for Foxx service.""" + + +class FoxxDownloadError(ArangoServerError): + """Failed to download Foxx service bundle.""" + + +class FoxxReadmeGetError(ArangoServerError): + """Failed to retrieve Foxx service readme.""" + + +class FoxxScriptRunError(ArangoServerError): + """Failed to run Foxx service script.""" + + +class FoxxServiceCreateError(ArangoServerError): + """Failed to create Foxx service.""" + + +class FoxxServiceDeleteError(ArangoServerError): + """Failed to delete Foxx services.""" + + +class FoxxServiceGetError(ArangoServerError): + """Failed to retrieve 
Foxx service metadata.""" + + +class FoxxServiceListError(ArangoServerError): + """Failed to retrieve Foxx services.""" + + +class FoxxServiceReplaceError(ArangoServerError): + """Failed to replace Foxx service.""" + + +class FoxxServiceUpdateError(ArangoServerError): + """Failed to update Foxx service.""" + + +class FoxxSwaggerGetError(ArangoServerError): + """Failed to retrieve Foxx service swagger.""" + + +class FoxxTestRunError(ArangoServerError): + """Failed to run Foxx service tests.""" + + class GraphCreateError(ArangoServerError): """Failed to create the graph.""" @@ -367,18 +567,146 @@ class PermissionUpdateError(ArangoServerError): """Failed to update user permission.""" +class ReplicationApplierConfigError(ArangoServerError): + """Failed to retrieve replication applier configuration.""" + + +class ReplicationApplierStateError(ArangoServerError): + """Failed to retrieve replication applier state.""" + + +class ReplicationClusterInventoryError(ArangoServerError): + """Failed to retrieve overview of collection and indexes in a cluster.""" + + +class ReplicationDumpError(ArangoServerError): + """Failed to retrieve collection content.""" + + +class ReplicationInventoryError(ArangoServerError): + """Failed to retrieve inventory of collection and indexes.""" + + +class ReplicationLoggerStateError(ArangoServerError): + """Failed to retrieve logger state.""" + + +class ReplicationServerIDError(ArangoServerError): + """Failed to retrieve server ID.""" + + class SerializationError(ArangoClientError): """Failed to serialize the request.""" +class ServerApiCallsError(ArangoServerError): + """Failed to retrieve the list of recent API calls.""" + + +class ServerAvailableOptionsGetError(ArangoServerError): + """Failed to retrieve available server options.""" + + +class ServerCheckAvailabilityError(ArangoServerError): + """Failed to retrieve server availability mode.""" + + class ServerConnectionError(ArangoServerError): """Failed to connect to ArangoDB server.""" +class ServerCurrentOptionsGetError(ArangoServerError): + """Failed to retrieve currently-set server options.""" + + +class ServerEchoError(ArangoServerError): + """Failed to retrieve details on last request.""" + + +class ServerEncryptionError(ArangoServerError): + """Failed to reload user-defined encryption keys.""" + + +class ServerEngineError(ArangoServerError): + """Failed to retrieve database engine.""" + + +class ServerExecuteError(ArangoServerError): + """Failed to execute raw JavaScript command.""" + + +class ServerMetricsError(ArangoServerError): + """Failed to retrieve server metrics.""" + + +class ServerModeError(ArangoServerError): + """Failed to retrieve server mode.""" + + +class ServerModeSetError(ArangoServerError): + """Failed to set server mode.""" + + +class ServerLicenseGetError(ArangoServerError): + """Failed to retrieve server license.""" + + +class ServerLicenseSetError(ArangoServerError): + """Failed to set server license.""" + + +class ServerLogLevelError(ArangoServerError): + """Failed to retrieve server log levels.""" + + +class ServerLogLevelResetError(ArangoServerError): + """Failed to reset server log levels.""" + + +class ServerLogLevelSetError(ArangoServerError): + """Failed to set server log levels.""" + + +class ServerLogSettingError(ArangoServerError): + """Failed to retrieve server log settings.""" + + +class ServerLogSettingSetError(ArangoServerError): + """Failed to set server log settings.""" + + +class ServerReadLogError(ArangoServerError): + """Failed to retrieve global log.""" + + +class 
ServerReloadRoutingError(ArangoServerError): + """Failed to reload routing details.""" + + +class ServerShutdownError(ArangoServerError): + """Failed to initiate shutdown sequence.""" + + +class ServerShutdownProgressError(ArangoServerError): + """Failed to retrieve soft shutdown progress.""" + + class ServerStatusError(ArangoServerError): """Failed to retrieve server status.""" +class ServerTLSError(ArangoServerError): + """Failed to retrieve TLS data.""" + + +class ServerTLSReloadError(ArangoServerError): + """Failed to reload TLS.""" + + +class ServerTimeError(ArangoServerError): + """Failed to retrieve server system time.""" + + class ServerVersionError(ArangoServerError): """Failed to retrieve server version.""" @@ -387,6 +715,22 @@ class SortValidationError(ArangoClientError): """Invalid sort parameters.""" +class TaskCreateError(ArangoServerError): + """Failed to create server task.""" + + +class TaskDeleteError(ArangoServerError): + """Failed to delete server task.""" + + +class TaskGetError(ArangoServerError): + """Failed to retrieve server task details.""" + + +class TaskListError(ArangoServerError): + """Failed to retrieve server tasks.""" + + class TransactionAbortError(ArangoServerError): """Failed to abort transaction.""" diff --git a/arangoasync/foxx.py b/arangoasync/foxx.py new file mode 100644 index 0000000..b74d933 --- /dev/null +++ b/arangoasync/foxx.py @@ -0,0 +1,829 @@ +__all__ = ["Foxx"] + +from typing import Any, Optional + +from arangoasync.exceptions import ( + FoxxCommitError, + FoxxConfigGetError, + FoxxConfigReplaceError, + FoxxConfigUpdateError, + FoxxDependencyGetError, + FoxxDependencyReplaceError, + FoxxDependencyUpdateError, + FoxxDevModeDisableError, + FoxxDevModeEnableError, + FoxxDownloadError, + FoxxReadmeGetError, + FoxxScriptListError, + FoxxScriptRunError, + FoxxServiceCreateError, + FoxxServiceDeleteError, + FoxxServiceGetError, + FoxxServiceListError, + FoxxServiceReplaceError, + FoxxServiceUpdateError, + FoxxSwaggerGetError, + FoxxTestRunError, +) +from arangoasync.executor import ApiExecutor +from arangoasync.request import Method, Request +from arangoasync.response import Response +from arangoasync.result import Result +from arangoasync.serialization import Deserializer, Serializer +from arangoasync.typings import Json, Jsons, Params, RequestHeaders + + +class Foxx: + """Foxx API wrapper.""" + + def __init__(self, executor: ApiExecutor) -> None: + self._executor = executor + + def __repr__(self) -> str: + return f"" + + @property + def serializer(self) -> Serializer[Json]: + """Return the serializer.""" + return self._executor.serializer + + @property + def deserializer(self) -> Deserializer[Json, Jsons]: + """Return the deserializer.""" + return self._executor.deserializer + + async def services(self, exclude_system: Optional[bool] = False) -> Result[Jsons]: + """List installed services. + + Args: + exclude_system (bool | None): Exclude system services. + + Returns: + list: List of installed services. + + Raises: + FoxxServiceListError: If retrieval fails. 
+
+        References:
+            - `list-the-installed-services `__
+        """  # noqa: E501
+        params: Params = {}
+        if exclude_system is not None:
+            params["excludeSystem"] = exclude_system
+
+        request = Request(
+            method=Method.GET,
+            endpoint="/_api/foxx",
+            params=params,
+        )
+
+        def response_handler(resp: Response) -> Jsons:
+            if not resp.is_success:
+                raise FoxxServiceListError(resp, request)
+            result: Jsons = self.deserializer.loads_many(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def service(self, mount: str) -> Result[Json]:
+        """Return service metadata.
+
+        Args:
+            mount (str): Service mount path (e.g. "/_admin/aardvark").
+
+        Returns:
+            dict: Service metadata.
+
+        Raises:
+            FoxxServiceGetError: If retrieval fails.
+
+        References:
+            - `get-the-service-description `__
+        """  # noqa: E501
+        params: Params = {"mount": mount}
+        request = Request(
+            method=Method.GET,
+            endpoint="/_api/foxx/service",
+            params=params,
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise FoxxServiceGetError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def create_service(
+        self,
+        mount: str,
+        service: Any,
+        headers: Optional[RequestHeaders] = None,
+        development: Optional[bool] = None,
+        setup: Optional[bool] = None,
+        legacy: Optional[bool] = None,
+    ) -> Result[Json]:
+        """Installs the given new service at the given mount path.
+
+        Args:
+            mount (str): Mount path the service should be installed at.
+            service (Any): Service payload. Can be a JSON string, a file-like
+                object, or a multipart form.
+            headers (dict | None): Request headers.
+            development (bool | None): Whether to install the service in development mode.
+            setup (bool | None): Whether to run the service setup script.
+            legacy (bool | None): Whether to install in legacy mode.
+
+        Returns:
+            dict: Service metadata.
+
+        Raises:
+            FoxxServiceCreateError: If installation fails.
+
+        References:
+            - `install-a-new-service `__
+        """  # noqa: E501
+        params: Params = dict()
+        params["mount"] = mount
+        if development is not None:
+            params["development"] = development
+        if setup is not None:
+            params["setup"] = setup
+        if legacy is not None:
+            params["legacy"] = legacy
+
+        if isinstance(service, dict):
+            data = self.serializer.dumps(service)
+        else:
+            data = service
+
+        request = Request(
+            method=Method.POST,
+            endpoint="/_api/foxx",
+            params=params,
+            data=data,
+            headers=headers,
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise FoxxServiceCreateError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def delete_service(
+        self,
+        mount: str,
+        teardown: Optional[bool] = None,
+    ) -> None:
+        """Removes the service at the given mount path from the database and file system.
+
+        Args:
+            mount (str): Mount path of the service to uninstall.
+            teardown (bool | None): Whether to run the teardown script.
+
+        Raises:
+            FoxxServiceDeleteError: If the operation fails.
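+
+        Example:
+            A minimal usage sketch; the mount path is hypothetical and
+            ``db.foxx`` is assumed to expose this wrapper:
+
+            .. code-block:: python
+
+                # Run the service teardown script before removing it.
+                await db.foxx.delete_service("/my-service", teardown=True)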
+
+        References:
+            - `uninstall-a-service `__
+        """  # noqa: E501
+        params: Params = dict()
+        params["mount"] = mount
+        if teardown is not None:
+            params["teardown"] = teardown
+
+        request = Request(
+            method=Method.DELETE,
+            endpoint="/_api/foxx/service",
+            params=params,
+        )
+
+        def response_handler(resp: Response) -> None:
+            if not resp.is_success:
+                raise FoxxServiceDeleteError(resp, request)
+
+        await self._executor.execute(request, response_handler)
+
+    async def replace_service(
+        self,
+        mount: str,
+        service: Any,
+        headers: Optional[RequestHeaders] = None,
+        teardown: Optional[bool] = None,
+        setup: Optional[bool] = None,
+        legacy: Optional[bool] = None,
+        force: Optional[bool] = None,
+    ) -> Result[Json]:
+        """Replace an existing Foxx service at the given mount path.
+
+        Args:
+            mount (str): Mount path of the service to replace.
+            service (Any): Service payload (JSON string, file-like object, or multipart form).
+            headers (dict | None): Optional request headers.
+            teardown (bool | None): Whether to run the teardown script.
+            setup (bool | None): Whether to run the setup script.
+            legacy (bool | None): Whether to install in legacy mode.
+            force (bool | None): Set to `True` to force service install even if no
+                service is installed under the given mount.
+
+        Returns:
+            dict: Service metadata.
+
+        Raises:
+            FoxxServiceReplaceError: If replacement fails.
+
+        References:
+            - `replace-a-service `__
+        """  # noqa: E501
+        params: Params = dict()
+        params["mount"] = mount
+        if teardown is not None:
+            params["teardown"] = teardown
+        if setup is not None:
+            params["setup"] = setup
+        if legacy is not None:
+            params["legacy"] = legacy
+        if force is not None:
+            params["force"] = force
+
+        if isinstance(service, dict):
+            data = self.serializer.dumps(service)
+        else:
+            data = service
+
+        request = Request(
+            method=Method.PUT,
+            endpoint="/_api/foxx/service",
+            params=params,
+            data=data,
+            headers=headers,
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise FoxxServiceReplaceError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def update_service(
+        self,
+        mount: str,
+        service: Any,
+        headers: Optional[RequestHeaders] = None,
+        teardown: Optional[bool] = None,
+        setup: Optional[bool] = None,
+        legacy: Optional[bool] = None,
+        force: Optional[bool] = None,
+    ) -> Result[Json]:
+        """Upgrade a Foxx service at the given mount path.
+
+        Args:
+            mount (str): Mount path of the service to upgrade.
+            service (Any): Service payload (JSON string, file-like object, or multipart form).
+            headers (dict | None): Optional request headers.
+            teardown (bool | None): Whether to run the teardown script.
+            setup (bool | None): Whether to run the setup script.
+            legacy (bool | None): Whether to upgrade in legacy mode.
+            force (bool | None): Set to `True` to force service install even if no
+                service is installed under the given mount.
+
+        Returns:
+            dict: Service metadata.
+
+        Raises:
+            FoxxServiceUpdateError: If the upgrade fails.
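+
+        Example:
+            A minimal usage sketch; the mount path and bundle file are
+            hypothetical, and the zip content type is assumed to be accepted
+            by the server:
+
+            .. code-block:: python
+
+                with open("my-service.zip", "rb") as f:
+                    await db.foxx.update_service(
+                        "/my-service",
+                        f,  # a file-like object, per the accepted payload types
+                        headers={"content-type": "application/zip"},
+                        setup=True,
+                    )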
+ + References: + - `upgrade-a-service `__ + """ # noqa: E501 + params: Params = dict() + params["mount"] = mount + if teardown is not None: + params["teardown"] = teardown + if setup is not None: + params["setup"] = setup + if legacy is not None: + params["legacy"] = legacy + if force is not None: + params["force"] = force + + if isinstance(service, dict): + data = self.serializer.dumps(service) + else: + data = service + + request = Request( + method=Method.PATCH, + endpoint="/_api/foxx/service", + params=params, + data=data, + headers=headers, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxServiceUpdateError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def config(self, mount: str) -> Result[Json]: + """Return service configuration. + + Args: + mount (str): Service mount path. + + Returns: + dict: Service configuration. + + Raises: + FoxxConfigGetError: If retrieval fails. + + References: + - `get-the-configuration-options `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_api/foxx/configuration", + params={"mount": mount}, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxConfigGetError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def update_config(self, mount: str, options: Json) -> Result[Json]: + """Update service configuration. + + Args: + mount (str): Service mount path. + options (dict): Configuration values. Omitted options are ignored. + + Returns: + dict: Updated configuration values. + + Raises: + FoxxConfigUpdateError: If update fails. + + References: + - `update-the-configuration-options `__ + """ # noqa: E501 + request = Request( + method=Method.PATCH, + endpoint="/_api/foxx/configuration", + params={"mount": mount}, + data=self.serializer.dumps(options), + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxConfigUpdateError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def replace_config(self, mount: str, options: Json) -> Result[Json]: + """Replace service configuration. + + Args: + mount (str): Service mount path. + options (dict): Configuration values. Omitted options are reset to their + default values or marked as un-configured. + + Returns: + dict: Replaced configuration values. + + Raises: + FoxxConfigReplaceError: If replace fails. + + References: + - `replace-the-configuration-options `__ + """ # noqa: E501 + request = Request( + method=Method.PUT, + endpoint="/_api/foxx/configuration", + params={"mount": mount}, + data=self.serializer.dumps(options), + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxConfigReplaceError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def dependencies(self, mount: str) -> Result[Json]: + """Return service dependencies. + + Args: + mount (str): Service mount path. + + Returns: + dict: Service dependencies settings. + + Raises: + FoxxDependencyGetError: If retrieval fails. 
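+
+        Example:
+            A minimal usage sketch; the mount path is hypothetical:
+
+            .. code-block:: python
+
+                deps = await db.foxx.dependencies("/my-service")
+                print(deps)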
+
+        References:
+            - `get-the-dependency-options `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.GET,
+            endpoint="/_api/foxx/dependencies",
+            params={"mount": mount},
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise FoxxDependencyGetError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def update_dependencies(self, mount: str, options: Json) -> Result[Json]:
+        """Update service dependencies.
+
+        Args:
+            mount (str): Service mount path.
+            options (dict): Dependency settings. Omitted ones are ignored.
+
+        Returns:
+            dict: Updated dependency settings.
+
+        Raises:
+            FoxxDependencyUpdateError: If update fails.
+
+        References:
+            - `update-the-dependency-options `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.PATCH,
+            endpoint="/_api/foxx/dependencies",
+            params={"mount": mount},
+            data=self.serializer.dumps(options),
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise FoxxDependencyUpdateError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def replace_dependencies(self, mount: str, options: Json) -> Result[Json]:
+        """Replace service dependencies.
+
+        Args:
+            mount (str): Service mount path.
+            options (dict): Dependency settings. Omitted ones are disabled.
+
+        Returns:
+            dict: Replaced dependency settings.
+
+        Raises:
+            FoxxDependencyReplaceError: If replace fails.
+
+        References:
+            - `replace-the-dependency-options `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.PUT,
+            endpoint="/_api/foxx/dependencies",
+            params={"mount": mount},
+            data=self.serializer.dumps(options),
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise FoxxDependencyReplaceError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def scripts(self, mount: str) -> Result[Json]:
+        """List service scripts.
+
+        Args:
+            mount (str): Service mount path.
+
+        Returns:
+            dict: Service scripts.
+
+        Raises:
+            FoxxScriptListError: If retrieval fails.
+
+        References:
+            - `list-the-service-scripts `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.GET,
+            endpoint="/_api/foxx/scripts",
+            params={"mount": mount},
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise FoxxScriptListError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def run_script(
+        self, mount: str, name: str, arg: Optional[Json] = None
+    ) -> Result[Any]:
+        """Run a service script.
+
+        Args:
+            mount (str): Service mount path.
+            name (str): Script name.
+            arg (dict | None): Arbitrary value passed into the script as the
+                first argument.
+
+        Returns:
+            Any: The exports of the script, if any.
+
+        Raises:
+            FoxxScriptRunError: If the script fails.
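+
+        Example:
+            A minimal usage sketch; the mount path, script name, and argument
+            are hypothetical, and the script must be one reported by
+            ``scripts()``:
+
+            .. code-block:: python
+
+                result = await db.foxx.run_script(
+                    "/my-service", "setup", arg={"force": True}
+                )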
+
+        References:
+            - `run-a-service-script `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.POST,
+            endpoint=f"/_api/foxx/scripts/{name}",
+            params={"mount": mount},
+            data=self.serializer.dumps(arg) if arg is not None else None,
+        )
+
+        def response_handler(resp: Response) -> Any:
+            if not resp.is_success:
+                raise FoxxScriptRunError(resp, request)
+            result: Json = self.deserializer.loads(resp.raw_body)
+            return result
+
+        return await self._executor.execute(request, response_handler)
+
+    async def run_tests(
+        self,
+        mount: str,
+        reporter: Optional[str] = None,
+        idiomatic: Optional[bool] = None,
+        filter: Optional[str] = None,
+        output_format: Optional[str] = None,
+    ) -> Result[str]:
+        """Run service tests.
+
+        Args:
+            mount (str): Service mount path.
+            reporter (str | None): Test reporter. Allowed values are "default" (simple
+                list of test cases), "suite" (object of test cases nested in
+                suites), "stream" (raw stream of test results), "xunit" (XUnit or
+                JUnit compatible structure), or "tap" (raw TAP compatible stream).
+            idiomatic (bool | None): Use matching format for the reporter, regardless of
+                the value of parameter **output_format**.
+            filter (str | None): Only run tests whose full name (test suite and
+                test case) matches the given string.
+            output_format (str | None): Used to further control format. Allowed values
+                are "x-ldjson", "xml" and "text". When using the "stream" reporter,
+                setting this to "x-ldjson" returns a newline-delimited JSON stream.
+                When using the "tap" reporter, setting this to "text" returns a
+                plain-text TAP report. When using the "xunit" reporter, setting
+                this to "xml" returns XML instead of JSONML.
+
+        Returns:
+            str: Reporter output (e.g. raw JSON string, XML, plain text).
+
+        Raises:
+            FoxxTestRunError: If the test run fails.
+
+        References:
+            - `run-the-service-tests `__
+        """  # noqa: E501
+        params: Params = dict()
+        params["mount"] = mount
+        if reporter is not None:
+            params["reporter"] = reporter
+        if idiomatic is not None:
+            params["idiomatic"] = idiomatic
+        if filter is not None:
+            params["filter"] = filter
+
+        headers: RequestHeaders = {}
+        if output_format == "x-ldjson":
+            headers["accept"] = "application/x-ldjson"
+        elif output_format == "xml":
+            headers["accept"] = "application/xml"
+        elif output_format == "text":
+            headers["accept"] = "text/plain"
+
+        request = Request(
+            method=Method.POST,
+            endpoint="/_api/foxx/tests",
+            params=params,
+            headers=headers,
+        )
+
+        def response_handler(resp: Response) -> str:
+            if not resp.is_success:
+                raise FoxxTestRunError(resp, request)
+            return resp.raw_body.decode("utf-8")
+
+        return await self._executor.execute(request, response_handler)
+
+    async def enable_development(self, mount: str) -> Result[Json]:
+        """Puts the service into development mode.
+
+        While the service is running in development mode, it is reloaded from
+        the file system, and its setup script (if any) is re-executed every
+        time the service handles a request.
+
+        In a cluster with multiple coordinators, changes to the filesystem on
+        one coordinator are not reflected across other coordinators.
+
+        Args:
+            mount (str): Service mount path.
+
+        Returns:
+            dict: Service metadata.
+
+        Raises:
+            FoxxDevModeEnableError: If the operation fails.
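+
+        Example:
+            A minimal usage sketch; the mount path is hypothetical:
+
+            .. code-block:: python
+
+                service = await db.foxx.enable_development("/my-service")
+                # The returned metadata reports the development flag.
+                assert service["development"] is True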
+ + References: + - `enable-the-development-mode `__ + """ # noqa: E501 + request = Request( + method=Method.POST, + endpoint="/_api/foxx/development", + params={"mount": mount}, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxDevModeEnableError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def disable_development(self, mount: str) -> Result[Json]: + """Puts the service into production mode. + + In a cluster with multiple coordinators, the services on all other + coordinators are replaced with the version on the calling coordinator. + + Args: + mount (str): Service mount path. + + Returns: + dict: Service metadata. + + Raises: + FoxxDevModeDisableError: If the operation fails. + + References: + - `disable-the-development-mode `__ + """ # noqa: E501 + request = Request( + method=Method.DELETE, + endpoint="/_api/foxx/development", + params={"mount": mount}, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxDevModeDisableError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def readme(self, mount: str) -> Result[str]: + """Return the service readme. + + Args: + mount (str): Service mount path. + + Returns: + str: Service readme content. + + Raises: + FoxxReadmeGetError: If retrieval fails. + + References: + - `get-the-service-readme `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_api/foxx/readme", + params={"mount": mount}, + ) + + def response_handler(resp: Response) -> str: + if not resp.is_success: + raise FoxxReadmeGetError(resp, request) + return resp.raw_body.decode("utf-8") + + return await self._executor.execute(request, response_handler) + + async def swagger(self, mount: str) -> Result[Json]: + """Return the Swagger API description for the given service. + + Args: + mount (str): Service mount path. + + Returns: + dict: Swagger API description. + + Raises: + FoxxSwaggerGetError: If retrieval fails. + + References: + - `get-the-swagger-description `__ + """ # noqa: E501 + request = Request( + method=Method.GET, endpoint="/_api/foxx/swagger", params={"mount": mount} + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise FoxxSwaggerGetError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def download(self, mount: str) -> Result[bytes]: + """Downloads a zip bundle of the service directory. + + When development mode is enabled, this always creates a new bundle. + Otherwise, the bundle will represent the version of a service that is + installed on that ArangoDB instance. + + Args: + mount (str): Service mount path. + + Returns: + bytes: Service bundle zip in raw bytes form. + + Raises: + FoxxDownloadError: If download fails. 
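+
+        Example:
+            A minimal sketch that saves the bundle to disk; the paths are
+            hypothetical:
+
+            .. code-block:: python
+
+                bundle = await db.foxx.download("/my-service")
+                with open("my-service.zip", "wb") as f:
+                    f.write(bundle)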
+
+        References:
+            - `download-a-service-bundle `__
+        """  # noqa: E501
+        request = Request(
+            method=Method.POST, endpoint="/_api/foxx/download", params={"mount": mount}
+        )
+
+        def response_handler(resp: Response) -> bytes:
+            if not resp.is_success:
+                raise FoxxDownloadError(resp, request)
+            return resp.raw_body
+
+        return await self._executor.execute(request, response_handler)
+
+    async def commit(self, replace: Optional[bool] = None) -> None:
+        """Commit local service state of the coordinator to the database.
+
+        This can be used to resolve service conflicts between coordinators
+        that cannot be fixed automatically due to missing data.
+
+        Args:
+            replace (bool | None): If set to `True`, any existing service files in the database
+                will be overwritten.
+
+        Raises:
+            FoxxCommitError: If commit fails.
+
+        References:
+            - `commit-the-local-service-state `__
+        """  # noqa: E501
+        params: Params = {}
+        if replace is not None:
+            params["replace"] = replace
+
+        request = Request(
+            method=Method.POST, endpoint="/_api/foxx/commit", params=params
+        )
+
+        def response_handler(resp: Response) -> None:
+            if not resp.is_success:
+                raise FoxxCommitError(resp, request)
+
+        await self._executor.execute(request, response_handler)
diff --git a/arangoasync/replication.py b/arangoasync/replication.py
new file mode 100644
index 0000000..9d96709
--- /dev/null
+++ b/arangoasync/replication.py
@@ -0,0 +1,270 @@
+__all__ = ["Replication"]
+
+
+from typing import Optional
+
+from arangoasync.exceptions import (
+    ReplicationApplierConfigError,
+    ReplicationApplierStateError,
+    ReplicationClusterInventoryError,
+    ReplicationDumpError,
+    ReplicationInventoryError,
+    ReplicationLoggerStateError,
+    ReplicationServerIDError,
+)
+from arangoasync.executor import ApiExecutor
+from arangoasync.request import Method, Request
+from arangoasync.response import Response
+from arangoasync.result import Result
+from arangoasync.serialization import Deserializer, Serializer
+from arangoasync.typings import Json, Jsons, Params
+
+
+class Replication:
+    """Replication API wrapper."""
+
+    def __init__(self, executor: ApiExecutor) -> None:
+        self._executor = executor
+
+    @property
+    def serializer(self) -> Serializer[Json]:
+        """Return the serializer."""
+        return self._executor.serializer
+
+    @property
+    def deserializer(self) -> Deserializer[Json, Jsons]:
+        """Return the deserializer."""
+        return self._executor.deserializer
+
+    async def inventory(
+        self,
+        batch_id: str,
+        include_system: Optional[bool] = None,
+        all_databases: Optional[bool] = None,
+        collection: Optional[str] = None,
+        db_server: Optional[str] = None,
+    ) -> Result[Json]:
+        """
+        Return an overview of collections and indexes.
+
+        Args:
+            batch_id (str): Batch ID.
+            include_system (bool | None): Include system collections.
+            all_databases (bool | None): Include all databases (only on "_system").
+            collection (str | None): If this parameter is set, the
+                response will be restricted to a single collection (the one specified),
+                and no views will be returned.
+            db_server (str | None): On a Coordinator, this request must have a
+                DBserver query parameter.
+
+        Returns:
+            dict: Overview of collections and indexes.
+
+        Raises:
+            ReplicationInventoryError: If retrieval fails.
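+
+        Example:
+            A minimal usage sketch, assuming the wrapper is exposed as
+            ``db.replication`` and that a dump batch was created beforehand
+            (the batch ID below is hypothetical):
+
+            .. code-block:: python
+
+                overview = await db.replication.inventory(batch_id="12345")
+                for col in overview["collections"]:
+                    print(col["parameters"]["name"])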
+ + References: + - `get-a-replication-inventory `__ + """ # noqa: E501 + params: Params = dict() + params["batchId"] = batch_id + if include_system is not None: + params["includeSystem"] = include_system + if all_databases is not None: + params["global"] = all_databases + if collection is not None: + params["collection"] = collection + if db_server is not None: + params["DBServer"] = db_server + + request = Request( + method=Method.GET, + endpoint="/_api/replication/inventory", + params=params, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ReplicationInventoryError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def dump( + self, + collection: str, + batch_id: Optional[str] = None, + chunk_size: Optional[int] = None, + ) -> Result[bytes]: + """Return the events data of one collection. + + Args: + collection (str): ID of the collection to dump. + batch_id (str | None): Batch ID. + chunk_size (int | None): Size of the result in bytes. This value is honored + approximately only. + + Returns: + bytes: Collection events data. + + Raises: + ReplicationDumpError: If retrieval fails. + + References: + - `get-a-replication-dump `__ + """ # noqa: E501 + params: Params = dict() + params["collection"] = collection + if batch_id is not None: + params["batchId"] = batch_id + if chunk_size is not None: + params["chunkSize"] = chunk_size + + request = Request( + method=Method.GET, + endpoint="/_api/replication/dump", + params=params, + ) + + def response_handler(resp: Response) -> bytes: + if not resp.is_success: + raise ReplicationDumpError(resp, request) + return resp.raw_body + + return await self._executor.execute(request, response_handler) + + async def cluster_inventory( + self, include_system: Optional[bool] = None + ) -> Result[Json]: + """Return an overview of collections and indexes in a cluster. + + Args: + include_system (bool | None): Include system collections. + + Returns: + dict: Overview of collections and indexes in the cluster. + + Raises: + ReplicationClusterInventoryError: If retrieval fails. + + References: + - `get-the-cluster-collections-and-indexes `__ + """ # noqa: E501 + params: Params = {} + if include_system is not None: + params["includeSystem"] = include_system + + request = Request( + method=Method.GET, + endpoint="/_api/replication/clusterInventory", + params=params, + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ReplicationClusterInventoryError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def logger_state(self) -> Result[Json]: + """Return the state of the replication logger. + + Returns: + dict: Logger state. + + Raises: + ReplicationLoggerStateError: If retrieval fails. + + References: + - `get-the-replication-logger-state `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_api/replication/logger-state", + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ReplicationLoggerStateError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def applier_config(self) -> Result[Json]: + """Return the configuration of the replication applier. 
+ + Returns: + dict: Configuration of the replication applier. + + Raises: + ReplicationApplierConfigError: If retrieval fails. + + References: + - `get-the-replication-applier-configuration `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_api/replication/applier-config", + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ReplicationApplierConfigError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def applier_state(self) -> Result[Json]: + """Return the state of the replication applier. + + Returns: + dict: State of the replication applier. + + Raises: + ReplicationApplierStateError: If retrieval fails. + + References: + - `get-the-replication-applier-state `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_api/replication/applier-state", + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ReplicationApplierStateError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return result + + return await self._executor.execute(request, response_handler) + + async def server_id(self) -> Result[str]: + """Return the current server's ID. + + Returns: + str: Server ID. + + Raises: + ReplicationServerIDError: If retrieval fails. + + References: + - `get-the-replication-server-id `__ + """ # noqa: E501 + request = Request( + method=Method.GET, + endpoint="/_api/replication/server-id", + ) + + def response_handler(resp: Response) -> str: + if not resp.is_success: + raise ReplicationServerIDError(resp, request) + result: Json = self.deserializer.loads(resp.raw_body) + return str(result["serverId"]) + + return await self._executor.execute(request, response_handler) diff --git a/arangoasync/request.py b/arangoasync/request.py index 951c9e9..9c43508 100644 --- a/arangoasync/request.py +++ b/arangoasync/request.py @@ -4,7 +4,7 @@ ] from enum import Enum, auto -from typing import Optional +from typing import Any, Optional from arangoasync.auth import Auth from arangoasync.typings import Params, RequestHeaders @@ -31,16 +31,18 @@ class Request: endpoint (str): API endpoint. headers (dict | None): Request headers. params (dict | None): URL parameters. - data (bytes | None): Request payload. + data (Any): Request payload. auth (Auth | None): Authentication. + prefix_needed (bool): Whether the request needs a prefix (e.g., database name). Attributes: method (Method): HTTP method. endpoint (str): API endpoint. headers (dict | None): Request headers. params (dict | None): URL parameters. - data (bytes | None): Request payload. + data (Any): Request payload. auth (Auth | None): Authentication. + prefix_needed (bool): Whether the request needs a prefix (e.g., database name). 
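+
+    Example:
+        .. code-block:: python
+
+            # A minimal sketch of a custom request (mirroring the example in
+            # docs/database.rst). For endpoints that must not be addressed under
+            # the "/_db/<name>" prefix, pass prefix_needed=False.
+            request = Request(
+                method=Method.POST,
+                endpoint="/_admin/execute",
+                data="return 1".encode("utf-8"),
+            )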
""" __slots__ = ( @@ -50,6 +52,7 @@ class Request: "params", "data", "auth", + "prefix_needed", ) def __init__( @@ -58,15 +61,17 @@ def __init__( endpoint: str, headers: Optional[RequestHeaders] = None, params: Optional[Params] = None, - data: Optional[bytes | str] = None, + data: Optional[Any] = None, auth: Optional[Auth] = None, + prefix_needed: bool = True, ) -> None: self.method: Method = method self.endpoint: str = endpoint self.headers: RequestHeaders = headers or dict() self.params: Params = params or dict() - self.data: Optional[bytes | str] = data + self.data: Optional[Any] = data self.auth: Optional[Auth] = auth + self.prefix_needed = prefix_needed def normalized_headers(self) -> RequestHeaders: """Normalize request headers. diff --git a/arangoasync/typings.py b/arangoasync/typings.py index 280e27e..0d85035 100644 --- a/arangoasync/typings.py +++ b/arangoasync/typings.py @@ -791,8 +791,6 @@ def compatibility_formatter(data: Json) -> Json: result["deleted"] = data["deleted"] if "syncByRevision" in data: result["sync_by_revision"] = data["syncByRevision"] - if "tempObjectId" in data: - result["temp_object_id"] = data["tempObjectId"] if "usesRevisionsAsDocumentIds" in data: result["rev_as_id"] = data["usesRevisionsAsDocumentIds"] if "isDisjoint" in data: @@ -819,6 +817,146 @@ def format(self, formatter: Optional[Formatter] = None) -> Json: return self.compatibility_formatter(self._data) +class CollectionStatistics(JsonWrapper): + """Statistical information about the collection. + + Example: + .. code-block:: json + + { + "figures" : { + "indexes" : { + "count" : 1, + "size" : 1234 + }, + "documentsSize" : 5601, + "cacheInUse" : false, + "cacheSize" : 0, + "cacheUsage" : 0, + "engine" : { + "documents" : 1, + "indexes" : [ + { + "type" : "primary", + "id" : 0, + "count" : 1 + } + ] + } + }, + "writeConcern" : 1, + "waitForSync" : false, + "usesRevisionsAsDocumentIds" : true, + "syncByRevision" : true, + "statusString" : "loaded", + "id" : "69123", + "isSmartChild" : false, + "schema" : null, + "name" : "products", + "type" : 2, + "status" : 3, + "count" : 1, + "cacheEnabled" : false, + "isSystem" : false, + "internalValidatorType" : 0, + "globallyUniqueId" : "hB7C02EE43DCE/69123", + "keyOptions" : { + "allowUserKeys" : true, + "type" : "traditional", + "lastValue" : 69129 + }, + "computedValues" : null, + "objectId" : "69124" + } + + References: + - `get-the-collection-statistics `__ + """ # noqa: E501 + + def __init__(self, data: Json) -> None: + super().__init__(data) + + @property + def figures(self) -> Json: + return cast(Json, self._data.get("figures")) + + @property + def write_concern(self) -> Optional[int]: + return self._data.get("writeConcern") + + @property + def wait_for_sync(self) -> Optional[bool]: + return self._data.get("waitForSync") + + @property + def use_revisions_as_document_ids(self) -> Optional[bool]: + return self._data.get("usesRevisionsAsDocumentIds") + + @property + def sync_by_revision(self) -> Optional[bool]: + return self._data.get("syncByRevision") + + @property + def status_string(self) -> Optional[str]: + return self._data.get("statusString") + + @property + def id(self) -> str: + return self._data["id"] # type: ignore[no-any-return] + + @property + def is_smart_child(self) -> bool: + return self._data["isSmartChild"] # type: ignore[no-any-return] + + @property + def schema(self) -> Optional[Json]: + return self._data.get("schema") + + @property + def name(self) -> str: + return self._data["name"] # type: ignore[no-any-return] + + @property + def 
type(self) -> CollectionType:
+        return CollectionType.from_int(self._data["type"])
+
+    @property
+    def status(self) -> CollectionStatus:
+        return CollectionStatus.from_int(self._data["status"])
+
+    @property
+    def count(self) -> int:
+        return self._data["count"]  # type: ignore[no-any-return]
+
+    @property
+    def cache_enabled(self) -> Optional[bool]:
+        return self._data.get("cacheEnabled")
+
+    @property
+    def is_system(self) -> bool:
+        return self._data["isSystem"]  # type: ignore[no-any-return]
+
+    @property
+    def internal_validator_type(self) -> Optional[int]:
+        return self._data.get("internalValidatorType")
+
+    @property
+    def globally_unique_id(self) -> str:
+        return self._data["globallyUniqueId"]  # type: ignore[no-any-return]
+
+    @property
+    def key_options(self) -> KeyOptions:
+        return KeyOptions(self._data["keyOptions"])
+
+    @property
+    def computed_values(self) -> Optional[Json]:
+        return self._data.get("computedValues")
+
+    @property
+    def object_id(self) -> str:
+        return self._data["objectId"]  # type: ignore[no-any-return]
+
+
 class IndexProperties(JsonWrapper):
     """Properties of an index.
 
@@ -1886,3 +2024,55 @@ def __init__(
     @property
     def satellites(self) -> Optional[List[str]]:
         return cast(Optional[List[str]], self._data.get("satellites"))
+
+
+class AccessToken(JsonWrapper):
+    """User access token.
+
+    Example:
+        .. code-block:: json
+
+            {
+                "id" : 1,
+                "name" : "Token for Service A",
+                "valid_until" : 1782864000,
+                "created_at" : 1765543306,
+                "fingerprint" : "v1...71227d",
+                "active" : true,
+                "token" : "v1.7b2265223a3137471227d"
+            }
+
+    References:
+        - `create-an-access-token `__
+    """  # noqa: E501
+
+    def __init__(self, data: Json) -> None:
+        super().__init__(data)
+
+    @property
+    def active(self) -> bool:
+        return cast(bool, self._data["active"])
+
+    @property
+    def created_at(self) -> int:
+        return cast(int, self._data["created_at"])
+
+    @property
+    def fingerprint(self) -> str:
+        return cast(str, self._data["fingerprint"])
+
+    @property
+    def id(self) -> int:
+        return cast(int, self._data["id"])
+
+    @property
+    def name(self) -> str:
+        return cast(str, self._data["name"])
+
+    @property
+    def token(self) -> str:
+        return cast(str, self._data["token"])
+
+    @property
+    def valid_until(self) -> int:
+        return cast(int, self._data["valid_until"])
diff --git a/arangoasync/version.py b/arangoasync/version.py
index b1a19e3..976498a 100644
--- a/arangoasync/version.py
+++ b/arangoasync/version.py
@@ -1 +1 @@
-__version__ = "0.0.5"
+__version__ = "1.0.3"
diff --git a/docs/admin.rst b/docs/admin.rst
new file mode 100644
index 0000000..6120567
--- /dev/null
+++ b/docs/admin.rst
@@ -0,0 +1,50 @@
+Server Administration
+---------------------
+
+ArangoDB provides operations for server administration and monitoring.
+Most of these operations can only be performed by admin users via the
+``_system`` database.
+
+**Example:**
+
+.. code-block:: python
+
+    from arangoasync import ArangoClient
+    from arangoasync.auth import Auth
+
+    # Initialize the client for ArangoDB.
+    async with ArangoClient(hosts="http://localhost:8529") as client:
+        auth = Auth(username="root", password="passwd")
+
+        # Connect to "_system" database as root user.
+        sys_db = await client.db("_system", auth=auth)
+
+        # Retrieve the database engine.
+        await sys_db.engine()
+
+        # Retrieve the server time.
+        time = await sys_db.time()
+
+        # Check server availability.
+        availability = await sys_db.check_availability()
+
+        # Support info.
+        info = await sys_db.support_info()
+
+        # Get the startup option configuration.
+        options = await sys_db.options()
+
+        # Get the available startup options.
+        options = await sys_db.options_available()
+
+        # Return whether the server is in read-only mode.
+        mode = await sys_db.mode()
+
+        # Get license information.
+        license = await sys_db.license()
+
+        # Execute JavaScript on the server.
+        result = await sys_db.execute("return 1")
+
+        # Get metrics in Prometheus format.
+        metrics = await sys_db.metrics()
diff --git a/docs/backup.rst b/docs/backup.rst
new file mode 100644
index 0000000..de36041
--- /dev/null
+++ b/docs/backup.rst
@@ -0,0 +1,78 @@
+Backups
+-------
+
+Hot Backups are near-instantaneous, consistent snapshots of an entire ArangoDB deployment.
+This includes all databases, collections, indexes, Views, graphs, and users at any given time.
+For more information, refer to the `ArangoDB Manual`_.
+
+.. _ArangoDB Manual: https://docs.arangodb.com
+
+**Example:**
+
+.. code-block:: python
+
+    from arangoasync import ArangoClient
+    from arangoasync.auth import JwtToken
+
+    # Initialize the client for ArangoDB.
+    async with ArangoClient(hosts="http://localhost:8529") as client:
+        token = JwtToken.generate_token(LOGIN_SECRET)
+
+        # Connect to "_system" database as root user.
+        db = await client.db(
+            "_system", auth_method="superuser", token=token, verify=True
+        )
+
+        # Get the backup API wrapper.
+        backup = db.backup
+
+        # Create a backup.
+        result = await backup.create(
+            label="foo",
+            allow_inconsistent=True,
+            force=False,
+            timeout=1000
+        )
+        backup_id = result["id"]
+
+        # Retrieve details on all backups.
+        backups = await backup.get()
+
+        # Retrieve details on a specific backup.
+        details = await backup.get(backup_id=backup_id)
+
+        # Upload a backup to a remote repository.
+        result = await backup.upload(
+            backup_id=backup_id,
+            repository="local://tmp/backups",
+            config={"local": {"type": "local"}}
+        )
+        upload_id = result["uploadId"]
+
+        # Get the status of an upload.
+        status = await backup.upload(upload_id=upload_id)
+
+        # Abort an upload.
+        await backup.upload(upload_id=upload_id, abort=True)
+
+        # Download a backup from a remote repository.
+        result = await backup.download(
+            backup_id=backup_id,
+            repository="local://tmp/backups",
+            config={"local": {"type": "local"}}
+        )
+        download_id = result["downloadId"]
+
+        # Get the status of a download.
+        status = await backup.download(download_id=download_id)
+
+        # Abort a download.
+        await backup.download(download_id=download_id, abort=True)
+
+        # Restore from a backup.
+        await backup.restore(backup_id)
+
+        # Delete a backup.
+        await backup.delete(backup_id)
+
+See :class:`arangoasync.backup.Backup` for API specification.
diff --git a/docs/certificates.rst b/docs/certificates.rst
index c0665fa..ee49e13 100644
--- a/docs/certificates.rst
+++ b/docs/certificates.rst
@@ -108,3 +108,25 @@ Use a client certificate chain
 
 If you want to have fine-grained control over the HTTP connection, you should
 define your HTTP client as described in the :ref:`HTTP` section.
+
+Security features
+=================
+
+See the `ArangoDB Manual`_ for more information on security features.
+
+**Example:**
+
+.. 
code-block:: python
+
+    async with ArangoClient(hosts=url) as client:
+        db = await client.db(
+            sys_db_name, auth_method="superuser", token=token, verify=True
+        )
+
+        # Get TLS data.
+        tls = await db.tls()
+
+        # Reload TLS data.
+        tls = await db.reload_tls()
+
+.. _ArangoDB Manual: https://docs.arangodb.com/stable/develop/http-api/security/
diff --git a/docs/cluster.rst b/docs/cluster.rst
new file mode 100644
index 0000000..c5e58aa
--- /dev/null
+++ b/docs/cluster.rst
@@ -0,0 +1,53 @@
+Clusters
+--------
+
+The cluster-specific API lets you get information about individual
+cluster nodes and the cluster as a whole, as well as monitor and
+administer cluster deployments. For more information on the design
+and architecture, refer to the `ArangoDB Manual`_.
+
+.. _ArangoDB Manual: https://docs.arangodb.com
+
+.. code-block:: python
+
+    from arangoasync import ArangoClient
+    from arangoasync.auth import Auth
+
+    # Initialize the client for ArangoDB.
+    async with ArangoClient(hosts="http://localhost:8529") as client:
+        auth = Auth(username="root", password="passwd")
+
+        # Connect to "_system" database as root user.
+        db = await client.db("_system", auth=auth)
+        cluster = db.cluster
+
+        # Cluster health.
+        health = await cluster.health()
+
+        # DB-Server statistics.
+        db_server = "PRMR-2716c9d0-4b22-4c66-ba3d-f9cd3143e52b"
+        stats = await cluster.statistics(db_server)
+
+        # Cluster endpoints.
+        endpoints = await cluster.endpoints()
+
+        # Cluster server ID and role.
+        server_id = await cluster.server_id()
+        server_role = await cluster.server_role()
+
+        # Maintenance mode.
+        await cluster.toggle_maintenance_mode("on")
+        await cluster.toggle_maintenance_mode("off")
+        await cluster.toggle_server_maintenance_mode(
+            db_server, "maintenance", timeout=30
+        )
+        status = await cluster.server_maintenance_mode(db_server)
+        await cluster.toggle_server_maintenance_mode(db_server, "normal")
+
+        # Rebalance shards.
+        result = await cluster.calculate_imbalance()
+        result = await cluster.calculate_rebalance_plan()
+        result = await cluster.execute_rebalance_plan(moves=[])
+        result = await cluster.rebalance()
+
+See :class:`arangoasync.cluster.Cluster` for API specification.
diff --git a/docs/database.rst b/docs/database.rst
index 851cc9d..f4dc759 100644
--- a/docs/database.rst
+++ b/docs/database.rst
@@ -14,6 +14,7 @@ information.
 
     from arangoasync import ArangoClient
     from arangoasync.auth import Auth
+    from arangoasync.request import Method, Request
 
     # Initialize the client for ArangoDB.
     async with ArangoClient(hosts="http://localhost:8529") as client:
@@ -60,4 +61,10 @@ information.
     # Delete the database. Note that the new users will remain.
     await sys_db.delete_database("test")
 
+    # Example of a custom request.
+    request = Request(
+        method=Method.POST, endpoint="/_admin/execute", data="return 1".encode("utf-8")
+    )
+    response = await sys_db.request(request)
+
 See :class:`arangoasync.client.ArangoClient` and :class:`arangoasync.database.StandardDatabase` for API specification.
diff --git a/docs/document.rst b/docs/document.rst
index c0764e8..da6434b 100644
--- a/docs/document.rst
+++ b/docs/document.rst
@@ -150,6 +150,39 @@ Standard documents are managed via collection API wrapper:
     # Delete one or more matching documents.
     await students.delete_match({"first": "Emma"})
 
+Importing documents in bulk is faster when using specialized methods. Suppose
+our data is in a file in JSON Lines (JSONL) format, where each line is one
+JSON object. Example of a "students.jsonl" file:
+
+.. 
code-block:: json + + {"_key":"john","name":"John Smith","age":35} + {"_key":"katie","name":"Katie Foster","age":28} + +To import this file into the "students" collection, we can use the `import_bulk` API: + +.. code-block:: python + + from arangoasync import ArangoClient + from arangoasync.auth import Auth + import aiofiles + + async with ArangoClient(hosts="http://localhost:8529") as client: + auth = Auth(username="root", password="passwd") + + # Connect to "test" database as root user. + db = await client.db("test", auth=auth) + + # Get the API wrapper for "students" collection. + students = db.collection("students") + + # Read the JSONL file asynchronously. + async with aiofiles.open("students.jsonl", mode="r") as f: + documents = await f.read() + + # Import documents in bulk. + result = await students.import_bulk(documents, doc_type="documents") + You can manage documents via database API wrappers also, but only simple operations (i.e. get, insert, update, replace, delete) are supported and you must provide document IDs instead of keys: diff --git a/docs/foxx.rst b/docs/foxx.rst new file mode 100644 index 0000000..818c80e --- /dev/null +++ b/docs/foxx.rst @@ -0,0 +1,147 @@ +Foxx +---- + +**Foxx** is a microservice framework which lets you define custom HTTP endpoints +that extend ArangoDB's REST API. For more information, refer to `ArangoDB Manual`_. + +.. _ArangoDB Manual: https://docs.arangodb.com + +**Example:** + +.. code-block:: python + + from arangoasync import ArangoClient + from arangoasync.auth import Auth + + # Initialize the client for ArangoDB. + async with ArangoClient(hosts="http://localhost:8529") as client: + auth = Auth(username="root", password="passwd") + + # Connect to "test" database as root user. + db = await client.db("test", auth=auth) + + # Get the Foxx API wrapper. + foxx = db.foxx + + # Define the test mount point. + service_mount = "/test_mount" + + # List services. + await foxx.services() + + # Create a service using a source file. + # In this case, the server must have access to the URL. + service = { + "source": "/tests/static/service.zip", + "configuration": {}, + "dependencies": {}, + } + await foxx.create_service( + mount=service_mount, + service=service, + development=True, + setup=True, + legacy=True + ) + + # Update (upgrade) a service. + await db.foxx.update_service( + mount=service_mount, + service=service, + teardown=True, + setup=True, + legacy=False + ) + + # Replace (overwrite) a service. + await db.foxx.replace_service( + mount=service_mount, + service=service, + teardown=True, + setup=True, + legacy=True, + force=False + ) + + # Get service details. + await foxx.service(service_mount) + + # Manage service configuration. + await foxx.config(service_mount) + await foxx.update_config(service_mount, options={}) + await foxx.replace_config(service_mount, options={}) + + # Manage service dependencies. + await foxx.dependencies(service_mount) + await foxx.update_dependencies(service_mount, options={}) + await foxx.replace_dependencies(service_mount, options={}) + + # Toggle development mode for a service. + await foxx.enable_development(service_mount) + await foxx.disable_development(service_mount) + + # Other miscellaneous functions. 
+    await foxx.readme(service_mount)
+    await foxx.swagger(service_mount)
+    await foxx.download(service_mount)
+    await foxx.commit()
+    await foxx.scripts(service_mount)
+    await foxx.run_script(service_mount, "setup", {})
+    await foxx.run_tests(service_mount, reporter="xunit", output_format="xml")
+
+    # Delete a service.
+    await foxx.delete_service(service_mount)
+
+There are other ways to create, update, and replace services, such as
+providing a file directly instead of a source URL. This is useful when you
+want to deploy a service from a local file system without needing the
+server to access the file directly. When using this method, you must provide
+the appropriate content type in the headers, such as `application/zip` for ZIP files or
+`multipart/form-data` for multipart uploads. The following example demonstrates how to do this:
+
+.. code-block:: python
+
+    import aiofiles
+    import aiohttp
+    import json
+    from arangoasync import ArangoClient
+    from arangoasync.auth import Auth
+
+    # Initialize the client for ArangoDB.
+    async with ArangoClient(hosts="http://localhost:8529") as client:
+        auth = Auth(username="root", password="passwd")
+
+        # Connect to "test" database as root user.
+        db = await client.db("test", auth=auth)
+
+        # Get the Foxx API wrapper.
+        foxx = db.foxx
+
+        # Define the test mount point.
+        mount_point = "/test_mount"
+
+        # Create the service using multipart/form-data.
+        service = aiohttp.FormData()
+        service.add_field(
+            "source",
+            open("./tests/static/service.zip", "rb"),
+            filename="service.zip",
+            content_type="application/zip",
+        )
+        service.add_field("configuration", json.dumps({}))
+        service.add_field("dependencies", json.dumps({}))
+        service_info = await foxx.create_service(
+            mount=mount_point, service=service, headers={"content-type": "multipart/form-data"}
+        )
+
+        # Replace the service using raw data.
+        async with aiofiles.open("./tests/static/service.zip", mode="rb") as f:
+            service = await f.read()
+        service_info = await foxx.replace_service(
+            mount=mount_point, service=service, headers={"content-type": "application/zip"}
+        )
+
+        # Delete the service.
+        await foxx.delete_service(mount_point)
+
+See :class:`arangoasync.foxx.Foxx` for API specification.
diff --git a/docs/index.rst b/docs/index.rst
index 375303c..b9ac826 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -7,7 +7,7 @@ python-arango-async
 
 Welcome to the documentation for python-arango-async_, a Python driver for ArangoDB_.
 
-**Note: This project is still in active development, features might be added or removed.**
+You can check out a demo app at python-arango-async-demo_.
 
 Requirements
 =============
@@ -43,9 +43,11 @@ Contents
 .. toctree::
    :maxdepth: 1
 
+   foxx
    transaction
    view
    analyzer
+   cluster
 
 **API Executions**
 
@@ -59,6 +61,7 @@ Contents
 .. toctree::
    :maxdepth: 1
 
+   admin
    user
 
 **Miscellaneous**
 
@@ -72,6 +75,8 @@ Contents
    certificates
    compression
    serialization
+   backup
+   task
    errors
    errno
    logging
@@ -87,3 +92,4 @@ Contents
 
 .. _ArangoDB: https://www.arangodb.com
 .. _python-arango-async: https://github.com/arangodb/python-arango-async
+.. _python-arango-async-demo: https://github.com/apetenchea/python-arango-async-demo
diff --git a/docs/migration.rst b/docs/migration.rst
index f26e7d6..0353a0d 100644
--- a/docs/migration.rst
+++ b/docs/migration.rst
@@ -2,7 +2,7 @@ Coming from python-arango
 -------------------------
 
 Generally, migrating from `python-arango`_ should be a smooth transition.
For the most part, the API is similar,
-but there are a few things to note._
+but there are a few things to note.
 
 Helpers
 =======
@@ -51,7 +51,7 @@ this is not always consistent.
 
 The asynchronous driver, however, tries to stick to a simple rule:
 
-* If the API returns a camel case key, it will be returned as is.
+* If the API returns a camel case key, it is returned as is: responses are passed along from the server unchanged.
 * Parameters passed from client to server use the snake case equivalent of the
   camel case keys required by the API (e.g. `userName` becomes `user_name`). This
   is done to ensure PEP8 compatibility.
@@ -74,6 +74,13 @@ Serialization
 Check out the :ref:`Serialization` section to learn more about how to implement your own serializer/deserializer. The
 current driver makes use of generic types and allows for a higher degree of customization.
 
+Replication
+===========
+
+Although a minimal replication API is available for observability purposes, its use is not recommended.
+Most of these endpoints are internal APIs that are not meant for end users. If you need to make any
+changes to replication, please do so from the cluster web interface.
+
 Mixing sync and async
 =====================
 
diff --git a/docs/specs.rst b/docs/specs.rst
index 9983716..e8c0a32 100644
--- a/docs/specs.rst
+++ b/docs/specs.rst
@@ -28,6 +28,15 @@ python-arango-async.
 .. automodule:: arangoasync.cursor
    :members:
 
+.. automodule:: arangoasync.backup
+   :members:
+
+.. automodule:: arangoasync.foxx
+   :members:
+
+.. automodule:: arangoasync.cluster
+   :members:
+
 .. automodule:: arangoasync.compression
    :members:
 
@@ -51,3 +60,6 @@ python-arango-async.
 
 .. automodule:: arangoasync.result
    :members:
+
+.. automodule:: arangoasync.replication
+   :members:
diff --git a/docs/task.rst b/docs/task.rst
new file mode 100644
index 0000000..2490507
--- /dev/null
+++ b/docs/task.rst
@@ -0,0 +1,51 @@
+Tasks
+-----
+
+ArangoDB can schedule user-defined JavaScript snippets as one-time or periodic
+(re-scheduled after each execution) tasks. Tasks are executed in the context of
+the database they are defined in.
+
+**Example:**
+
+.. code-block:: python
+
+    from arangoasync import ArangoClient
+    from arangoasync.auth import Auth
+
+    # Initialize the client for ArangoDB.
+    async with ArangoClient(hosts="http://localhost:8529") as client:
+        auth = Auth(username="root", password="passwd")
+
+        # Connect to "test" database as root user.
+        db = await client.db("test", auth=auth)
+
+        # Create a new task which simply prints parameters.
+        await db.create_task(
+            name="test_task",
+            command="""
+                var task = function(params){
+                    var db = require('@arangodb');
+                    db.print(params);
+                }
+                task(params);
+            """,
+            params={"foo": "bar"},
+            offset=300,
+            period=10,
+            task_id="001"
+        )
+
+        # List all active tasks.
+        tasks = await db.tasks()
+
+        # Retrieve details of a task by ID.
+        details = await db.task("001")
+
+        # Delete an existing task by ID.
+        await db.delete_task("001", ignore_missing=True)
+
+
+.. note::
+    When deleting a database, any tasks that were initialized under its context
+    remain active. It is therefore advisable to delete any running tasks before
+    deleting the database.
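+
+For example, a minimal cleanup sketch (assuming your tasks follow a known naming
+convention, such as the ``test_task`` prefix used above, and that ``sys_db`` is a
+handle to the ``_system`` database) could look like this:
+
+.. code-block:: python
+
+    # Delete leftover tasks before dropping the database.
+    for task in await db.tasks():
+        if task["name"].startswith("test_task"):
+            await db.delete_task(task["id"], ignore_missing=True)
+
+    await sys_db.delete_database("test")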
diff --git a/pyproject.toml b/pyproject.toml index c5c890f..b01c76f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -49,6 +49,7 @@ version = { attr = "arangoasync.version.__version__" } [project.optional-dependencies] dev = [ + "aiofiles>=24.1.0", "black>=24.2", "flake8>=7.0", "isort>=5.10", @@ -59,6 +60,7 @@ dev = [ "pytest-cov>=5.0", "sphinx>=7.3", "sphinx_rtd_theme>=2.0", + "allure-pytest>=2.15", "types-setuptools", ] diff --git a/tests/conftest.py b/tests/conftest.py index 98d75de..5025142 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -27,8 +27,10 @@ class GlobalData: graph_name: str = "test_graph" username: str = generate_username() cluster: bool = False - enterprise: bool = False - db_version: version = version.parse("0.0.0") + skip: list[str] = None + foxx_path: str = None + backup_path: str = None + db_version: version.Version = version.parse("0.0.0") global_data = GlobalData() @@ -39,7 +41,7 @@ def pytest_addoption(parser): "--host", action="store", default="127.0.0.1", help="ArangoDB host address" ) parser.addoption( - "--port", action="append", default=["8529"], help="ArangoDB coordinator ports" + "--port", action="append", default=None, help="ArangoDB coordinator ports" ) parser.addoption( "--root", action="store", default="root", help="ArangoDB root user" @@ -54,12 +56,36 @@ def pytest_addoption(parser): "--cluster", action="store_true", help="Run tests in a cluster setup" ) parser.addoption( - "--enterprise", action="store_true", help="Run tests in an enterprise setup" + "--foxx-path", + action="store", + default="/tests/static/service.zip", + help="Foxx tests service path", + ) + parser.addoption( + "--backup-path", + action="store", + default="local://tmp", + help="Backup tests repository path", + ) + parser.addoption( + "--skip", + action="store", + nargs="*", + choices=[ + "backup", # backup tests + "jwt-secret-keyfile", # server was not configured with a keyfile + "foxx", # foxx is not supported + "js-transactions", # javascript transactions are not supported + "task", # tasks API + "enterprise", # skip what used to be "enterprise-only" before 3.12 + ], + default=[], + help="Skip specific tests", ) def pytest_configure(config): - ports = config.getoption("port") + ports = config.getoption("port") or ["8529"] hosts = [f"http://{config.getoption('host')}:{p}" for p in ports] url = hosts[0] @@ -69,7 +95,9 @@ def pytest_configure(config): global_data.secret = config.getoption("secret") global_data.token = JwtToken.generate_token(global_data.secret) global_data.cluster = config.getoption("cluster") - global_data.enterprise = config.getoption("enterprise") + global_data.skip = config.getoption("skip") + global_data.backup_path = config.getoption("backup_path") + global_data.foxx_path = config.getoption("foxx_path") global_data.graph_name = generate_graph_name() async def get_db_version(): @@ -112,8 +140,18 @@ def cluster(): @pytest.fixture -def enterprise(): - return global_data.enterprise +def backup_path(): + return global_data.backup_path + + +@pytest.fixture +def foxx_path(): + return global_data.foxx_path + + +@pytest.fixture +def skip_tests(): + return global_data.skip @pytest.fixture @@ -256,6 +294,19 @@ async def teardown(): verify=False, ) + # Remove all tasks + test_tasks = [ + task + for task in await sys_db.tasks() + if task["name"].startswith("test_task") + ] + await asyncio.gather( + *( + sys_db.delete_task(task["id"], ignore_missing=True) + for task in test_tasks + ) + ) + # Remove all test users. 
tst_users = [ user["user"] diff --git a/tests/helpers.py b/tests/helpers.py index f2f63f7..2bc04a5 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -62,3 +62,39 @@ def generate_analyzer_name(): str: Random analyzer name. """ return f"test_analyzer_{uuid4().hex}" + + +def generate_task_name(): + """Generate and return a random task name. + + Returns: + str: Random task name. + """ + return f"test_task_{uuid4().hex}" + + +def generate_task_id(): + """Generate and return a random task ID. + + Returns: + str: Random task ID + """ + return f"test_task_id_{uuid4().hex}" + + +def generate_service_mount(): + """Generate and return a random service name. + + Returns: + str: Random service name. + """ + return f"/test_{uuid4().hex}" + + +def generate_token_name(): + """Generate and return a random token name. + + Returns: + str: Random token name. + """ + return f"test_token_{uuid4().hex}" diff --git a/tests/static/cluster-3.11.conf b/tests/static/cluster-3.11.conf deleted file mode 100644 index 86f7855..0000000 --- a/tests/static/cluster-3.11.conf +++ /dev/null @@ -1,14 +0,0 @@ -[starter] -mode = cluster -local = true -address = 0.0.0.0 -port = 8528 - -[auth] -jwt-secret = /tests/static/keyfile - -[args] -all.database.password = passwd -all.database.extended-names = true -all.log.api-enabled = true -all.javascript.allow-admin-execute = true diff --git a/tests/static/service.zip b/tests/static/service.zip new file mode 100644 index 0000000..00bf513 Binary files /dev/null and b/tests/static/service.zip differ diff --git a/tests/static/single-3.11.conf b/tests/static/single-3.11.conf deleted file mode 100644 index df45cb7..0000000 --- a/tests/static/single-3.11.conf +++ /dev/null @@ -1,12 +0,0 @@ -[starter] -mode = single -address = 0.0.0.0 -port = 8528 - -[auth] -jwt-secret = /tests/static/keyfile - -[args] -all.database.password = passwd -all.database.extended-names = true -all.javascript.allow-admin-execute = true diff --git a/tests/test_analyzer.py b/tests/test_analyzer.py index 856b6d7..0557f64 100644 --- a/tests/test_analyzer.py +++ b/tests/test_analyzer.py @@ -11,7 +11,7 @@ @pytest.mark.asyncio -async def test_analyzer_management(db, bad_db, enterprise, db_version): +async def test_analyzer_management(db, bad_db, skip_tests, db_version): analyzer_name = generate_analyzer_name() full_analyzer_name = db.name + "::" + analyzer_name bad_analyzer_name = generate_analyzer_name() @@ -68,7 +68,7 @@ async def test_analyzer_management(db, bad_db, enterprise, db_version): assert await db.delete_analyzer(analyzer_name, ignore_missing=True) is False # Test create geo_s2 analyzer - if enterprise: + if "enterprise" not in skip_tests: analyzer_name = generate_analyzer_name() result = await db.create_analyzer(analyzer_name, "geo_s2", properties={}) assert result["type"] == "geo_s2" diff --git a/tests/test_aql.py b/tests/test_aql.py index ab5ba19..28fa91c 100644 --- a/tests/test_aql.py +++ b/tests/test_aql.py @@ -21,6 +21,7 @@ AQLQueryClearError, AQLQueryExecuteError, AQLQueryExplainError, + AQLQueryHistoryError, AQLQueryKillError, AQLQueryListError, AQLQueryRulesGetError, @@ -96,6 +97,8 @@ async def test_list_queries(superuser, db, bad_db): _ = await superuser.aql.slow_queries(all_queries=True) await aql.clear_slow_queries() await superuser.aql.clear_slow_queries(all_queries=True) + history = await superuser.aql.history() + assert isinstance(history, dict) with pytest.raises(AQLQueryListError): _ = await bad_db.aql.queries() @@ -109,6 +112,8 @@ async def test_list_queries(superuser, db, bad_db): _ = 
await aql.slow_queries(all_queries=True) with pytest.raises(AQLQueryClearError): await aql.clear_slow_queries(all_queries=True) + with pytest.raises(AQLQueryHistoryError): + _ = await bad_db.aql.history() long_running_task.cancel() @@ -274,17 +279,15 @@ async def test_cache_plan_management(db, bad_db, doc_col, docs, db_version): entries = await cache.plan_entries() assert isinstance(entries, list) assert len(entries) > 0 - with pytest.raises(AQLCacheEntriesError) as err: - _ = await bad_db.aql.cache.plan_entries() - assert err.value.error_code == FORBIDDEN + with pytest.raises(AQLCacheEntriesError): + await bad_db.aql.cache.plan_entries() # Clear the cache await cache.clear_plan() entries = await cache.plan_entries() assert len(entries) == 0 - with pytest.raises(AQLCacheClearError) as err: + with pytest.raises(AQLCacheClearError): await bad_db.aql.cache.clear_plan() - assert err.value.error_code == FORBIDDEN @pytest.mark.asyncio diff --git a/tests/test_backup.py b/tests/test_backup.py new file mode 100644 index 0000000..7e6e37e --- /dev/null +++ b/tests/test_backup.py @@ -0,0 +1,44 @@ +import pytest +from packaging import version + +from arangoasync.client import ArangoClient +from arangoasync.exceptions import BackupDeleteError, BackupRestoreError + + +@pytest.mark.asyncio +async def test_backup( + url, sys_db_name, bad_db, token, cluster, db_version, skip_tests, backup_path +): + if "enterprise" in skip_tests: + pytest.skip("Backup API is only available in ArangoDB Enterprise Edition") + if not cluster: + pytest.skip("For simplicity, the backup API is only tested in cluster setups") + if db_version < version.parse("3.12.0"): + pytest.skip( + "For simplicity, the backup API is only tested in the latest versions" + ) + if "backup" in skip_tests: + pytest.skip("Skipping backup tests") + + with pytest.raises(BackupRestoreError): + await bad_db.backup.restore("foobar") + with pytest.raises(BackupDeleteError): + await bad_db.backup.delete("foobar") + + async with ArangoClient(hosts=url) as client: + db = await client.db( + sys_db_name, auth_method="superuser", token=token, verify=True + ) + backup = db.backup + result = await backup.create() + backup_id = result["id"] + result = await backup.get() + assert "list" in result + result = await backup.restore(backup_id) + assert "previous" in result + config = {"local": {"type": "local"}} + result = await backup.upload(backup_id, repository=backup_path, config=config) + assert "uploadId" in result + result = await backup.download(backup_id, repository=backup_path, config=config) + assert "downloadId" in result + await backup.delete(backup_id) diff --git a/tests/test_client.py b/tests/test_client.py index 6210412..2218384 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -1,11 +1,20 @@ +import time + import pytest from arangoasync.auth import JwtToken from arangoasync.client import ArangoClient from arangoasync.compression import DefaultCompressionManager +from arangoasync.exceptions import ( + AccessTokenCreateError, + AccessTokenDeleteError, + AccessTokenListError, + ServerEncryptionError, +) from arangoasync.http import DefaultHTTPClient from arangoasync.resolver import DefaultHostResolver, RoundRobinHostResolver from arangoasync.version import __version__ +from tests.helpers import generate_token_name @pytest.mark.asyncio @@ -120,16 +129,30 @@ async def test_client_jwt_auth(url, sys_db_name, basic_auth_root): @pytest.mark.asyncio async def test_client_jwt_superuser_auth( - url, sys_db_name, basic_auth_root, token, enterprise + url, 
sys_db_name, basic_auth_root, token, skip_tests ): # successful authentication async with ArangoClient(hosts=url) as client: db = await client.db( sys_db_name, auth_method="superuser", token=token, verify=True ) - if enterprise: + if "enterprise" not in skip_tests: await db.jwt_secrets() - await db.reload_jwt_secrets() + if "jwt-secret-keyfile" not in skip_tests: + await db.reload_jwt_secrets() + + # Get TLS data + tls = await db.tls() + assert isinstance(tls, dict) + + # Reload TLS data + tls = await db.reload_tls() + assert isinstance(tls, dict) + + # Rotate + with pytest.raises(ServerEncryptionError): + # Not allowed on coordinators + await db.encryption() # token missing async with ArangoClient(hosts=url) as client: @@ -137,3 +160,49 @@ async def test_client_jwt_superuser_auth( await client.db( sys_db_name, auth_method="superuser", auth=basic_auth_root, verify=True ) + + +@pytest.mark.asyncio +async def test_client_access_token(url, sys_db_name, basic_auth_root, bad_db): + username = basic_auth_root.username + + async with ArangoClient(hosts=url) as client: + # First login with basic auth + db_auth_basic = await client.db( + sys_db_name, + auth_method="basic", + auth=basic_auth_root, + verify=True, + ) + + # Create an access token + token_name = generate_token_name() + token = await db_auth_basic.create_access_token( + user=username, name=token_name, valid_until=int(time.time() + 3600) + ) + assert token.active is True + + # Cannot create a token with the same name + with pytest.raises(AccessTokenCreateError): + await db_auth_basic.create_access_token( + user=username, name=token_name, valid_until=int(time.time() + 3600) + ) + + # Authenticate with the created token + access_token_db = await client.db( + sys_db_name, + auth_method="basic", + auth=token.token, + verify=True, + ) + + # List access tokens + tokens = await access_token_db.list_access_tokens(username) + assert isinstance(tokens, list) + with pytest.raises(AccessTokenListError): + await bad_db.list_access_tokens(username) + + # Clean up - delete the created token + await access_token_db.delete_access_token(username, token.id) + with pytest.raises(AccessTokenDeleteError): + await access_token_db.delete_access_token(username, token.id) diff --git a/tests/test_cluster.py b/tests/test_cluster.py new file mode 100644 index 0000000..9a68a6b --- /dev/null +++ b/tests/test_cluster.py @@ -0,0 +1,101 @@ +import pytest +from packaging import version + +from arangoasync.client import ArangoClient +from arangoasync.exceptions import ( + ClusterEndpointsError, + ClusterHealthError, + ClusterMaintenanceModeError, + ClusterRebalanceError, + ClusterServerIDError, + ClusterServerRoleError, + ClusterStatisticsError, +) + + +@pytest.mark.asyncio +async def test_cluster( + url, sys_db_name, bad_db, token, skip_tests, cluster, db_version +): + if not cluster: + pytest.skip("Cluster API is only tested in cluster setups") + if "enterprise" in skip_tests or db_version < version.parse("3.12.0"): + pytest.skip( + "For simplicity, the cluster API is only tested in the latest versions" + ) + + # Test errors + with pytest.raises(ClusterHealthError): + await bad_db.cluster.health() + with pytest.raises(ClusterStatisticsError): + await bad_db.cluster.statistics("foo") + with pytest.raises(ClusterEndpointsError): + await bad_db.cluster.endpoints() + with pytest.raises(ClusterServerIDError): + await bad_db.cluster.server_id() + with pytest.raises(ClusterServerRoleError): + await bad_db.cluster.server_role() + with pytest.raises(ClusterMaintenanceModeError): 
+ await bad_db.cluster.toggle_maintenance_mode("on") + with pytest.raises(ClusterMaintenanceModeError): + await bad_db.cluster.toggle_server_maintenance_mode("PRMR0001", "normal") + with pytest.raises(ClusterMaintenanceModeError): + await bad_db.cluster.server_maintenance_mode("PRMR0001") + with pytest.raises(ClusterRebalanceError): + await bad_db.cluster.calculate_imbalance() + with pytest.raises(ClusterRebalanceError): + await bad_db.cluster.rebalance() + with pytest.raises(ClusterRebalanceError): + await bad_db.cluster.calculate_rebalance_plan() + with pytest.raises(ClusterRebalanceError): + await bad_db.cluster.execute_rebalance_plan(moves=[]) + + async with ArangoClient(hosts=url) as client: + db = await client.db( + sys_db_name, auth_method="superuser", token=token, verify=True + ) + cluster = db.cluster + + # Cluster health + health = await cluster.health() + assert "Health" in health + + # DB-Server statistics + db_server = None + for server in health["Health"]: + if server.startswith("PRMR"): + db_server = server + break + assert db_server is not None, f"No DB server found in {health}" + stats = await cluster.statistics(db_server) + assert "enabled" in stats + + # Cluster endpoints + endpoints = await cluster.endpoints() + assert len(endpoints) > 0 + + # Cluster server ID and role + server_id = await cluster.server_id() + assert isinstance(server_id, str) + server_role = await cluster.server_role() + assert isinstance(server_role, str) + + # Maintenance mode + await cluster.toggle_maintenance_mode("on") + await cluster.toggle_maintenance_mode("off") + await cluster.toggle_server_maintenance_mode( + db_server, "maintenance", timeout=30 + ) + status = await cluster.server_maintenance_mode(db_server) + assert isinstance(status, dict) + await cluster.toggle_server_maintenance_mode(db_server, "normal") + + # Rebalance + result = await cluster.calculate_imbalance() + assert isinstance(result, dict) + result = await cluster.calculate_rebalance_plan() + assert isinstance(result, dict) + result = await cluster.execute_rebalance_plan(moves=[]) + assert result == 200 + result = await cluster.rebalance() + assert isinstance(result, dict) diff --git a/tests/test_collection.py b/tests/test_collection.py index d9214dd..2dc4c42 100644 --- a/tests/test_collection.py +++ b/tests/test_collection.py @@ -4,15 +4,26 @@ from arangoasync.errno import DATA_SOURCE_NOT_FOUND, INDEX_NOT_FOUND from arangoasync.exceptions import ( + CollectionChecksumError, + CollectionCompactError, + CollectionConfigureError, CollectionPropertiesError, + CollectionRecalculateCountError, + CollectionRenameError, + CollectionResponsibleShardError, + CollectionRevisionError, + CollectionShardsError, + CollectionStatisticsError, CollectionTruncateError, DocumentCountError, + DocumentInsertError, IndexCreateError, IndexDeleteError, IndexGetError, IndexListError, IndexLoadError, ) +from tests.helpers import generate_col_name def test_collection_attributes(db, doc_col): @@ -22,7 +33,9 @@ def test_collection_attributes(db, doc_col): @pytest.mark.asyncio -async def test_collection_misc_methods(doc_col, bad_col): +async def test_collection_misc_methods(doc_col, bad_col, docs, cluster): + doc = await doc_col.insert(docs[0]) + # Properties properties = await doc_col.properties() assert properties.name == doc_col.name @@ -31,6 +44,75 @@ async def test_collection_misc_methods(doc_col, bad_col): with pytest.raises(CollectionPropertiesError): await bad_col.properties() + # Configure + wfs = not properties.wait_for_sync + new_properties = 
await doc_col.configure(wait_for_sync=wfs)
+    assert new_properties.wait_for_sync == wfs
+    with pytest.raises(CollectionConfigureError):
+        await bad_col.configure(wait_for_sync=wfs)
+
+    # Statistics
+    statistics = await doc_col.statistics()
+    assert statistics.name == doc_col.name
+    assert "figures" in statistics
+    with pytest.raises(CollectionStatisticsError):
+        await bad_col.statistics()
+
+    # Shards
+    if cluster:
+        shard = await doc_col.responsible_shard(doc)
+        assert isinstance(shard, str)
+        with pytest.raises(CollectionResponsibleShardError):
+            await bad_col.responsible_shard(doc)
+        shards = await doc_col.shards(details=True)
+        assert isinstance(shards, dict)
+        with pytest.raises(CollectionShardsError):
+            await bad_col.shards()
+
+    # Revision
+    revision = await doc_col.revision()
+    assert isinstance(revision, str)
+    with pytest.raises(CollectionRevisionError):
+        await bad_col.revision()
+
+    # Checksum
+    checksum = await doc_col.checksum(with_rev=True, with_data=True)
+    assert isinstance(checksum, str)
+    with pytest.raises(CollectionChecksumError):
+        await bad_col.checksum()
+
+    # Recalculate count
+    with pytest.raises(CollectionRecalculateCountError):
+        await bad_col.recalculate_count()
+    await doc_col.recalculate_count()
+
+    # Compact
+    with pytest.raises(CollectionCompactError):
+        await bad_col.compact()
+    res = await doc_col.compact()
+    assert res.name == doc_col.name
+
+
+@pytest.mark.asyncio
+async def test_collection_rename(cluster, db, bad_col, docs):
+    if cluster:
+        pytest.skip("Renaming collections is not supported in cluster deployments.")
+
+    with pytest.raises(CollectionRenameError):
+        await bad_col.rename("new_name")
+
+    col_name = generate_col_name()
+    new_name = generate_col_name()
+    try:
+        await db.create_collection(col_name)
+        col = db.collection(col_name)
+        await col.rename(new_name)
+        assert col.name == new_name
+        doc = await col.insert(docs[0])
+        assert col.get_col_name(doc) == new_name
+    finally:
+        await db.delete_collection(new_name, ignore_missing=True)
+
 
 @pytest.mark.asyncio
 async def test_collection_index(doc_col, bad_col, cluster):
@@ -182,3 +264,20 @@ async def test_collection_truncate_count(docs, doc_col, bad_col):
     await doc_col.truncate(wait_for_sync=True, compact=True)
     cnt = await doc_col.count()
     assert cnt == 0
+
+
+@pytest.mark.asyncio
+async def test_collection_import_bulk(doc_col, bad_col, docs):
+    documents = "\n".join(doc_col.serializer.dumps(doc) for doc in docs)
+
+    # Test errors
+    with pytest.raises(DocumentInsertError):
+        await bad_col.import_bulk(documents, doc_type="documents")
+
+    # Insert documents in bulk
+    result = await doc_col.import_bulk(documents, doc_type="documents")
+
+    # Verify the documents were inserted
+    count = await doc_col.count()
+    assert count == len(docs)
+    assert result["created"] == count
diff --git a/tests/test_connection.py b/tests/test_connection.py
index 568815c..e053e58 100644
--- a/tests/test_connection.py
+++ b/tests/test_connection.py
@@ -223,6 +223,12 @@ async def test_JwtConnection_ping_success(
     status_code = await connection1.ping()
     assert status_code == 200
 
+    # Refresh the token
+    await connection3.refresh_token()
+    status_code = await connection1.ping()
+    assert status_code == 200
+    assert connection3.token != connection1.token
+
 
 @pytest.mark.asyncio
 async def test_JwtSuperuserConnection_ping_success(
diff --git a/tests/test_database.py b/tests/test_database.py
index eb7daa3..519d0ce 100644
--- a/tests/test_database.py
+++ b/tests/test_database.py
@@ -1,27 +1,66 @@
 import asyncio
+import datetime
+import json
 
import pytest +from packaging import version +from arangoasync.client import ArangoClient from arangoasync.collection import StandardCollection from arangoasync.exceptions import ( CollectionCreateError, CollectionDeleteError, + CollectionKeyGeneratorsError, CollectionListError, + DatabaseCompactError, DatabaseCreateError, DatabaseDeleteError, DatabaseListError, DatabasePropertiesError, + DatabaseSupportInfoError, JWTSecretListError, JWTSecretReloadError, + ReplicationApplierConfigError, + ReplicationApplierStateError, + ReplicationClusterInventoryError, + ReplicationDumpError, + ReplicationInventoryError, + ReplicationLoggerStateError, + ReplicationServerIDError, + ServerApiCallsError, + ServerAvailableOptionsGetError, + ServerCheckAvailabilityError, + ServerCurrentOptionsGetError, + ServerEchoError, + ServerEngineError, + ServerExecuteError, + ServerLicenseGetError, + ServerLicenseSetError, + ServerLogLevelError, + ServerLogLevelResetError, + ServerLogLevelSetError, + ServerLogSettingError, + ServerLogSettingSetError, + ServerMetricsError, + ServerModeError, + ServerModeSetError, + ServerReadLogError, + ServerReloadRoutingError, + ServerShutdownError, + ServerShutdownProgressError, ServerStatusError, + ServerTimeError, ServerVersionError, ) +from arangoasync.request import Method, Request from arangoasync.typings import CollectionType, KeyOptions, UserInfo from tests.helpers import generate_col_name, generate_db_name, generate_username @pytest.mark.asyncio -async def test_database_misc_methods(sys_db, db, bad_db, cluster): +async def test_database_misc_methods( + sys_db, db, bad_db, cluster, db_version, url, sys_db_name, token, skip_tests +): # Status status = await sys_db.status() assert status["server"] == "arango" @@ -50,11 +89,172 @@ async def test_database_misc_methods(sys_db, db, bad_db, cluster): await bad_db.reload_jwt_secrets() # Version - version = await sys_db.version() - assert version["version"].startswith("3.") + v = await sys_db.version() + assert v["version"].startswith("3.") with pytest.raises(ServerVersionError): await bad_db.version() + # key generators + if db_version >= version.parse("3.12.0"): + key_generators = await db.key_generators() + assert isinstance(key_generators, list) + with pytest.raises(CollectionKeyGeneratorsError): + await bad_db.key_generators() + + # Administration + with pytest.raises(ServerEngineError): + await bad_db.engine() + result = await db.engine() + assert isinstance(result, dict) + + with pytest.raises(ServerTimeError): + await bad_db.time() + time = await db.time() + assert isinstance(time, datetime.datetime) + + with pytest.raises(ServerCheckAvailabilityError): + await bad_db.check_availability() + assert isinstance(await db.check_availability(), str) + + with pytest.raises(DatabaseSupportInfoError): + await bad_db.support_info() + info = await sys_db.support_info() + assert isinstance(info, dict) + + if db_version >= version.parse("3.12.0"): + with pytest.raises(ServerCurrentOptionsGetError): + await bad_db.options() + options = await sys_db.options() + assert isinstance(options, dict) + with pytest.raises(ServerAvailableOptionsGetError): + await bad_db.options_available() + options_available = await sys_db.options_available() + assert isinstance(options_available, dict) + + with pytest.raises(ServerModeError): + await bad_db.mode() + mode = await sys_db.mode() + assert isinstance(mode, str) + with pytest.raises(ServerModeSetError): + await bad_db.set_mode("foo") + mode = await sys_db.set_mode("default") + assert isinstance(mode, str) 
+ + with pytest.raises(ServerLicenseGetError): + await bad_db.license() + license = await sys_db.license() + assert isinstance(license, dict) + with pytest.raises(ServerLicenseSetError): + await sys_db.set_license('"abc"') + + with pytest.raises(ServerShutdownError): + await bad_db.shutdown() + with pytest.raises(ServerShutdownProgressError): + await bad_db.shutdown_progress() + + with pytest.raises(ServerReloadRoutingError): + await bad_db.reload_routing() + await sys_db.reload_routing() + + with pytest.raises(ServerEchoError): + await bad_db.echo() + result = await sys_db.echo() + assert isinstance(result, dict) + + with pytest.raises(ServerExecuteError): + await bad_db.execute("return 1") + result = await sys_db.execute("return 1") + assert result == 1 + + with pytest.raises(DatabaseCompactError): + await bad_db.compact() + async with ArangoClient(hosts=url) as client: + db = await client.db( + sys_db_name, auth_method="superuser", token=token, verify=True + ) + await db.compact() + + # Custom Request + request = Request( + method=Method.POST, endpoint="/_admin/execute", data="return 1".encode("utf-8") + ) + response = await sys_db.request(request) + assert json.loads(response.raw_body) == 1 + + if "enterprise" not in skip_tests and db_version >= version.parse("3.12.0"): + # API calls + with pytest.raises(ServerApiCallsError): + await bad_db.api_calls() + result = await sys_db.api_calls() + assert isinstance(result, dict) + + +@pytest.mark.asyncio +async def test_metrics(db, bad_db): + with pytest.raises(ServerMetricsError): + await bad_db.metrics() + metrics = await db.metrics() + assert isinstance(metrics, str) + + +@pytest.mark.asyncio +async def test_replication(db, bad_db, cluster): + with pytest.raises(ReplicationInventoryError): + await bad_db.replication.inventory("id") + with pytest.raises(ReplicationDumpError): + await bad_db.replication.dump("test_collection") + if cluster: + with pytest.raises(ReplicationClusterInventoryError): + await bad_db.replication.cluster_inventory() + result = await db.replication.cluster_inventory() + assert isinstance(result, dict) + if not cluster: + with pytest.raises(ReplicationLoggerStateError): + await bad_db.replication.logger_state() + result = await db.replication.logger_state() + assert isinstance(result, dict) + with pytest.raises(ReplicationApplierConfigError): + await bad_db.replication.applier_config() + result = await db.replication.applier_config() + assert isinstance(result, dict) + with pytest.raises(ReplicationApplierStateError): + await bad_db.replication.applier_state() + result = await db.replication.applier_state() + assert isinstance(result, dict) + with pytest.raises(ReplicationServerIDError): + await bad_db.replication.server_id() + result = await db.replication.server_id() + assert isinstance(result, str) + + +@pytest.mark.asyncio +async def test_logs(sys_db, bad_db): + with pytest.raises(ServerReadLogError): + await bad_db.read_log_entries() + result = await sys_db.read_log_entries() + assert isinstance(result, dict) + with pytest.raises(ServerLogLevelError): + await bad_db.log_levels() + result = await sys_db.log_levels() + assert isinstance(result, dict) + with pytest.raises(ServerLogLevelSetError): + await bad_db.set_log_levels() + new_levels = {"agency": "DEBUG", "engines": "INFO", "threads": "WARNING"} + result = await sys_db.set_log_levels(**new_levels) + assert isinstance(result, dict) + with pytest.raises(ServerLogLevelResetError): + await bad_db.reset_log_levels() + result = await sys_db.reset_log_levels() + 
assert isinstance(result, dict) + with pytest.raises(ServerLogSettingError): + await bad_db.log_settings() + result = await sys_db.log_settings() + assert isinstance(result, dict) + with pytest.raises(ServerLogSettingSetError): + await bad_db.set_log_settings() + result = await sys_db.set_log_settings() + assert isinstance(result, dict) + @pytest.mark.asyncio async def test_create_drop_database( diff --git a/tests/test_foxx.py b/tests/test_foxx.py new file mode 100644 index 0000000..e972dc2 --- /dev/null +++ b/tests/test_foxx.py @@ -0,0 +1,247 @@ +import asyncio +import json + +import aiofiles +import aiohttp +import pytest + +from arangoasync.exceptions import ( + FoxxCommitError, + FoxxConfigGetError, + FoxxConfigReplaceError, + FoxxConfigUpdateError, + FoxxDependencyGetError, + FoxxDependencyReplaceError, + FoxxDependencyUpdateError, + FoxxDevModeDisableError, + FoxxDevModeEnableError, + FoxxDownloadError, + FoxxReadmeGetError, + FoxxScriptListError, + FoxxScriptRunError, + FoxxServiceCreateError, + FoxxServiceDeleteError, + FoxxServiceGetError, + FoxxServiceListError, + FoxxServiceReplaceError, + FoxxServiceUpdateError, + FoxxSwaggerGetError, + FoxxTestRunError, +) +from tests.helpers import generate_service_mount + +service_name = "test" + + +@pytest.mark.asyncio +async def test_foxx(db, bad_db, skip_tests, foxx_path): + if "foxx" in skip_tests: + pytest.skip("Skipping Foxx tests") + + # Test errors + with pytest.raises(FoxxServiceGetError): + await bad_db.foxx.service(service_name) + with pytest.raises(FoxxServiceListError): + await bad_db.foxx.services() + with pytest.raises(FoxxServiceCreateError): + await bad_db.foxx.create_service( + mount=generate_service_mount(), + service={}, + headers={"content-type": "application/zip"}, + ) + with pytest.raises(FoxxServiceDeleteError): + await bad_db.foxx.delete_service(service_name) + with pytest.raises(FoxxServiceReplaceError): + await bad_db.foxx.replace_service( + mount=generate_service_mount(), + service={}, + ) + with pytest.raises(FoxxServiceUpdateError): + await bad_db.foxx.update_service(mount=generate_service_mount(), service={}) + with pytest.raises(FoxxConfigGetError): + await bad_db.foxx.config("foo") + with pytest.raises(FoxxConfigReplaceError): + await bad_db.foxx.replace_config(mount="foo", options={}) + with pytest.raises(FoxxConfigUpdateError): + await bad_db.foxx.update_config(mount="foo", options={}) + with pytest.raises(FoxxDependencyGetError): + await bad_db.foxx.dependencies("foo") + with pytest.raises(FoxxDependencyReplaceError): + await bad_db.foxx.replace_dependencies(mount="foo", options={}) + with pytest.raises(FoxxDependencyUpdateError): + await bad_db.foxx.update_dependencies(mount="foo", options={}) + with pytest.raises(FoxxDevModeEnableError): + await bad_db.foxx.enable_development("foo") + with pytest.raises(FoxxDevModeDisableError): + await bad_db.foxx.disable_development("foo") + with pytest.raises(FoxxReadmeGetError): + await bad_db.foxx.readme("foo") + with pytest.raises(FoxxSwaggerGetError): + await bad_db.foxx.swagger("foo") + with pytest.raises(FoxxDownloadError): + await bad_db.foxx.download("foo") + with pytest.raises(FoxxCommitError): + await bad_db.foxx.commit() + + services = await db.foxx.services() + assert isinstance(services, list) + + # Service as a path + mount1 = generate_service_mount() + service1 = { + "source": foxx_path, + "configuration": {"LOG_LEVEL": "info"}, + "dependencies": {}, + } + service_info = await db.foxx.create_service(mount=mount1, service=service1) + assert 
+    # Service as a path
+    mount1 = generate_service_mount()
+    service1 = {
+        "source": foxx_path,
+        "configuration": {"LOG_LEVEL": "info"},
+        "dependencies": {},
+    }
+    service_info = await db.foxx.create_service(mount=mount1, service=service1)
+    assert service_info["mount"] == mount1
+
+    # Service as a FormData
+    mount2 = generate_service_mount()
+    service2 = aiohttp.FormData()
+    service2.add_field(
+        "source",
+        open(f".{foxx_path}", "rb"),
+        filename="service.zip",
+        content_type="application/zip",
+    )
+    service2.add_field("configuration", json.dumps({"LOG_LEVEL": "info"}))
+    service2.add_field("dependencies", json.dumps({}))
+    service_info = await db.foxx.create_service(
+        mount=mount2, service=service2, headers={"content-type": "multipart/form-data"}
+    )
+    assert service_info["mount"] == mount2
+
+    # Service as raw data
+    mount3 = generate_service_mount()
+    async with aiofiles.open(f".{foxx_path}", mode="rb") as f:
+        service3 = await f.read()
+    service_info = await db.foxx.create_service(
+        mount=mount3, service=service3, headers={"content-type": "application/zip"}
+    )
+    assert service_info["mount"] == mount3
+
+    # Delete service
+    await db.foxx.delete_service(mount3)
+
+    # Replace service
+    service4 = {
+        "source": foxx_path,
+        "configuration": {"LOG_LEVEL": "info"},
+        "dependencies": {},
+    }
+    service_info = await db.foxx.replace_service(mount=mount2, service=service4)
+    assert service_info["mount"] == mount2
+
+    async with aiofiles.open(f".{foxx_path}", mode="rb") as f:
+        service5 = await f.read()
+    service_info = await db.foxx.replace_service(
+        mount=mount1, service=service5, headers={"content-type": "application/zip"}
+    )
+    assert service_info["mount"] == mount1
+
+    # Update service
+    service6 = {
+        "source": foxx_path,
+        "configuration": {"LOG_LEVEL": "debug"},
+        "dependencies": {},
+    }
+    service_info = await db.foxx.update_service(mount=mount1, service=service6)
+    assert service_info["mount"] == mount1
+
+    services = await db.foxx.services(exclude_system=True)
+    assert len(services) == 2
+
+    # Configuration
+    config = await db.foxx.config(mount1)
+    assert isinstance(config, dict)
+    config = await db.foxx.replace_config(mount=mount1, options={})
+    assert isinstance(config, dict)
+    config = await db.foxx.update_config(mount=mount1, options={})
+    assert isinstance(config, dict)
+
+    # Dependencies
+    config = await db.foxx.dependencies(mount1)
+    assert isinstance(config, dict)
+    config = await db.foxx.replace_dependencies(mount=mount1, options={})
+    assert isinstance(config, dict)
+    config = await db.foxx.update_dependencies(mount=mount1, options={})
+    assert isinstance(config, dict)
+
+    # Scripts
+    scripts = await db.foxx.scripts(mount1)
+    assert "setup" in scripts
+    assert "teardown" in scripts
+
+    # List missing service scripts
+    with pytest.raises(FoxxScriptListError):
+        await db.foxx.scripts("invalid_mount")
+
+    # Run service script
+    assert await db.foxx.run_script(mount1, "setup", []) == {}
+    assert await db.foxx.run_script(mount2, "teardown", []) == {}
+
+    # Run missing service script
+    with pytest.raises(FoxxScriptRunError):
+        await db.foxx.run_script(mount1, "invalid", ())
+
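+    # The runs below cover different reporter/output-format combinations:
+    # "suite" returns a JSON document, "stream" yields x-ldjson or plain-text
+    # chunks, and "xunit" can emit XML.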
+    # Run tests on service
+    result = await db.foxx.run_tests(
+        mount=mount1, reporter="suite", idiomatic=True, filter="science"
+    )
+    result = json.loads(result)
+    assert "stats" in result
+    assert "tests" in result
+    assert "suites" in result
+
+    result = await db.foxx.run_tests(
+        mount=mount2, reporter="stream", output_format="x-ldjson"
+    )
+    for result_part in result.split("\r\n"):
+        if len(result_part) == 0:
+            continue
+        assert result_part.startswith("[")
+        assert result_part.endswith("]")
+
+    result = await db.foxx.run_tests(
+        mount=mount1, reporter="stream", output_format="text"
+    )
+    assert result.startswith("[")
+    assert result.endswith("]") or result.endswith("\r\n")
+
+    result = await db.foxx.run_tests(
+        mount=mount2, reporter="xunit", output_format="xml"
+    )
+    assert result.startswith("[")
+    assert result.endswith("]") or result.endswith("\r\n")
+
+    # Run tests on missing service
+    with pytest.raises(FoxxTestRunError):
+        await db.foxx.run_tests("foo")
+
+    # Development mode
+    result = await db.foxx.enable_development(mount1)
+    assert result["mount"] == mount1
+    result = await db.foxx.disable_development(mount1)
+    assert result["mount"] == mount1
+
+    # Readme
+    result = await db.foxx.readme(mount1)
+    assert isinstance(result, str)
+
+    # Swagger
+    result = await db.foxx.swagger(mount1)
+    assert isinstance(result, dict)
+
+    # Download service
+    result = await db.foxx.download(mount1)
+    assert isinstance(result, bytes)
+
+    # Commit
+    await db.foxx.commit(replace=True)
+
+    # Delete remaining services
+    await asyncio.gather(
+        db.foxx.delete_service(mount1),
+        db.foxx.delete_service(mount2),
+    )
diff --git a/tests/test_graph.py b/tests/test_graph.py
index 6d5fcbe..5d70255 100644
--- a/tests/test_graph.py
+++ b/tests/test_graph.py
@@ -56,10 +56,10 @@ async def test_graph_basic(db, bad_db):
 
 
 @pytest.mark.asyncio
-async def test_graph_properties(db, bad_graph, cluster, enterprise):
+async def test_graph_properties(db, bad_graph, cluster, skip_tests):
     # Create a graph
     name = generate_graph_name()
-    is_smart = cluster and enterprise
+    is_smart = cluster and "enterprise" not in skip_tests
     options = GraphOptions(number_of_shards=3)
     graph = await db.create_graph(name, is_smart=is_smart, options=options)
diff --git a/tests/test_task.py b/tests/test_task.py
new file mode 100644
index 0000000..008e25d
--- /dev/null
+++ b/tests/test_task.py
@@ -0,0 +1,82 @@
+import pytest
+
+from arangoasync.exceptions import (
+    TaskCreateError,
+    TaskDeleteError,
+    TaskGetError,
+    TaskListError,
+)
+from tests.helpers import generate_task_id, generate_task_name
+
+
+@pytest.mark.asyncio
+async def test_task_management(sys_db, bad_db, skip_tests):
+    # This test intentionally uses the system database because cleaning up tasks is
+    # easier there.
+
+    if "task" in skip_tests:
+        pytest.skip("Skipping task tests")
+
+    test_command = 'require("@arangodb").print(params);'
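+    # The command is server-side JavaScript; the params supplied at creation
+    # time are made available to it when the task runs.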
+
+    # Test errors
+    with pytest.raises(TaskCreateError):
+        await bad_db.create_task(command=test_command)
+    with pytest.raises(TaskGetError):
+        await bad_db.task("non_existent_task_id")
+    with pytest.raises(TaskListError):
+        await bad_db.tasks()
+    with pytest.raises(TaskDeleteError):
+        await bad_db.delete_task("non_existent_task_id")
+
+    # Create a task with a random ID
+    task_name = generate_task_name()
+    new_task = await sys_db.create_task(
+        name=task_name,
+        command=test_command,
+        params={"foo": 1, "bar": 2},
+        offset=1,
+    )
+    assert new_task["name"] == task_name
+    task_id = new_task["id"]
+    assert await sys_db.task(task_id) == new_task
+
+    # Delete task
+    assert await sys_db.delete_task(task_id) is True
+
+    # Create a task with a specific ID
+    task_name = generate_task_name()
+    task_id = generate_task_id()
+    new_task = await sys_db.create_task(
+        name=task_name,
+        command=test_command,
+        params={"foo": 1, "bar": 2},
+        offset=1,
+        period=10,
+        task_id=task_id,
+    )
+    assert new_task["name"] == task_name
+    assert new_task["id"] == task_id
+
+    # Try to create a duplicate task
+    with pytest.raises(TaskCreateError):
+        await sys_db.create_task(
+            name=task_name,
+            command=test_command,
+            params={"foo": 1, "bar": 2},
+            task_id=task_id,
+        )
+
+    # Test get missing task
+    with pytest.raises(TaskGetError):
+        await sys_db.task(generate_task_id())
+
+    # Test list tasks
+    tasks = await sys_db.tasks()
+    assert len(tasks) == 1
+
+    # Delete tasks
+    assert await sys_db.delete_task(task_id) is True
+    assert await sys_db.delete_task(task_id, ignore_missing=True) is False
+    with pytest.raises(TaskDeleteError):
+        await sys_db.delete_task(task_id)
diff --git a/tests/test_transaction.py b/tests/test_transaction.py
index f7d7f76..1a7363c 100644
--- a/tests/test_transaction.py
+++ b/tests/test_transaction.py
@@ -14,7 +14,10 @@
 
 
 @pytest.mark.asyncio
-async def test_transaction_execute_raw(db, doc_col, docs):
+async def test_transaction_execute_raw(db, doc_col, docs, skip_tests):
+    if "js-transactions" in skip_tests:
+        pytest.skip("Skipping JS transaction tests")
+
     # Test a valid JS transaction
     doc = docs[0]
     key = doc["_key"]
diff --git a/tests/test_typings.py b/tests/test_typings.py
index fd04fa1..48e9eb0 100644
--- a/tests/test_typings.py
+++ b/tests/test_typings.py
@@ -1,7 +1,9 @@
 import pytest
 
 from arangoasync.typings import (
+    AccessToken,
     CollectionInfo,
+    CollectionStatistics,
     CollectionStatus,
     CollectionType,
     EdgeDefinitionOptions,
@@ -386,3 +388,87 @@ def test_EdgeDefinitionOptions():
     )
 
     assert options.satellites == ["col1", "col2"]
+
+
+def test_CollectionStatistics():
+    data = {
+        "figures": {
+            "indexes": {"count": 1, "size": 1234},
+            "documentsSize": 5601,
+            "cacheInUse": False,
+            "cacheSize": 0,
+            "cacheUsage": 0,
+        },
+        "writeConcern": 1,
+        "waitForSync": False,
+        "usesRevisionsAsDocumentIds": True,
+        "syncByRevision": True,
+        "statusString": "loaded",
+        "id": "69123",
+        "isSmartChild": False,
+        "schema": None,
+        "name": "products",
+        "type": 2,
+        "status": 3,
+        "count": 1,
+        "cacheEnabled": False,
+        "isSystem": False,
+        "internalValidatorType": 0,
+        "globallyUniqueId": "hB7C02EE43DCE/69123",
+        "keyOptions": {
+            "allowUserKeys": True,
+            "type": "traditional",
+            "lastValue": 69129,
+        },
+        "computedValues": None,
+        "objectId": "69124",
+    }
+
+    stats = CollectionStatistics(data)
+
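+    # The wrapper exposes the camelCase server payload through snake_case
+    # properties; the raw dict remains available via to_dict().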
+    assert stats.figures == data["figures"]
+    assert stats.write_concern == 1
+    assert stats.wait_for_sync is False
+    assert stats.use_revisions_as_document_ids is True
+    assert stats.sync_by_revision is True
+    assert stats.status_string == "loaded"
+    assert stats.id == "69123"
+    assert stats.is_smart_child is False
+    assert stats.schema is None
+    assert stats.name == "products"
+    assert stats.type == CollectionType.DOCUMENT
+    assert stats.status == CollectionStatus.LOADED
+    assert stats.count == 1
+    assert stats.cache_enabled is False
+    assert stats.is_system is False
+    assert stats.internal_validator_type == 0
+    assert stats.globally_unique_id == "hB7C02EE43DCE/69123"
+    assert isinstance(stats.key_options, KeyOptions)
+    assert stats.key_options["type"] == "traditional"
+    assert stats.computed_values is None
+    assert stats.object_id == "69124"
+
+
+def test_AccessToken():
+    data = {
+        "active": True,
+        "created_at": 1720000000,
+        "fingerprint": "abc123fingerprint",
+        "id": 42,
+        "name": "ci-token",
+        "token": "v2.local.eyJhbGciOi...",
+        "valid_until": 1720003600,
+    }
+
+    access_token = AccessToken(data)
+
+    assert access_token.active is True
+    assert access_token.created_at == 1720000000
+    assert access_token.fingerprint == "abc123fingerprint"
+    assert access_token.id == 42
+    assert access_token.name == "ci-token"
+    assert access_token.token == "v2.local.eyJhbGciOi..."
+    assert access_token.valid_until == 1720003600
+
+    # JsonWrapper behavior
+    assert access_token.to_dict() == data