
Commit 15dc4fa

Addition of async/await example
1 parent a382d46 commit 15dc4fa

File tree

1 file changed: +85 -0 lines changed

@@ -0,0 +1,85 @@
# Demonstration of asyncio with Python (Pytest is the automation framework)

# Includes the following:

# Usage of aiohttp
# Usage of asyncio.gather
# Marking tests as async using the @pytest.mark.asyncio marker

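# Note: the @pytest.mark.asyncio marker used below comes from the
# pytest-asyncio plugin, which must be installed alongside pytest.
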
import pytest
import aiohttp
import asyncio
import json
import ssl
import os
import sys
from dotenv import load_dotenv
import certifi

load_dotenv()

user_name = os.getenv('LT_USERNAME')
api_key = os.getenv('LT_ACCESS_KEY')

# Inspiration - https://stackoverflow.com/questions/53199248/get-json-using-python-and-asyncio
async def get_top_reddit_threads(subreddit, session):
    url = f"https://www.reddit.com/r/{subreddit}/top.json?sort=top&t=day&limit=20"

    # Reference JSON - https://www.reddit.com/r/Playwright/top.json?sort=top&t=day&limit=20
    data = await get_json(session, url)

    if data:
        data_decoded = json.loads(data.decode('utf-8'))
        print(f'\nReddit details for {subreddit}')
        print(f'____________________________\n')
        for post in data_decoded['data']['children']:
            score = post['data']['score']
            title = post['data']['title']
            link = post['data']['url']
            if score and title and link:
                print(f'Score: {score} | Title: {title} | Link: ({link})')

# Fetch JSON data from a URL

async def get_json(session, url):
    headers = {"accept": "application/json"}
    try:
        async with session.get(url, headers=headers) as response:
            # Response 200 - We have the data!
            assert response.status == 200
            return await response.read()
    except aiohttp.client_exceptions.ClientConnectorCertificateError as e:
        print(f"SSL Certificate Error: {e}")
        return None
    except Exception as e:
        print(f"Error fetching data: {e}")
        return None

# Refer LambdaTest API documentation - https://www.lambdatest.com/support/api-doc/

async def get_lambdatest_sessions(session):
    url = f"https://{user_name}:{api_key}@api.lambdatest.com/automation/api/v1/sessions?limit=40"
    data = await get_json(session, url)

    if data:
        data_decoded = json.loads(data.decode('utf-8'))
        for test in data_decoded['data']:
            test_id = test['test_id']
            build_name = test['build_name']
            status_ind = test['status_ind']
            print(f"Build: {build_name} | ID: {test_id} | Status: {status_ind}")

@pytest.mark.asyncio
async def test_fetch_lambdatest_platforms():
    ssl_context = ssl.create_default_context(cafile=certifi.where())
    async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=ssl_context)) as session:
        await get_lambdatest_sessions(session)

@pytest.mark.asyncio
async def test_fetch_reddit_threads():
    ssl_context = ssl.create_default_context(cafile=certifi.where())
    async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=ssl_context)) as session:
        subreddits = ['Selenium', 'Playwright', 'Python', 'asyncio']
        tasks = [get_top_reddit_threads(subreddit, session) for subreddit in subreddits]
        # Gather the tasks using gather() method of asyncio
        await asyncio.gather(*tasks)
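
For readers new to the pattern in the two tests above: each test builds a list of coroutines and awaits them together with asyncio.gather, which runs them concurrently on the event loop. A minimal standalone sketch of the same fan-out idea, runnable outside pytest (the URLs and function names here are placeholders, not part of the commit):

import asyncio
import aiohttp

async def fetch_status(session, url):
    # Fetch one URL and return its HTTP status code.
    async with session.get(url) as response:
        return url, response.status

async def main():
    urls = ["https://www.example.com", "https://www.python.org"]
    async with aiohttp.ClientSession() as session:
        tasks = [fetch_status(session, url) for url in urls]
        # gather() awaits all coroutines concurrently and preserves their order.
        for url, status in await asyncio.gather(*tasks):
            print(f"{url} -> {status}")

if __name__ == "__main__":
    asyncio.run(main())

To run the committed tests themselves, a .env file providing LT_USERNAME and LT_ACCESS_KEY is also expected, since load_dotenv() reads the LambdaTest credentials from it at import time.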
