From 3e4d088a966ab968cb6f15f85db91df735ffdfc6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Filip=20Karlo=20Do=C5=A1ilovi=C4=87?= Date: Sun, 9 Nov 2025 21:59:51 +0100 Subject: [PATCH 01/19] Update to new minor release. --- pyproject.toml | 2 +- src/judge0/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 58a84aaa..8595aac9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "judge0" -version = "0.0.6" +version = "0.1.0.dev0" description = "The official Python SDK for Judge0." readme = "README.md" requires-python = ">=3.10" diff --git a/src/judge0/__init__.py b/src/judge0/__init__.py index ae62b15f..b74df46e 100644 --- a/src/judge0/__init__.py +++ b/src/judge0/__init__.py @@ -30,7 +30,7 @@ from .retry import MaxRetries, MaxWaitTime, RegularPeriodRetry from .submission import Submission -__version__ = "0.0.6" +__version__ = "0.1.0.dev0" __all__ = [ "ATD", From fa3c86f074577cbb60757b76d1e5f34a74257c84 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herman=20Zvonimir=20Do=C5=A1ilovi=C4=87?= Date: Mon, 10 Nov 2025 00:45:26 +0100 Subject: [PATCH 02/19] Add Custom Judge0 Client example --- README.md | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/README.md b/README.md index 08de5602..aee1b648 100644 --- a/README.md +++ b/README.md @@ -349,3 +349,25 @@ result = judge0.run(submissions=submission) print(result.stdout) print(result.post_execution_filesystem.find("./my_dir2/my_file2.txt")) ``` + +### Custom Judge0 Client + +This example shows how to use Judge0 Python SDK with your own Judge0 instance. 
+ +```python +import judge0 + +client = judge0.Client(endpoint="http://127.0.0.1:2358", auth_headers=None) + +source_code = """ +#include + +int main() { + printf("hello, world\\n"); + return 0; +} +""" + +result = judge0.run(client=client, source_code=source_code, language=judge0.C) +print(result.stdout) +``` From 3c8efcc23836f800d6f46262d61a714c9ebb9c40 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herman=20Zvonimir=20Do=C5=A1ilovi=C4=87?= Date: Mon, 10 Nov 2025 00:58:11 +0100 Subject: [PATCH 03/19] Simplify custom client example --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index aee1b648..db853f01 100644 --- a/README.md +++ b/README.md @@ -357,7 +357,7 @@ This example shows how to use Judge0 Python SDK with your own Judge0 instance. ```python import judge0 -client = judge0.Client(endpoint="http://127.0.0.1:2358", auth_headers=None) +client = judge0.Client("http://127.0.0.1:2358", None) source_code = """ #include From 0955d857ac2ec5715cfc886cf3b0c61ca2c7b87b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herman=20Zvonimir=20Do=C5=A1ilovi=C4=87?= Date: Mon, 10 Nov 2025 01:00:34 +0100 Subject: [PATCH 04/19] Don't require auth_headers in judge0.Client --- README.md | 2 +- src/judge0/clients.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index db853f01..c2dc8ae7 100644 --- a/README.md +++ b/README.md @@ -357,7 +357,7 @@ This example shows how to use Judge0 Python SDK with your own Judge0 instance. 
```python import judge0 -client = judge0.Client("http://127.0.0.1:2358", None) +client = judge0.Client("http://127.0.0.1:2358") source_code = """ #include diff --git a/src/judge0/clients.py b/src/judge0/clients.py index 49d1ac61..7b50047c 100644 --- a/src/judge0/clients.py +++ b/src/judge0/clients.py @@ -33,7 +33,7 @@ class Client: def __init__( self, endpoint, - auth_headers, + auth_headers=None, *, retry_strategy: Optional[RetryStrategy] = None, ) -> None: From fd2ab3c80e7c6bd48d952ae63c14a817d68b8ce0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herman=20Zvonimir=20Do=C5=A1ilovi=C4=87?= Date: Mon, 10 Nov 2025 21:27:04 +0100 Subject: [PATCH 05/19] Fix bug in suppressing preview warning message. --- src/judge0/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/judge0/__init__.py b/src/judge0/__init__.py index b74df46e..069c96ca 100644 --- a/src/judge0/__init__.py +++ b/src/judge0/__init__.py @@ -65,9 +65,9 @@ JUDGE0_IMPLICIT_CE_CLIENT = None JUDGE0_IMPLICIT_EXTRA_CE_CLIENT = None -SUPPRESS_PREVIEW_WARNING = os.getenv("JUDGE0_SUPPRESS_PREVIEW_WARNING") logger = logging.getLogger(__name__) +suppress_preview_warning = os.getenv("JUDGE0_SUPPRESS_PREVIEW_WARNING") is not None def _get_implicit_client(flavor: Flavor) -> Client: @@ -107,7 +107,7 @@ def _get_implicit_client(flavor: Flavor) -> Client: def _get_preview_client(flavor: Flavor) -> Union[Judge0CloudCE, Judge0CloudExtraCE]: - if SUPPRESS_PREVIEW_WARNING is not None: + if not suppress_preview_warning: logger.warning( "You are using a preview version of the client which is not recommended" " for production.\n" From 61324c3d602ebaed1b8f95b3fc9dc9dfc1196aa3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herman=20Zvonimir=20Do=C5=A1ilovi=C4=87?= Date: Mon, 10 Nov 2025 21:30:15 +0100 Subject: [PATCH 06/19] Bump to version 0.0.7 --- pyproject.toml | 2 +- src/judge0/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 
8595aac9..e0f27ab2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "judge0" -version = "0.1.0.dev0" +version = "0.0.7" description = "The official Python SDK for Judge0." readme = "README.md" requires-python = ">=3.10" diff --git a/src/judge0/__init__.py b/src/judge0/__init__.py index 069c96ca..28a89f41 100644 --- a/src/judge0/__init__.py +++ b/src/judge0/__init__.py @@ -30,7 +30,7 @@ from .retry import MaxRetries, MaxWaitTime, RegularPeriodRetry from .submission import Submission -__version__ = "0.1.0.dev0" +__version__ = "0.0.7" __all__ = [ "ATD", From 78ef628afa416f9acfba6bbd5dca23e1d4037c31 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herman=20Zvonimir=20Do=C5=A1ilovi=C4=87?= Date: Mon, 10 Nov 2025 21:37:45 +0100 Subject: [PATCH 07/19] Prepare for next release --- pyproject.toml | 2 +- src/judge0/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index e0f27ab2..8595aac9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "judge0" -version = "0.0.7" +version = "0.1.0.dev0" description = "The official Python SDK for Judge0." 
readme = "README.md" requires-python = ">=3.10" diff --git a/src/judge0/__init__.py b/src/judge0/__init__.py index 28a89f41..069c96ca 100644 --- a/src/judge0/__init__.py +++ b/src/judge0/__init__.py @@ -30,7 +30,7 @@ from .retry import MaxRetries, MaxWaitTime, RegularPeriodRetry from .submission import Submission -__version__ = "0.0.7" +__version__ = "0.1.0.dev0" __all__ = [ "ATD", From fcef3785ee00bd5ebc4f436d1996065a88a0cf5f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herman=20Zvonimir=20Do=C5=A1ilovi=C4=87?= Date: Mon, 10 Nov 2025 21:57:20 +0100 Subject: [PATCH 08/19] Update simple example with Ollama --- README.md | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index c2dc8ae7..6976a0b2 100644 --- a/README.md +++ b/README.md @@ -246,8 +246,12 @@ client = Client( headers={"Authorization": "Bearer " + os.environ.get("OLLAMA_API_KEY")}, ) -system = "You are a helpful assistant that can execute Python code. Only respond with the code to be executed and nothing else. Strip backticks in code blocks." -prompt = "Calculate how many r's are in the word 'strawberry'." +system = """ +You are a helpful assistant that can execute code written in the C programming language. +Only respond with the code written in the C programming language that needs to be executed and nothing else. +Strip the backticks in code blocks. +""" +prompt = "How many r's are in the word 'strawberry'." response = client.chat( model="gpt-oss:120b-cloud", @@ -258,10 +262,10 @@ response = client.chat( ) code = response["message"]["content"] -print(f"Generated code:\n{code}") +print(f"CODE GENERATED BY THE MODEL:\n{code}\n") -result = judge0.run(source_code=code, language=judge0.PYTHON) -print(f"Execution result:\n{result.stdout}") +result = judge0.run(source_code=code, language=judge0.C) +print(f"CODE EXECUTION RESULT:\n{result.stdout}") ``` #### Tool Calling (a.k.a. 
Function Calling) with Ollama From bc2c693299081811858fe81dc07fe0e0637c881a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herman=20Zvonimir=20Do=C5=A1ilovi=C4=87?= Date: Mon, 10 Nov 2025 22:58:16 +0100 Subject: [PATCH 09/19] Update Ollama examples --- README.md | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 6976a0b2..d88e865b 100644 --- a/README.md +++ b/README.md @@ -251,7 +251,7 @@ You are a helpful assistant that can execute code written in the C programming l Only respond with the code written in the C programming language that needs to be executed and nothing else. Strip the backticks in code blocks. """ -prompt = "How many r's are in the word 'strawberry'." +prompt = "How many r's are in the word 'strawberry'?" response = client.chat( model="gpt-oss:120b-cloud", @@ -284,20 +284,20 @@ client = Client( model="qwen3-coder:480b-cloud" messages=[ - {"role": "user", "content": "Calculate how many r's are in the word 'strawberry'."}, + {"role": "user", "content": "How many r's are in the word 'strawberry'?"}, ] tools = [{ "type": "function", "function": { - "name": "execute_python", - "description": "Execute Python code and return result", + "name": "execute_c", + "description": "Execute the C programming language code.", "parameters": { "type": "object", "properties": { "code": { "type": "string", - "description": "The Python code to execute" + "description": "The code written in the C programming language." 
} }, "required": ["code"] @@ -312,16 +312,21 @@ messages.append(response_message) if response_message.tool_calls: for tool_call in response_message.tool_calls: - if tool_call.function.name == "execute_python": - result = judge0.run(source_code=tool_call.function.arguments["code"], language=judge0.PYTHON) + if tool_call.function.name == "execute_c": + code = tool_call.function.arguments["code"] + print(f"CODE GENERATED BY THE MODEL:\n{code}\n") + + result = judge0.run(source_code=code, language=judge0.C) + print(f"CODE EXECUTION RESULT:\n{result.stdout}\n") + messages.append({ "role": "tool", - "tool_name": "execute_python", + "tool_name": "execute_c", "content": result.stdout, }) final_response = client.chat(model=model, messages=messages) -print(final_response["message"]["content"]) +print(f'FINAL RESPONSE BY THE MODEL:\n{final_response["message"]["content"]}') ``` ### Filesystem From 319a2a883aa63cf6dc99d1d98c9517c2560c0034 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herman=20Zvonimir=20Do=C5=A1ilovi=C4=87?= Date: Mon, 10 Nov 2025 23:16:54 +0100 Subject: [PATCH 10/19] Add comments for filesystem example --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index d88e865b..3f4f937f 100644 --- a/README.md +++ b/README.md @@ -331,6 +331,10 @@ print(f'FINAL RESPONSE BY THE MODEL:\n{final_response["message"]["content"]}') ### Filesystem +This example shows how to use Judge0 Python SDK to: +1. Create a submission with additional files in the filesystem which will be available during the execution. +2. Read the files after the execution which were created during the execution. 
+ ```python import judge0 from judge0 import Filesystem, File, Submission @@ -355,6 +359,7 @@ submission = Submission( ) result = judge0.run(submissions=submission) + print(result.stdout) print(result.post_execution_filesystem.find("./my_dir2/my_file2.txt")) ``` From 1a1fd19d3c36c270488bbae31d5cd569b28065a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herman=20Zvonimir=20Do=C5=A1ilovi=C4=87?= Date: Sun, 16 Nov 2025 22:41:00 +0100 Subject: [PATCH 11/19] Add Multi-Agent System For Iterative Code Generation, Execution, And Debugging --- README.md | 83 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 83 insertions(+) diff --git a/README.md b/README.md index 3f4f937f..9cccc218 100644 --- a/README.md +++ b/README.md @@ -236,6 +236,7 @@ print(client.get_languages()) #### Simple Example with Ollama ```python +# pip install judge0 ollama import os from ollama import Client import judge0 @@ -271,6 +272,7 @@ print(f"CODE EXECUTION RESULT:\n{result.stdout}") #### Tool Calling (a.k.a. 
Function Calling) with Ollama ```python +# pip install judge0 ollama import os from ollama import Client import judge0 @@ -329,6 +331,87 @@ final_response = client.chat(model=model, messages=messages) print(f'FINAL RESPONSE BY THE MODEL:\n{final_response["message"]["content"]}') ``` +#### Multi-Agent System For Iterative Code Generation, Execution, And Debugging + +```python +# pip install judge0 ag2[openai] +from typing import Annotated, Optional + +from autogen import ConversableAgent, LLMConfig, register_function +from autogen.tools import Tool +from pydantic import BaseModel, Field +import judge0 + + +class PythonCodeExecutionTool(Tool): + def __init__(self) -> None: + class CodeExecutionRequest(BaseModel): + code: Annotated[str, Field(description="Python code to execute")] + + async def execute_python_code( + code_execution_request: CodeExecutionRequest, + ) -> Optional[str]: + result = judge0.run( + source_code=code_execution_request.code, + language=judge0.PYTHON, + redirect_stderr_to_stdout=True, + ) + return result.stdout + + super().__init__( + name="python_execute_code", + description="Executes Python code and returns the result.", + func_or_tool=execute_python_code, + ) + + +python_executor = PythonCodeExecutionTool() + +llm_config = LLMConfig( + { + "api_type": "openai", + "base_url": "http://localhost:11434/v1", + "api_key": "ollama", + "model": "qwen3-coder:30b", + } +) + +code_runner = ConversableAgent( + name="code_runner", + system_message="You are a code executor agent, when you don't execute code write the message 'TERMINATE' by itself.", + human_input_mode="NEVER", + llm_config=llm_config, +) + +question_agent = ConversableAgent( + name="question_agent", + system_message=( + "You are a developer AI agent. " + "Send all your code suggestions to the python_executor tool where it will be executed and result returned to you. " + "Keep refining the code until it works." 
+ ), + llm_config=llm_config, +) + +register_function( + python_executor, + caller=question_agent, + executor=code_runner, + description="Run Python code", +) + +result = code_runner.initiate_chat( + recipient=question_agent, + message=( + "Write Python code to print the current Python version followed by the numbers 1 to 11. " + "Make a syntax error in the first version and fix it in the second version." + ), + max_turns=5, +) + +print(f"Result: {result.summary}") +``` + ### Filesystem This example shows how to use Judge0 Python SDK to: From a0f21c4737836e58aa0c49092a9b0b986939b1cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herman=20Zvonimir=20Do=C5=A1ilovi=C4=87?= Date: Sun, 16 Nov 2025 22:49:40 +0100 Subject: [PATCH 12/19] Update example to work with Ollama Cloud --- README.md | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 9cccc218..27d674dc 100644 --- a/README.md +++ b/README.md @@ -238,10 +238,11 @@ print(client.get_languages()) ```python # pip install judge0 ollama import os + from ollama import Client import judge0 -# Get your Ollama Cloud API key from https://ollama.com. +# Get your free tier Ollama Cloud API key at https://ollama.com. client = Client( host="https://ollama.com", headers={"Authorization": "Bearer " + os.environ.get("OLLAMA_API_KEY")}, @@ -274,10 +275,11 @@ print(f"CODE EXECUTION RESULT:\n{result.stdout}") ```python # pip install judge0 ollama import os + from ollama import Client import judge0 -# Get your Ollama Cloud API key from https://ollama.com. +# Get your free tier Ollama Cloud API key at https://ollama.com. 
client = Client( host="https://ollama.com", headers={"Authorization": "Bearer " + os.environ.get("OLLAMA_API_KEY")}, @@ -335,6 +337,7 @@ print(f'FINAL RESPONSE BY THE MODEL:\n{final_response["message"]["content"]}') ```python # pip install judge0 ag2[openai] +import os from typing import Annotated, Optional from autogen import ConversableAgent, LLMConfig, register_function @@ -367,12 +370,13 @@ class PythonCodeExecutionTool(Tool): python_executor = PythonCodeExecutionTool() +# Get your free tier Ollama Cloud API key at https://ollama.com. llm_config = LLMConfig( { "api_type": "openai", - "base_url": "http://localhost:11434/v1", - "api_key": "ollama", - "model": "qwen3-coder:30b", + "base_url": "https://ollama.com/v1", + "api_key": os.environ.get("OLLAMA_API_KEY"), + "model": "qwen3-coder:480b-cloud", } ) From 177ff092a5aebe20f536c61bb7aefa6e33e94e3c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herman=20Zvonimir=20Do=C5=A1ilovi=C4=87?= Date: Mon, 17 Nov 2025 00:14:53 +0100 Subject: [PATCH 13/19] Add Kaggle Dataset Visualization With LLM-Generated Code Using Ollama And Judge0 --- README.md | 106 ++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 104 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 27d674dc..7d15228e 100644 --- a/README.md +++ b/README.md @@ -233,7 +233,7 @@ print(client.get_languages()) ### Running LLM-Generated Code -#### Simple Example with Ollama +#### Simple Example With Ollama ```python # pip install judge0 ollama @@ -270,7 +270,7 @@ result = judge0.run(source_code=code, language=judge0.C) print(f"CODE EXECUTION RESULT:\n{result.stdout}") ``` -#### Tool Calling (a.k.a. Function Calling) with Ollama +#### Tool Calling (a.k.a. 
Function Calling) With Ollama ```python # pip install judge0 ollama @@ -416,6 +416,108 @@ result = code_runner.initiate_chat( print(f"Result: {result.summary}") ``` +#### Kaggle Dataset Visualization With LLM-Generated Code Using Ollama And Judge0 + +```python +# pip install judge0 ollama requests +import os +import zipfile + +import judge0 +import requests +from judge0 import File, Filesystem +from ollama import Client + +# Step 1: Download the dataset from Kaggle. +dataset_url = "https://www.kaggle.com/api/v1/datasets/download/gregorut/videogamesales" +dataset_zip_path = "vgsales.zip" +dataset_csv_path = "vgsales.csv" # P.S.: We know the CSV file name inside the zip. + +if not os.path.exists(dataset_csv_path): # Download only if not already downloaded. + with requests.get(dataset_url) as response: + with open(dataset_zip_path, "wb") as f: + f.write(response.content) + with zipfile.ZipFile(dataset_zip_path, "r") as f: + f.extractall(".") + +# Step 2: Prepare the submission for Judge0. +with open(dataset_csv_path, "r") as f: + submission = judge0.Submission( + language=judge0.PYTHON_FOR_ML, + additional_files=Filesystem( + content=[ + File(name=dataset_csv_path, content=f.read()), + ] + ), + ) + +# Step 3: Initialize Ollama Client. Get your free tier Ollama Cloud API key at https://ollama.com. +client = Client( + host="https://ollama.com", + headers={"Authorization": "Bearer " + os.environ.get("OLLAMA_API_KEY")}, +) + +# Step 4: Prepare the prompt, messages, tools, and choose the model. +prompt = f""" +I have a CSV that contains a list of video games with sales greater than 100,000 copies. It's saved in the file {dataset_csv_path}. +These are the columns: +- 'Rank': Ranking of overall sales +- 'Name': The games name +- 'Platform': Platform of the games release (i.e. PC,PS4, etc.) 
+- 'Year': Year of the game's release +- 'Genre': Genre of the game +- 'Publisher': Publisher of the game +- 'NA_Sales': Sales in North America (in millions) +- 'EU_Sales': Sales in Europe (in millions) +- 'JP_Sales': Sales in Japan (in millions) +- 'Other_Sales': Sales in the rest of the world (in millions) +- 'Global_Sales': Total worldwide sales. + +I want to better understand how the sales are distributed across different genres over the years. +Write Python code that analyzes the dataset based on my request, produces right chart and saves it as an image file. +""" +messages = [{"role": "user", "content": prompt}] +tools = [ + { + "type": "function", + "function": { + "name": "execute_python", + "description": "Execute the Python programming language code.", + "parameters": { + "type": "object", + "properties": { + "code": { + "type": "string", + "description": "The code written in the Python programming language.", + } + }, + "required": ["code"], + }, + }, + } +] +model = "qwen3-coder:480b-cloud" + +# Step 5: Start the interaction with the model. 
+response = client.chat(model=model, messages=messages, tools=tools) +response_message = response["message"] + +if response_message.tool_calls: + for tool_call in response_message.tool_calls: + if tool_call.function.name == "execute_python": + code = tool_call.function.arguments["code"] + print(f"CODE GENERATED BY THE MODEL:\n{code}\n") + + submission.source_code = code + result = judge0.run(submissions=submission) + + for f in result.post_execution_filesystem: + if f.name.endswith((".png", ".jpg", ".jpeg")): + with open(f.name, "wb") as img_file: + img_file.write(f.content) + print(f"Generated image saved as: {f.name}\n") +``` + ### Filesystem This example shows how to use Judge0 Python SDK to: From c4016fb07c1555eb623f8b42f019e9bc38ea8e6f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herman=20Zvonimir=20Do=C5=A1ilovi=C4=87?= Date: Mon, 17 Nov 2025 01:57:38 +0100 Subject: [PATCH 14/19] Add Minimal Example Using smolagents With Ollama And Judge0 --- README.md | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/README.md b/README.md index 7d15228e..1f811139 100644 --- a/README.md +++ b/README.md @@ -518,6 +518,49 @@ if response_message.tool_calls: print(f"Generated image saved as: {f.name}\n") ``` +#### Minimal Example Using `smolagents` With Ollama And Judge0 + +```python +# pip install judge0 smolagents[openai] +import os +from typing import Any + +import judge0 +from smolagents import CodeAgent, OpenAIServerModel, Tool +from smolagents.local_python_executor import CodeOutput, PythonExecutor + + +class Judge0PythonExecutor(PythonExecutor): + def send_tools(self, tools: dict[str, Tool]) -> None: + pass + + def send_variables(self, variables: dict[str, Any]) -> None: + pass + + def __call__(self, code_action: str) -> CodeOutput: + source_code = f"final_answer = lambda x : print(x)\n{code_action}" + result = judge0.run(source_code=source_code, language=judge0.PYTHON_FOR_ML) + return CodeOutput( + output=result.stdout, + 
logs=result.stderr or "", + is_final_answer=result.exit_code == 0, + ) + + +# Get your free tier Ollama Cloud API key at https://ollama.com. +model = OpenAIServerModel( + model_id="gpt-oss:120b-cloud", + api_base="https://ollama.com/v1", + api_key=os.environ["OLLAMA_API_KEY"], +) + +agent = CodeAgent(tools=[], model=model) +agent.python_executor = Judge0PythonExecutor() + +result = agent.run("How many r's are in the word 'strawberry'?") +print(result) +``` + ### Filesystem This example shows how to use Judge0 Python SDK to: From 0eea3ac7a1264cf47d777d6e6459ab33492fc2c5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Herman=20Zvonimir=20Do=C5=A1ilovi=C4=87?= Date: Mon, 17 Nov 2025 02:07:26 +0100 Subject: [PATCH 15/19] Add example Generating And Saving An Image File --- README.md | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/README.md b/README.md index 1f811139..71f241d5 100644 --- a/README.md +++ b/README.md @@ -568,6 +568,7 @@ This example shows how to use Judge0 Python SDK to: 2. Read the files after the execution which were created during the execution. ```python +# pip install judge0 import judge0 from judge0 import Filesystem, File, Submission @@ -601,6 +602,7 @@ print(result.post_execution_filesystem.find("./my_dir2/my_file2.txt")) This example shows how to use Judge0 Python SDK with your own Judge0 instance. 
```python +# pip install judge0 import judge0 client = judge0.Client("http://127.0.0.1:2358") @@ -617,3 +619,24 @@ int main() { result = judge0.run(client=client, source_code=source_code, language=judge0.C) print(result.stdout) ``` + +### Generating And Saving An Image File + +```python +# pip install judge0 +import judge0 + +source_code = """ +import matplotlib.pyplot as plt + +plt.plot([x for x in range(10)], [x**2 for x in range(10)]) +plt.savefig("chart.png") +""" + +result = judge0.run(source_code=source_code, language=judge0.PYTHON_FOR_ML) + +image = result.post_execution_filesystem.find("chart.png") +with open(image.name, "wb") as f: + f.write(image.content) +print(f"Generated image saved as: {image.name}\n") +``` From 0847529f78f72ed77a6e39866f4bc638e2ef5198 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Filip=20Karlo=20Do=C5=A1ilovi=C4=87?= Date: Fri, 28 Nov 2025 18:25:01 +0100 Subject: [PATCH 16/19] Filter only release tag versions (without prereleases). (#28) --- .github/workflows/docs.yml | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 182c3f95..de80d4e6 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -17,8 +17,12 @@ jobs: fetch-depth: 0 # Fetch the full history ref: ${{ github.ref }} # Check out the current branch or tag - - name: Fetch tags only - run: git fetch --tags --no-recurse-submodules + - name: Fetch version tags only (vx.y.z) + run: | + TAGS=$(git ls-remote --tags origin 'v*[0-9].[0-9].[0-9]' | sed 's/.*refs\/tags\///' | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+$') + for tag in $TAGS; do + git fetch origin "refs/tags/$tag:refs/tags/$tag" --no-recurse-submodules + done - name: Set up Python uses: actions/setup-python@v4 @@ -33,12 +37,12 @@ jobs: - name: Build documentation run: uv run sphinx-multiversion docs/source docs/build/html --keep-going --no-color - - name: Get the latest tag + - name: Get the latest stable tag run: | # Fetch 
all tags git fetch --tags - # Get the latest tag - latest_tag=$(git tag --sort=-creatordate | head -n 1) + # Get the latest stable tag + latest_tag=$(git tag --sort=-creatordate | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+$' | head -n 1) echo "LATEST_RELEASE=$latest_tag" >> $GITHUB_ENV - name: Generate index.html for judge0.github.io/judge0-python. From 38b13f961cbf3389d20a17aa21c336430b7f625c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Filip=20Karlo=20Do=C5=A1ilovi=C4=87?= Date: Fri, 28 Nov 2025 18:43:32 +0100 Subject: [PATCH 17/19] Add tag filtering for sphinx-multiversion config as well. --- docs/source/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index f4c02c87..4ab5eccb 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -102,7 +102,7 @@ napoleon_google_docstring = False # Whitelist pattern for tags (set to None to ignore all tags) -smv_tag_whitelist = r"^.*$" +smv_tag_whitelist = r"^v[0-9]+\.[0-9]+\.[0-9]+$" # Whitelist pattern for branches (set to None to ignore all branches) smv_branch_whitelist = r"^master$" # Whitelist pattern for remotes (set to None to use local branches only) From 48df1b960a36c4c1f07908fcbae20f4d0eb17bbb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Filip=20Karlo=20Do=C5=A1ilovi=C4=87?= Date: Mon, 1 Dec 2025 11:22:17 +0100 Subject: [PATCH 18/19] Add docs for client resolution. (#30) --- docs/source/in_depth/client_resolution.rst | 53 +++++++++++++++++++++- 1 file changed, 52 insertions(+), 1 deletion(-) diff --git a/docs/source/in_depth/client_resolution.rst b/docs/source/in_depth/client_resolution.rst index dfdd5321..808a705b 100644 --- a/docs/source/in_depth/client_resolution.rst +++ b/docs/source/in_depth/client_resolution.rst @@ -1,4 +1,55 @@ Client Resolution ================= -TODO: Describe the approach to client resolution. See `_get_implicit_client`. 
\ No newline at end of file +The Judge0 Python SDK supports two main client flavors: Community Edition (CE) and Extra Community Edition (Extra CE), which correspond to the Judge0 Cloud editions. When you use functions like ``judge0.run`` or ``judge0.execute`` without explicitly providing a client instance, the SDK automatically resolves and initializes a client for you. This process, handled by the internal ``_get_implicit_client`` function, follows a specific order of priority. + +The SDK determines which client to use based on environment variables. Here's the resolution order: + +1. **Custom Client (Self-Hosted)** + +The SDK first checks for a self-hosted Judge0 instance. If you have your own deployment of Judge0, you can configure the SDK to use it by setting the following environment variables: + +* For CE flavor: + * ``JUDGE0_CE_ENDPOINT``: The URL of your self-hosted Judge0 CE instance. + * ``JUDGE0_CE_AUTH_HEADERS``: A JSON string representing the authentication headers. +* For Extra CE flavor: + * ``JUDGE0_EXTRA_CE_ENDPOINT``: The URL of your self-hosted Judge0 Extra CE instance. + * ``JUDGE0_EXTRA_CE_AUTH_HEADERS``: A JSON string representing the authentication headers. + +If these variables are set, the SDK will initialize a ``Client`` instance with your custom endpoint and headers. + +2. **Hub Clients** + +If a custom client is not configured, the SDK will try to find API keys for one of the supported hub clients. The SDK checks for the following environment variables in order: + +* **Judge0 Cloud**: + * ``JUDGE0_CLOUD_CE_AUTH_HEADERS`` for ``Judge0CloudCE`` + * ``JUDGE0_CLOUD_EXTRA_CE_AUTH_HEADERS`` for ``Judge0CloudExtraCE`` + +* **RapidAPI**: + * ``JUDGE0_RAPID_API_KEY`` for both ``RapidJudge0CE`` and ``RapidJudge0ExtraCE`` + +* **AllThingsDev**: + * ``JUDGE0_ATD_API_KEY`` for both ``ATDJudge0CE`` and ``ATDJudge0ExtraCE`` + +The first API key found determines the client that will be used. + +3. 
**Preview Client** + +If none of the above environment variables are set, the SDK falls back to using a **preview client**. This is an unauthenticated client that connects to the official Judge0 Cloud service. It initializes ``Judge0CloudCE()`` and ``Judge0CloudExtraCE()`` for the CE and Extra CE flavors, respectively. + +When the preview client is used, a warning message is logged to the console, as this option is not recommended for production use. To suppress this warning, you can set the ``JUDGE0_SUPPRESS_PREVIEW_WARNING`` environment variable. + +Example Resolution Flow +----------------------- + +When you call a function like ``judge0.run(..., flavor=judge0.CE)``, the SDK will: + +1. Check if ``JUDGE0_IMPLICIT_CE_CLIENT`` is already initialized. If so, use it. +2. Check for ``JUDGE0_CE_ENDPOINT`` and ``JUDGE0_CE_AUTH_HEADERS`` to configure a ``Client``. +3. Check for ``JUDGE0_CLOUD_CE_AUTH_HEADERS`` to configure a ``Judge0CloudCE`` client. +4. Check for ``JUDGE0_RAPID_API_KEY`` to configure a ``RapidJudge0CE`` client. +5. Check for ``JUDGE0_ATD_API_KEY`` to configure an ``ATDJudge0CE`` client. +6. If none of the above are found, initialize a preview ``Judge0CloudCE`` client and log a warning. + +This implicit client resolution makes it easy to get started with the Judge0 Python SDK while providing the flexibility to configure it for different environments and services. 
From afb0f24630c361dbc78c8b3bd9f9b085db496086 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Filip=20Karlo=20Do=C5=A1ilovi=C4=87?= Date: Sat, 13 Dec 2025 11:32:38 +0100 Subject: [PATCH 19/19] Add step to generate CNAME file for documentation --- .github/workflows/docs.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index de80d4e6..f368c870 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -56,6 +56,9 @@ jobs: env: latest_release: ${{ env.LATEST_RELEASE }} + - name: Generate CNAME file + run: echo "python.docs.judge0.com" > docs/build/html/CNAME + - name: Upload artifacts uses: actions/upload-artifact@v4 with: