add job queue and code submission tests; remove unused main executable

2025-03-25 23:05:26 +05:30
parent 67b39f3275
commit 574f754940
10 changed files with 1434 additions and 275 deletions


@@ -0,0 +1,195 @@
package tests

import (
	"bytes"
	"encoding/json"
	"net/http"
	"net/http/httptest"
	"testing"
	"time"

	"github.com/arnab-afk/monaco/handler"
	"github.com/stretchr/testify/assert"
)

func setupTestServer() *httptest.Server {
	h := handler.NewHandler()
	mux := http.NewServeMux()
	mux.HandleFunc("/submit", h.SubmitHandler)
	mux.HandleFunc("/status", h.StatusHandler)
	mux.HandleFunc("/result", h.ResultHandler)
	mux.HandleFunc("/queue-stats", h.QueueStatsHandler)
	return httptest.NewServer(mux)
}

func TestAPIIntegration(t *testing.T) {
	server := setupTestServer()
	defer server.Close()

	// Test: submit code, check status, and get results.

	// 1. Submit a Python job.
	submitURL := server.URL + "/submit"
	body := map[string]string{
		"language": "python",
		"code":     "print('Hello, Integration Test!')",
	}
	bodyBytes, _ := json.Marshal(body)
	resp, err := http.Post(submitURL, "application/json", bytes.NewReader(bodyBytes))
	assert.NoError(t, err)
	assert.Equal(t, http.StatusAccepted, resp.StatusCode)

	// Get the job ID.
	var submitResp map[string]string
	json.NewDecoder(resp.Body).Decode(&submitResp)
	resp.Body.Close()
	jobID := submitResp["id"]
	assert.NotEmpty(t, jobID)

	// 2. Check status, waiting for the job to complete (try multiple times).
	statusURL := server.URL + "/status?id=" + jobID
	var statusResp map[string]interface{}
	maxRetries := 10
	for i := 0; i < maxRetries; i++ {
		resp, err = http.Get(statusURL)
		assert.NoError(t, err)
		assert.Equal(t, http.StatusOK, resp.StatusCode)
		json.NewDecoder(resp.Body).Decode(&statusResp)
		resp.Body.Close()

		// If the job completed or failed, stop polling.
		status, _ := statusResp["status"].(string)
		if status == "completed" || status == "failed" {
			break
		}
		// Wait before the next retry.
		time.Sleep(200 * time.Millisecond)
	}

	// 3. Get results.
	resultURL := server.URL + "/result?id=" + jobID
	resp, err = http.Get(resultURL)
	assert.NoError(t, err)
	assert.Equal(t, http.StatusOK, resp.StatusCode)
	var resultResp map[string]interface{}
	json.NewDecoder(resp.Body).Decode(&resultResp)
	resp.Body.Close()
	assert.Equal(t, jobID, resultResp["id"])
	assert.Contains(t, resultResp["output"], "Hello, Integration Test!")

	// 4. Check queue stats.
	statsURL := server.URL + "/queue-stats"
	resp, err = http.Get(statsURL)
	assert.NoError(t, err)
	assert.Equal(t, http.StatusOK, resp.StatusCode)
	var statsResp map[string]interface{}
	json.NewDecoder(resp.Body).Decode(&statsResp)
	resp.Body.Close()
	assert.Contains(t, statsResp, "queue_stats")
	assert.Contains(t, statsResp, "submissions")
}

func TestMultipleLanguageSubmissions(t *testing.T) {
	server := setupTestServer()
	defer server.Close()

	// Test submissions for different languages.
	languages := []string{"python", "java", "c", "cpp"}
	codes := map[string]string{
		"python": "print('Hello from Python')",
		"java":   "public class Solution { public static void main(String[] args) { System.out.println(\"Hello from Java\"); } }",
		"c":      "#include <stdio.h>\nint main() { printf(\"Hello from C\\n\"); return 0; }",
		"cpp":    "#include <iostream>\nint main() { std::cout << \"Hello from C++\" << std::endl; return 0; }",
	}

	submitURL := server.URL + "/submit"
	for _, lang := range languages {
		body := map[string]string{
			"language": lang,
			"code":     codes[lang],
		}
		bodyBytes, _ := json.Marshal(body)
		resp, err := http.Post(submitURL, "application/json", bytes.NewReader(bodyBytes))
		assert.NoError(t, err)
		assert.Equal(t, http.StatusAccepted, resp.StatusCode)

		var submitResp map[string]string
		json.NewDecoder(resp.Body).Decode(&submitResp)
		resp.Body.Close()
		jobID := submitResp["id"]
		assert.NotEmpty(t, jobID)
		// We don't wait for completion in this test; it only verifies
		// that submissions are accepted for all languages.
	}
}

func TestInputHandling(t *testing.T) {
	server := setupTestServer()
	defer server.Close()

	// Test code submission with stdin input.
	submitURL := server.URL + "/submit"
	body := map[string]string{
		"language": "python",
		"code":     "name = input('Enter name: ')\nprint('Hello, ' + name + '!')",
		"input":    "Integration Test",
	}
	bodyBytes, _ := json.Marshal(body)
	resp, err := http.Post(submitURL, "application/json", bytes.NewReader(bodyBytes))
	assert.NoError(t, err)
	assert.Equal(t, http.StatusAccepted, resp.StatusCode)

	var submitResp map[string]string
	json.NewDecoder(resp.Body).Decode(&submitResp)
	resp.Body.Close()
	jobID := submitResp["id"]
	assert.NotEmpty(t, jobID)

	// Poll for the result until the job completes or fails.
	resultURL := server.URL + "/result?id=" + jobID
	var resultResp map[string]interface{}
	maxRetries := 10
	for i := 0; i < maxRetries; i++ {
		time.Sleep(300 * time.Millisecond)
		resp, err = http.Get(resultURL)
		assert.NoError(t, err)
		if resp.StatusCode != http.StatusOK {
			resp.Body.Close()
			continue
		}
		json.NewDecoder(resp.Body).Decode(&resultResp)
		resp.Body.Close()
		status, _ := resultResp["status"].(string)
		if status == "completed" || status == "failed" {
			break
		}
	}

	// Verify the output contains the greeting built from the input.
	assert.Contains(t, resultResp["output"], "Hello, Integration Test!")
}
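
Note: these integration tests run entirely against an in-process httptest server, so no external service is required. Assuming the module's dependencies (including testify) have been fetched, an invocation along these lines should work from the backend directory (the exact package path is an assumption based on the package name):

    go test ./tests -v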

backend/tests/load_test.py Normal file

@@ -0,0 +1,278 @@
import requests
import concurrent.futures
import time
import statistics
import matplotlib.pyplot as plt
import numpy as np

# Define the endpoint URLs
POST_URL = "http://localhost:8080/submit"
GET_URL_STATUS = "http://localhost:8080/status?id={}"
GET_URL_RESULT = "http://localhost:8080/result?id={}"
GET_URL_STATS = "http://localhost:8080/queue-stats"

# Test payloads for different languages
PAYLOADS = {
    "python": {
        "language": "python",
        "code": "print('Hello, Load Test!')",
    },
    "java": {
        "language": "java",
        "code": "public class Solution { public static void main(String[] args) { System.out.println(\"Hello, Load Test!\"); } }",
    },
    "c": {
        "language": "c",
        "code": "#include <stdio.h>\nint main() { printf(\"Hello, Load Test!\\n\"); return 0; }",
    },
    "cpp": {
        "language": "cpp",
        "code": "#include <iostream>\nint main() { std::cout << \"Hello, Load Test!\" << std::endl; return 0; }",
    },
}


def send_request(language, index):
    """Sends a POST request and returns (task_id, time_taken)."""
    payload = PAYLOADS[language]
    start_time = time.time()
    try:
        response = requests.post(POST_URL, json=payload, timeout=10)
        end_time = time.time()
        if response.status_code == 202:
            return response.json().get("id"), end_time - start_time
        else:
            print(f"Request {index} failed with status {response.status_code}")
            return None, end_time - start_time
    except requests.exceptions.RequestException as e:
        end_time = time.time()
        print(f"Request {index} error: {e}")
        return None, end_time - start_time


def wait_for_result(task_id, index):
    """Waits for a result and returns (result, time_taken)."""
    if not task_id:
        return None, 0
    start_time = time.time()
    max_retries = 30
    retry_interval = 0.5  # seconds
    for _ in range(max_retries):
        try:
            response = requests.get(GET_URL_RESULT.format(task_id), timeout=10)
            if response.status_code == 200:
                result = response.json()
                if result.get("status") in ["completed", "failed"]:
                    end_time = time.time()
                    return result, end_time - start_time
            time.sleep(retry_interval)
        except requests.exceptions.RequestException as e:
            print(f"Error checking result for task {index}: {e}")
    end_time = time.time()
    print(f"Timed out waiting for result of task {index}")
    return None, end_time - start_time


def run_test(concurrency, requests_per_language):
    """Runs a load test with the specified parameters."""
    languages = list(PAYLOADS.keys())
    all_results = {lang: [] for lang in languages}
    submit_times = {lang: [] for lang in languages}
    wait_times = {lang: [] for lang in languages}
    success_rates = {lang: 0 for lang in languages}

    # Keep track of all submissions for each language
    total_per_language = {lang: 0 for lang in languages}
    successful_per_language = {lang: 0 for lang in languages}

    start_time = time.time()

    # Create a list of tasks
    tasks = []
    for lang in languages:
        for i in range(requests_per_language):
            tasks.append((lang, i))

    print(f"Running load test with {concurrency} concurrent connections")
    print(f"Sending {requests_per_language} requests per language ({len(languages)} languages)")

    # Submit all tasks
    task_ids = {}
    with concurrent.futures.ThreadPoolExecutor(max_workers=concurrency) as executor:
        future_to_task = {executor.submit(send_request, lang, i): (lang, i) for lang, i in tasks}
        for future in concurrent.futures.as_completed(future_to_task):
            lang, i = future_to_task[future]
            total_per_language[lang] += 1
            try:
                task_id, submit_time = future.result()
                if task_id:
                    task_ids[(lang, i)] = task_id
                    submit_times[lang].append(submit_time)
            except Exception as e:
                print(f"Error submitting {lang} task {i}: {e}")

    print(f"Submitted {len(task_ids)} tasks successfully")

    # Wait for all results
    with concurrent.futures.ThreadPoolExecutor(max_workers=concurrency) as executor:
        future_to_task = {executor.submit(wait_for_result, task_ids.get((lang, i)), i): (lang, i)
                          for lang, i in tasks if (lang, i) in task_ids}
        for future in concurrent.futures.as_completed(future_to_task):
            lang, i = future_to_task[future]
            try:
                result, wait_time = future.result()
                if result and result.get("status") == "completed":
                    successful_per_language[lang] += 1
                    all_results[lang].append(result)
                    wait_times[lang].append(wait_time)
            except Exception as e:
                print(f"Error waiting for {lang} task {i}: {e}")

    end_time = time.time()
    total_time = end_time - start_time

    # Calculate success rates
    for lang in languages:
        if total_per_language[lang] > 0:
            success_rates[lang] = (successful_per_language[lang] / total_per_language[lang]) * 100
        else:
            success_rates[lang] = 0

    # Calculate statistics
    stats = {
        "total_time": total_time,
        "requests_per_second": len(task_ids) / total_time if total_time > 0 else 0,
        "success_rate": sum(success_rates.values()) / len(success_rates) if success_rates else 0,
        "submit_times": {
            lang: {
                "avg": statistics.mean(times) if times else 0,
                "min": min(times) if times else 0,
                "max": max(times) if times else 0,
                "p95": np.percentile(times, 95) if times else 0,
            } for lang, times in submit_times.items()
        },
        "wait_times": {
            lang: {
                "avg": statistics.mean(times) if times else 0,
                "min": min(times) if times else 0,
                "max": max(times) if times else 0,
                "p95": np.percentile(times, 95) if times else 0,
            } for lang, times in wait_times.items()
        },
        "success_rates": success_rates,
    }
    return stats, all_results


def print_stats(stats):
    """Prints test statistics."""
    print("\n=== Load Test Results ===")
    print(f"Total time: {stats['total_time']:.2f}s")
    print(f"Requests per second: {stats['requests_per_second']:.2f}")
    print(f"Overall success rate: {stats['success_rate']:.2f}%")

    print("\n== Submit Times (seconds) ==")
    for lang, times in stats["submit_times"].items():
        print(f"{lang:<6}: avg={times['avg']:.4f}, min={times['min']:.4f}, max={times['max']:.4f}, p95={times['p95']:.4f}")

    print("\n== Wait Times (seconds) ==")
    for lang, times in stats["wait_times"].items():
        print(f"{lang:<6}: avg={times['avg']:.4f}, min={times['min']:.4f}, max={times['max']:.4f}, p95={times['p95']:.4f}")

    print("\n== Success Rates ==")
    for lang, rate in stats["success_rates"].items():
        print(f"{lang:<6}: {rate:.2f}%")


def plot_results(stats):
    """Creates visualizations of test results."""
    languages = list(stats["submit_times"].keys())

    plt.figure(figsize=(12, 10))

    # Plot submit times
    plt.subplot(2, 2, 1)
    plt.title("Average Submit Time by Language")
    avg_times = [stats["submit_times"][lang]["avg"] for lang in languages]
    plt.bar(languages, avg_times)
    plt.ylabel("Time (seconds)")

    plt.subplot(2, 2, 2)
    plt.title("Average Wait Time by Language")
    avg_wait_times = [stats["wait_times"][lang]["avg"] for lang in languages]
    plt.bar(languages, avg_wait_times)
    plt.ylabel("Time (seconds)")

    plt.subplot(2, 2, 3)
    plt.title("Success Rate by Language")
    success_rates = [stats["success_rates"][lang] for lang in languages]
    plt.bar(languages, success_rates)
    plt.ylabel("Success Rate (%)")
    plt.ylim(0, 100)

    plt.subplot(2, 2, 4)
    plt.title("95th Percentile Wait Time by Language")
    p95_times = [stats["wait_times"][lang]["p95"] for lang in languages]
    plt.bar(languages, p95_times)
    plt.ylabel("Time (seconds)")

    plt.tight_layout()
    plt.savefig("load_test_results.png")
    print("Results saved to load_test_results.png")


def main():
    # Run tests with different concurrency levels
    concurrency_levels = [10, 20, 30]
    requests_per_language = 10
    all_stats = []
    for concurrency in concurrency_levels:
        stats, results = run_test(concurrency, requests_per_language)
        all_stats.append((concurrency, stats))
        print_stats(stats)

    # Create comparison visualization
    plt.figure(figsize=(12, 8))

    plt.subplot(2, 2, 1)
    plt.title("Requests per Second vs Concurrency")
    plt.plot([s[0] for s in all_stats], [s[1]["requests_per_second"] for s in all_stats], "o-")
    plt.xlabel("Concurrency Level")
    plt.ylabel("Requests per Second")

    plt.subplot(2, 2, 2)
    plt.title("Success Rate vs Concurrency")
    plt.plot([s[0] for s in all_stats], [s[1]["success_rate"] for s in all_stats], "o-")
    plt.xlabel("Concurrency Level")
    plt.ylabel("Success Rate (%)")
    plt.ylim(0, 100)

    plt.subplot(2, 2, 3)
    plt.title("Average Submit Time vs Concurrency")
    for lang in PAYLOADS.keys():
        plt.plot([s[0] for s in all_stats],
                 [s[1]["submit_times"][lang]["avg"] for s in all_stats],
                 "o-", label=lang)
    plt.xlabel("Concurrency Level")
    plt.ylabel("Average Submit Time (s)")
    plt.legend()

    plt.subplot(2, 2, 4)
    plt.title("Average Wait Time vs Concurrency")
    for lang in PAYLOADS.keys():
        plt.plot([s[0] for s in all_stats],
                 [s[1]["wait_times"][lang]["avg"] for s in all_stats],
                 "o-", label=lang)
    plt.xlabel("Concurrency Level")
    plt.ylabel("Average Wait Time (s)")
    plt.legend()

    plt.tight_layout()
    plt.savefig("concurrency_comparison.png")
    print("Concurrency comparison saved to concurrency_comparison.png")

    # Plot detailed results for the highest concurrency test
    plot_results(all_stats[-1][1])


if __name__ == "__main__":
    main()
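
Note: unlike the Go tests above, this load test assumes a backend already listening on http://localhost:8080 (the endpoint URLs defined at the top of the file). A plausible invocation, assuming the third-party packages from the imports are installed:

    pip install requests matplotlib numpy
    python backend/tests/load_test.py

It writes load_test_results.png and concurrency_comparison.png to the working directory.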