From 67b39f327519e285766ad2e77e65e74140656a08 Mon Sep 17 00:00:00 2001
From: Arnab-Afk
Date: Tue, 25 Mar 2025 22:44:21 +0530
Subject: [PATCH] increase concurrent execution limit and add performance testing script

---
 backend/service/execution.go |  2 +-
 backend/test.py              | 96 +++++++++++++++++++++++++++++++++++
 backend/tmp/main.exe         | Bin 8596480 -> 8596480 bytes
 3 files changed, 97 insertions(+), 1 deletion(-)
 create mode 100644 backend/test.py

diff --git a/backend/service/execution.go b/backend/service/execution.go
index 914c2b4..d7856f5 100644
--- a/backend/service/execution.go
+++ b/backend/service/execution.go
@@ -26,7 +26,7 @@ type ExecutionService struct {
 func NewExecutionService() *ExecutionService {
 	log.Println("Initializing execution service with 3 concurrent workers")
 	return &ExecutionService{
-		queue: queue.NewJobQueue(3), // 3 concurrent executions max
+		queue: queue.NewJobQueue(35), // 35 concurrent executions max
 	}
 }
 
diff --git a/backend/test.py b/backend/test.py
new file mode 100644
index 0000000..c88eb07
--- /dev/null
+++ b/backend/test.py
@@ -0,0 +1,96 @@
+import requests
+import concurrent.futures
+import time
+
+# Define the endpoint URLs
+POST_URL = "http://localhost:8080/submit"
+GET_URL = "http://localhost:8080/result?id={}"
+
+# Define the request bodies
+cpp_payload = {
+    "language": "cpp",
+    "code": """#include <iostream>\n#include <string>\n\nint main() {\n    std::string name;\n    std::cout << \"Enter your name: \";\n    std::cin >> name;\n    std::cout << \"Hello, \" << name << \"!\" << std::endl;\n    return 0;\n}""",
+    "input": "Alice"
+}
+
+java_payload = {
+    "language": "java",
+    "code": """import java.util.Scanner;\n\npublic class Solution {\n    public static void main(String[] args) {\n        Scanner scanner = new Scanner(System.in);\n        System.out.print(\"Enter your name: \");\n        String name = scanner.nextLine();\n        System.out.println(\"Hello, \" + name + \"!\");\n        scanner.close();\n    }\n}""",
+    "input": "Jane"
+}
+
+def send_request(index):
+    """Sends a POST request and returns the task ID."""
+    payload = cpp_payload if index % 2 == 0 else java_payload
+    for _ in range(3):  # Retry up to 3 times
+        try:
+            response = requests.post(POST_URL, json=payload, timeout=10)
+            if response.status_code == 200:
+                task_id = response.json().get("id")
+                print(f"Request {index} sent. Task ID: {task_id}")
+                return task_id
+        except requests.exceptions.RequestException as e:
+            print(f"Request {index} failed: {e}")
+        time.sleep(1)
+    return None
+
+def get_result(task_id):
+    """Polls the result endpoint until completion."""
+    if not task_id:
+        return None
+    max_retries = 50  # Prevent infinite loop
+    retries = 0
+    while retries < max_retries:
+        try:
+            response = requests.get(GET_URL.format(task_id), timeout=10)
+            if response.status_code == 200:
+                result = response.json()
+                if result.get("status") == "completed":
+                    print(f"Task {task_id} completed.")
+                    return result
+            time.sleep(1)  # Poll every second
+            retries += 1
+        except requests.exceptions.RequestException as e:
+            print(f"Error fetching result for {task_id}: {e}")
+    print(f"Task {task_id} did not complete in time.")
+    return None
+
+def main():
+    start_time = time.time()
+    task_ids = []
+
+    print("Sending 500 requests...")
+
+    # Send 500 requests concurrently
+    with concurrent.futures.ThreadPoolExecutor(max_workers=50) as executor:
+        futures = {executor.submit(send_request, i): i for i in range(500)}
+        for future in concurrent.futures.as_completed(futures):
+            task_id = future.result()
+            if task_id:
+                task_ids.append(task_id)
+
+    print(f"Sent {len(task_ids)} requests. Waiting for results...")
+
+    # Fetch results
+    results = []
+    with concurrent.futures.ThreadPoolExecutor(max_workers=50) as executor:
+        futures = {executor.submit(get_result, task_id): task_id for task_id in task_ids}
+        for future in concurrent.futures.as_completed(futures):
+            result = future.result()
+            if result:
+                results.append(result)
+
+    # Calculate execution stats
+    total_time = time.time() - start_time
+    waiting_times = [r["totalTime"] for r in results if "totalTime" in r]
+    avg_waiting_time = sum(waiting_times) / len(waiting_times) if waiting_times else 0
+
+    print("\nExecution Stats:")
+    print(f"Total Execution Time: {total_time:.2f}s")
+    print(f"Total Requests Processed: {len(results)}/{len(task_ids)}")
+    print(f"Average Waiting Time: {avg_waiting_time:.2f}ms")
+    print(f"Min Waiting Time: {min(waiting_times, default=0)}ms")
+    print(f"Max Waiting Time: {max(waiting_times, default=0)}ms")
+
+if __name__ == "__main__":
+    main()
diff --git a/backend/tmp/main.exe b/backend/tmp/main.exe
index bd6e1972069135c085b23de2ff85f19fad453672..10e1644ca6f4f685258208588613e479ed629ee4 100644
GIT binary patch
delta 762
[base85-encoded delta data omitted]

delta 762
[base85-encoded delta data omitted]
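Note on the queue change: the queue package itself is not part of this patch, so the exact behaviour behind queue.NewJobQueue(35) is not visible here. Below is a minimal sketch of what such a bounded job queue commonly looks like in Go, assuming the constructor argument sets the number of worker goroutines; the JobQueue, NewJobQueue, and Submit names mirror the patch, but the implementation is illustrative only, not the project's actual code.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// Job is one unit of work handled by the queue.
type Job func()

// JobQueue runs submitted jobs on a fixed pool of worker goroutines,
// so at most `workers` jobs execute at the same time.
// Illustrative sketch only; the real backend queue package may differ.
type JobQueue struct {
	jobs chan Job
}

// NewJobQueue starts `workers` goroutines that drain the job channel.
func NewJobQueue(workers int) *JobQueue {
	q := &JobQueue{jobs: make(chan Job, 100)} // buffer smooths out submission bursts
	for i := 0; i < workers; i++ {
		go func() {
			for job := range q.jobs {
				job()
			}
		}()
	}
	return q
}

// Submit enqueues a job; it blocks only when the buffer is full.
func (q *JobQueue) Submit(job Job) {
	q.jobs <- job
}

func main() {
	q := NewJobQueue(3) // raise to 35 to mirror this patch
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		i := i
		q.Submit(func() {
			defer wg.Done()
			fmt.Println("running job", i)
			time.Sleep(200 * time.Millisecond) // simulate an execution
		})
	}
	wg.Wait()
}
```

With a pool like this, raising the constructor argument from 3 to 35 increases how many queued submissions execute in parallel; the accompanying test.py drives 500 submissions through 50 client threads and polls for results to observe the effect on waiting times.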