
Commit 3ec8bf6

Add pre-commit config and run it

1 parent bc9648d

File tree: .gitignore, .pre-commit-config.yaml, README.md, simulate.py

4 files changed, +137 -38 lines changed

.gitignore

Lines changed: 3 additions & 3 deletions
@@ -174,9 +174,9 @@ cython_debug/
 .abstra/

 # Visual Studio Code
-# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
+# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
 # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
-# and can be added to the global gitignore or merged into this file. However, if you prefer,
+# and can be added to the global gitignore or merged into this file. However, if you prefer,
 # you could uncomment the following to ignore the enitre vscode folder
 # .vscode/

@@ -191,4 +191,4 @@ cython_debug/
 # exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
 # refer to https://docs.cursor.com/context/ignore-files
 .cursorignore
-.cursorindexingignore
+.cursorindexingignore

(The removed/added pairs above look identical because the changes are whitespace-only: most likely trailing whitespace stripped and a final newline added by the new trailing-whitespace and end-of-file-fixer hooks. The single-line README.md change below appears to be of the same kind.)

.pre-commit-config.yaml

Lines changed: 49 additions & 0 deletions
@@ -0,0 +1,49 @@
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.6.0
+    hooks:
+      - id: check-case-conflict
+      - id: check-ast
+      - id: check-docstring-first
+      - id: check-executables-have-shebangs
+      - id: check-added-large-files
+      - id: check-case-conflict
+      - id: check-merge-conflict
+      - id: check-json
+      - id: check-toml
+      - id: check-yaml
+      - id: debug-statements
+      - id: end-of-file-fixer
+      - id: trailing-whitespace
+  - repo: https://github.com/jsh9/pydoclint
+    rev: 0.5.6
+    hooks:
+      - id: pydoclint
+  - repo: https://github.com/psf/black
+    rev: 24.8.0
+    hooks:
+      - id: black
+  - repo: https://github.com/pycqa/isort
+    rev: 5.13.2
+    hooks:
+      - id: isort
+  - repo: https://github.com/PyCQA/flake8
+    rev: "7.1.1"
+    hooks:
+      - id: flake8
+  - repo: https://github.com/PyCQA/autoflake
+    rev: v2.3.1
+    hooks:
+      - id: autoflake
+        args: [--remove-all-unused-imports, --in-place]
+  - repo: https://github.com/python-jsonschema/check-jsonschema
+    rev: 0.29.1
+    hooks:
+      - id: check-github-workflows
+      - id: check-readthedocs
+  - repo: https://github.com/executablebooks/mdformat
+    rev: 0.7.17
+    hooks:
+      - id: mdformat
+        additional_dependencies:
+          [mdformat-gfm, mdformat-frontmatter, mdformat-footnote]
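
With this configuration in place, the hooks are typically enabled in a clone with `pre-commit install` and applied to every tracked file with `pre-commit run --all-files`; a repository-wide run of that kind would account for the "run it" part of the commit message and the whitespace- and formatting-only changes in the other three files. (Note that check-case-conflict is listed twice, which is harmless.)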

README.md

Lines changed: 1 addition & 1 deletion
@@ -18,4 +18,4 @@ Successor to [hubtraf](https://github.com/yuvipanda/hubtraf).
 - Support spawning on hubs with profile lists.
 - Support passing on to [playwright-python](https://playwright.dev/python/docs/intro) after the
   server has been started, so we can more accurately test user workflows directly.
-- Output telemetry data in a fashion that can be easily graphed.
+- Output telemetry data in a fashion that can be easily graphed.

simulate.py

Lines changed: 84 additions & 34 deletions
These changes appear to be purely mechanical output of the new hooks: autoflake drops the unused time and override imports, isort regroups the remaining imports (standard library first, then third-party, each alphabetized), and black reflows long signatures and calls across multiple lines and normalizes blank lines. The behaviour of the script is unchanged.

@@ -1,68 +1,83 @@
 import argparse
 import asyncio
-from functools import partial
+import json
+import os
 import secrets
-import time
-import aiometer
+from dataclasses import dataclass
 from datetime import datetime
-from typing import List, override
+from functools import partial
+from typing import List
+
 import aiohttp
-import os
-import json
+import aiometer
+from playwright.async_api import Browser, async_playwright
 from yarl import URL
-from dataclasses import dataclass
-from playwright.async_api import async_playwright, Browser


 @dataclass
 class HubAccess:
     """
     Information needed to talk to a hub
     """
+
     url: URL
     token: str

+
 @dataclass
 class Server:
     """
     Represents a user server
     """
+
     servername: str
     username: str
     hub_access: HubAccess

+
 @dataclass
 class RunningServer(Server):
     """
     Represents a running user server
     """
+
     start_request_time: datetime
     start_completion_time: datetime
     server_url: URL
     startup_events: List[dict]

+
 @dataclass
 class FailedServer(Server):
     """
     Represents a user server that failed to start
     """
+
     start_request_time: datetime
     start_failure_time: datetime
     startup_events: List[dict]


-async def load_nbgitpuller_url(browser: Browser, server: RunningServer, token: str, nbgitpuller_url: URL, screenshot_name: str):
+async def load_nbgitpuller_url(
+    browser: Browser,
+    server: RunningServer,
+    token: str,
+    nbgitpuller_url: URL,
+    screenshot_name: str,
+):
     print(f"visiting {server.server_url}")
-    nbgitpuller_url = nbgitpuller_url.extend_query({
-        'targetpath': secrets.token_urlsafe(8)
-    })
-    target_url = (server.server_url / nbgitpuller_url.path).with_query(nbgitpuller_url.query)
+    nbgitpuller_url = nbgitpuller_url.extend_query(
+        {"targetpath": secrets.token_urlsafe(8)}
+    )
+    target_url = (server.server_url / nbgitpuller_url.path).with_query(
+        nbgitpuller_url.query
+    )
     await_url = server.server_url / target_url.query.get("urlpath", "/lab").rstrip("/")
     start_time = datetime.now()

-    context = await browser.new_context(extra_http_headers={
-        "Authorization": f"token {token}"
-    })
+    context = await browser.new_context(
+        extra_http_headers={"Authorization": f"token {token}"}
+    )
     page = await context.new_page()
     await page.goto(str(target_url))
     await page.wait_for_url(str(await_url), timeout=120 * 10 * 1000)
@@ -72,28 +87,35 @@ async def load_nbgitpuller_url(browser: Browser, server: RunningServer, token: s
     print(f"{server.server_url} completed test in {duration}")


-
-async def start_named_server(session: aiohttp.ClientSession, server: Server) -> RunningServer | None:
+async def start_named_server(
+    session: aiohttp.ClientSession, server: Server
+) -> RunningServer | None:
     """
     Try to start a named server as defined

     """
-    headers = {
-        "Authorization": f"token {server.hub_access.token}"
-    }
-    server_api_url = server.hub_access.url / "hub/api/users" / server.username / "servers" / server.servername
+    headers = {"Authorization": f"token {server.hub_access.token}"}
+    server_api_url = (
+        server.hub_access.url
+        / "hub/api/users"
+        / server.username
+        / "servers"
+        / server.servername
+    )
     events = []
     async with session.post(server_api_url, headers=headers) as resp:
         start_time = datetime.now()
         if resp.status == 202:
             # we are awaiting start, let's look for events
             print(f"server {server.servername} waiting to start")
-            async with session.get(server_api_url / "progress", headers=headers) as progress_resp:
+            async with session.get(
+                server_api_url / "progress", headers=headers
+            ) as progress_resp:
                 async for line in progress_resp.content:
-                    if line.decode().strip() == '':
+                    if line.decode().strip() == "":
                         # Empty line, just continue
                         continue
-                    progress_event = json.loads(line.decode().strip()[len("data: "):])
+                    progress_event = json.loads(line.decode().strip()[len("data: ") :])
                     events.append(progress_event)
                     if progress_event.get("ready") == True:
                         print(progress_event)
@@ -104,7 +126,9 @@ async def start_named_server(session: aiohttp.ClientSession, server: Server) ->
                             start_request_time=start_time,
                             start_completion_time=datetime.now(),
                             startup_events=events,
-                            server_url=URL(server.hub_access.url / progress_event['url'][1:]) # Trim leading slashG
+                            server_url=URL(
+                                server.hub_access.url / progress_event["url"][1:]
+                            ),  # Trim leading slashG
                         )
         elif resp.status == 201:
             # Means the server is immediately ready, and i don't want to deal with that yet
@@ -114,11 +138,23 @@ async def start_named_server(session: aiohttp.ClientSession, server: Server) ->
         resp.raise_for_status()


-async def payload(session: aiohttp.ClientSession, browser: Browser, auth_token: str, nbgitpuller_url: URL, server: Server):
+async def payload(
+    session: aiohttp.ClientSession,
+    browser: Browser,
+    auth_token: str,
+    nbgitpuller_url: URL,
+    server: Server,
+):
     started_server = await start_named_server(session, server)
     match started_server:
         case RunningServer():
-            await load_nbgitpuller_url(browser, started_server, auth_token, nbgitpuller_url, server.servername + ".png")
+            await load_nbgitpuller_url(
+                browser,
+                started_server,
+                auth_token,
+                nbgitpuller_url,
+                server.servername + ".png",
+            )
         case _:
             print("Server startup failed")

@@ -128,9 +164,16 @@ async def main():
     argparser.add_argument("hub_url", help="Full URL to the JupyterHub to test against")
     argparser.add_argument("username", help="Name of the user")
     argparser.add_argument("servers_count", type=int, help="Number of servers to start")
-    argparser.add_argument("--max-concurrency", type=int, default=30, help="Max Numbers of Servers to start at the same time")
+    argparser.add_argument(
+        "--max-concurrency",
+        type=int,
+        default=30,
+        help="Max Numbers of Servers to start at the same time",
+    )
     # nbgitpuller_url = URL("git-pull?repo=https%3A%2F%2Fkernel.googlesource.com%2Fpub%2Fscm%2Flinux%2Fkernel%2Fgit%2Ftorvalds%2Flinux.git&urlpath=lab&branch=master")
-    nbgitpuller_url = URL("git-pull?repo=https%3A%2F%2Fgithub.com%2Fspara%2Fcloud-101-geolab&urlpath=lab%2Ftree%2Fcloud-101-geolab%2F&branch=main")
+    nbgitpuller_url = URL(
+        "git-pull?repo=https%3A%2F%2Fgithub.com%2Fspara%2Fcloud-101-geolab&urlpath=lab%2Ftree%2Fcloud-101-geolab%2F&branch=main"
+    )

     args = argparser.parse_args()

@@ -140,13 +183,20 @@ async def main():
     async with async_playwright() as p:
         browser = await p.firefox.launch(headless=False)
         async with aiohttp.ClientSession() as session:
-            servers_to_start = [Server(f"perf-{i}", args.username, HubAccess(hub_url, token)) for i in range(args.servers_count)]
+            servers_to_start = [
+                Server(f"perf-{i}", args.username, HubAccess(hub_url, token))
+                for i in range(args.servers_count)
+            ]
             await aiometer.run_all(
-                [partial(payload, session, browser, token, nbgitpuller_url, server) for server in servers_to_start],
-                max_at_once=args.max_concurrency
+                [
+                    partial(payload, session, browser, token, nbgitpuller_url, server)
+                    for server in servers_to_start
+                ],
+                max_at_once=args.max_concurrency,
             )

         await browser.close()

+
 if __name__ == "__main__":
-    asyncio.run(main())
+    asyncio.run(main())
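
For context on the concurrency pattern the reformatted script keeps: aiometer.run_all takes a list of no-argument async callables (built here with functools.partial) and runs at most max_at_once of them concurrently, which is what the --max-concurrency flag feeds. A minimal, self-contained sketch of that pattern, with a hypothetical fake_start coroutine standing in for payload and assuming aiometer is installed, might look like this:

import asyncio
from functools import partial

import aiometer


async def fake_start(name: str) -> str:
    # Stand-in for payload(): sleeps briefly instead of starting a real JupyterHub server.
    await asyncio.sleep(0.1)
    return name


async def main() -> None:
    # Build no-argument callables up front, as simulate.py does with partial().
    tasks = [partial(fake_start, f"perf-{i}") for i in range(10)]
    # max_at_once caps how many run concurrently, playing the role of --max-concurrency.
    results = await aiometer.run_all(tasks, max_at_once=3)
    print(results)


if __name__ == "__main__":
    asyncio.run(main())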
