diff --git a/.gitignore b/.gitignore index f518266..4a2c546 100644 --- a/.gitignore +++ b/.gitignore @@ -24,3 +24,4 @@ avatar.jpg dc-env student.json dict_cache.json +temp_spreadsheet_creds.json diff --git a/marking_and_admin/Setting up a new year.md b/marking_and_admin/Setting up a new year.md index 45843ff..47525df 100644 --- a/marking_and_admin/Setting up a new year.md +++ b/marking_and_admin/Setting up a new year.md @@ -7,6 +7,16 @@ The file `marking_puller_2.py` is the entry point. - in Google drive, make a copy of last year's _Details & marking 2022_ file. - update the `MARKING_SPREADSHEET_ID` const with the ID from that new spreadsheet's URL. +In the past, the ID of the spreadsheet was hardcoded into the file. Now it's in the Codespace's env. + +``` +MARKING_SPREADSHEET_ID = "1wtTAM7A--ka7Lnog43L6jjo9kMCnDElCrTOBllEg4dA" # 2019 +MARKING_SPREADSHEET_ID = "1AjDu51VX26bIcLNMsr2iHq2BtrNEj91krxWKqjDW5aA" # 2020 +MARKING_SPREADSHEET_ID = "17KKMNIseRSo9IVNp-iaUCyEqbAR9tTYAcegzcvVgJFM" # 2021 +MARKING_SPREADSHEET_ID = "16tESt_4BUf-9-oD04suTprkd1O0oEl6WjzflF_avSKY" # 2022 +MARKING_SPREADSHEET_ID = "1DPBVy9DiVkdFBArOTRtj3L--f62KTnxyFFZrUXrobV0" # 2023 +``` + ## To mark work for the first time - if this is a new computer, run `git config --global url."https://github.com/".insteadOf git@github.com:` or the git library will have a tantrum @@ -22,4 +32,3 @@ The file `marking_puller_2.py` is the entry point. TODO: - congratulate everyone who has a full set of passing tests - diff --git a/marking_and_admin/jest.config.js b/marking_and_admin/jest.config.js deleted file mode 100644 index e69de29..0000000 diff --git a/marking_and_admin/mark_functions.py b/marking_and_admin/mark_functions.py index c29d421..909aeac 100644 --- a/marking_and_admin/mark_functions.py +++ b/marking_and_admin/mark_functions.py @@ -1,3 +1,5 @@ +"""All the work to actually mark the students' work.""" + import json import math import os @@ -11,7 +13,7 @@ from datetime import datetime from io import StringIO from itertools import repeat -from typing import Any # , Optional, Set, Tuple, TypeVar +from typing import Any, Union import git import pandas as pd @@ -20,6 +22,7 @@ from google.auth.transport.requests import Request from google_auth_oauthlib.flow import InstalledAppFlow from googleapiclient.discovery import build +from marking_types import set_meta from pandas import DataFrame, Series @@ -50,10 +53,8 @@ def Run(self): def build_spreadsheet_service(): # If modifying these scopes, delete the file token.pickle. scopes = ["https://www.googleapis.com/auth/spreadsheets"] - """Shows basic usage of the Sheets API. - Prints values from a sample spreadsheet. - """ creds = None + # Shows basic usage of the Sheets API. Prints values from a sample spreadsheet. # The file token.pickle stores the user's access and refresh tokens, and is # created automatically when the authorization flow completes for the first # time. 
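The `temp_spreadsheet_creds.json` entry added to `.gitignore` above exists because the OAuth client secrets now come out of the environment too: the next hunk writes the `SPREADSHEET_CREDS` secret to that file just before the flow reads it. Here is a minimal sketch of that env-to-temp-file pattern; `load_secret_file` is a hypothetical helper name for illustration, not something this repo defines, and the env var name is the one this diff actually uses.

```python
# A minimal sketch of the env-to-temp-file pattern, assuming the Codespace
# exposes SPREADSHEET_CREDS as a secret. load_secret_file is a hypothetical
# helper name, not something this repo defines.
import os


def load_secret_file(env_var: str, path: str) -> str:
    """Write the secret held in env_var out to path and return the path."""
    value = os.getenv(env_var, "")
    if value == "":
        raise RuntimeError(f"{env_var} is not set; check the Codespace's secrets.")
    with open(path, "w", encoding="utf-8") as out_file:
        out_file.write(value)
    return path


# e.g. hand the result straight to the OAuth flow:
# creds_path = load_secret_file("SPREADSHEET_CREDS", "temp_spreadsheet_creds.json")
```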
@@ -65,14 +66,23 @@ def build_spreadsheet_service(): if creds and creds.expired and creds.refresh_token: creds.refresh(Request()) else: + with open("temp_spreadsheet_creds.json", "w", encoding="utf-8") as tsc: + tsc.write(os.getenv("SPREADSHEET_CREDS", "")) flow = InstalledAppFlow.from_client_secrets_file( - "marking_and_admin/credentials.json", scopes + "temp_spreadsheet_creds.json", + scopes, + redirect_uri="https://design-computing.github.io/", ) + # with open("temp_spreadsheet_creds.json", "w", encoding="utf-8") as tsc: + # tsc.write("") try: + pass creds = flow.run_local_server() except OSError as os_e: print(os_e) creds = flow.run_console() + except Exception as mystery_error: + print(mystery_error) # Save the credentials for the next run with open("token.pickle", "wb") as token: pickle.dump(creds, token) @@ -111,9 +121,9 @@ def write(service, data=[["These"], ["are"], ["some"], ["d", "entries"]]): def process_for_writing(data): for i, row in enumerate(data): for j, item in enumerate(row): - if type(item) is dict or type(item) is yaml.comments.CommentedMap: + if isinstance(item, dict) or isinstance(item, yaml.comments.CommentedMap): data[i][j] = item.get("mark", str(dict(item))) - elif type(item) is not str and math.isnan(item): + elif (not isinstance(item, str)) and math.isnan(item): data[i][j] = "" return data @@ -122,7 +132,7 @@ def process_for_notes(data): comments = [] for i, row in enumerate(data): for j, item in enumerate(row): - if type(item) is dict: + if isinstance(item, dict): readable_comment: str = prepare_comment(item) ss_comment_package: dict = set_comment(j, i, readable_comment) comments.append(ss_comment_package) @@ -131,31 +141,30 @@ def process_for_notes(data): def prepare_comment(item: dict) -> str: if "results" not in item.keys(): - fu = "some kind of major fuck up" - return f"⚠ {item.get('bigerror', fu)} ⏱ {round(item.get('time', 0))}" + fk_up = "some kind of major fuck up" + return f"⚠ {item.get('bigerror', fk_up)} ⏱ {round(item.get('time', 0))}" test_results = [] - for r in item["results"]: - icon = "👏" if r["value"] == 1 else "💩" - test_results.append( - f"{icon}: {r['name']}" - ) # TODO: trace this back, and get rid of the "name" key, make it exercise_name, or test_name - tr = "\n".join(test_results) + for res in item["results"]: + icon = "👏" if res["value"] == 1 else "💩" + test_results.append(f"{icon}: {res['name']}") + # TODO: trace this back, and get rid of the "name" key, make it exercise_name, or test_name + test_res = "\n".join(test_results) message = f"""{item['repo_owner']} ⏱ {round(item['time'])} -{tr} +{test_res} {item['mark']}/{item['of_total']}""" return message -def set_comment(x, y, comment, y_offset=1): +def set_comment(x_coord, y_coord, comment, y_offset=1): request: dict[str, Any] = { "repeatCell": { "range": { "sheetId": 1704890600, - "startRowIndex": y + y_offset, - "endRowIndex": y + 1 + y_offset, - "startColumnIndex": x, - "endColumnIndex": x + 1, + "startRowIndex": y_coord + y_offset, + "endRowIndex": y_coord + 1 + y_offset, + "startColumnIndex": x_coord, + "endColumnIndex": x_coord + 1, }, "cell": {"note": comment}, "fields": "note", @@ -164,10 +173,10 @@ def set_comment(x, y, comment, y_offset=1): return request -def get_DF_from_CSV_URL(url, column_names=False): +def get_df_from_csv_url(/service/url: str, column_names: Union[list[str], bool] = False): """Get a csv of values from google docs.""" - r = requests.get(url) - data = r.text + res = requests.get(url) + data = res.text if column_names: return 
pd.read_csv(StringIO(data), header=0, names=column_names) else: @@ -184,9 +193,12 @@ def get_forks( Limits to repos created this year (THIS_YEAR as a const) Args: - org (str, optional): The name of the Github user/organisation to pull the forks from. Defaults to "design-computing". - repo (str, optional): The name of the repo to get the forks of. Defaults to "me". - force_inclusion_of_these_repos (list[str], optional): _description_. Defaults to []. + org (str, optional): The name of the Github user/organisation to pull + the forks from. Defaults to "design-computing". + repo (str, optional): The name of the repo to get the forks of. + Defaults to "me". + force_inclusion_of_these_repos (list[str], optional): _description_. + Defaults to []. Raises: Exception: _description_ @@ -198,8 +210,10 @@ def get_forks( api = "https://api.github.com/" limit = 100 # TODO: #29 take these secrets out, put them in an env, and reset them - client_id = os.getenv("CLIENT_ID_GITHUB","") # "040e86e3feed633710a0" - secret = os.getenv("SECRET_GITHUB","")#"69588d73388091b5ff8635fd1a788ea79177bf69" + client_id = os.getenv("CLIENT_ID_GITHUB", "") # "040e86e3feed633710a0" + secret = os.getenv( + "SECRET_GITHUB", "" + ) # "69588d73388091b5ff8635fd1a788ea79177bf69" url = ( f"{api}/repos/{org}/{repo}/forks?" f"per_page={limit}&" @@ -207,32 +221,33 @@ def get_forks( f"client_secret={secret}'" ) print("get forks from:\n", url) - r = requests.get(url) - if r.status_code == 200: - forks = r.json() + response = requests.get(url) + if response.status_code == 200: + forks = response.json() repos = [ {"owner": fork["owner"]["login"], "git_url": fork["git_url"]} for fork in forks # filter for this year's repos if (fork["created_at"][:4] == THIS_YEAR) - # a list of repos to get that aren't this year's, to account for students retaking the course + # a list of repos to get that aren't this year's, + # to account for students retaking the course or (fork["owner"]["login"] in force_inclusion_of_these_repos) ] return repos else: - rate_limit_message(r) + rate_limit_message(response) raise Exception("GitHubFuckYouError") -def rate_limit_message(r): +def rate_limit_message(response): rate_limit = requests.get("https://api.github.com/rate_limit").json().get("rate") reset_time = str( time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(rate_limit["reset"])) ) print( - r.status_code, - r.reason, - json.dumps(r.json(), indent=2), + response.status_code, + response.reason, + json.dumps(response.json(), indent=2), json.dumps(rate_limit, indent=2), "try again at" + reset_time, sep="\n", @@ -242,13 +257,13 @@ def rate_limit_message(r): def update_repos(row: Series) -> str: """Git clone a repo, or if already cloned, git pull.""" url = row["git_url"] - https_url = url.replace("git://","https://") + https_url = url.replace("git://", "https://") owner = row["owner"] path = os.path.normpath(os.path.join(ROOTDIR, owner)) - t = datetime.now().strftime("%H:%M:%S") + time_now_str = datetime.now().strftime("%H:%M:%S") try: git.Repo.clone_from(https_url, path) - print(f"{t}: new repo for {owner}") + print(f"{time_now_str}: new repo for {owner}") return ":) new" except git.GitCommandError as git_command_error: if "already exists and is not an empty directory" in git_command_error.stderr: @@ -258,7 +273,7 @@ def update_repos(row: Series) -> str: repo = git.cmd.Git(path) try: response = repo.pull() - print(f"{t}: pulled {owner}'s repo: {response}") + print(f"{time_now_str}: pulled {owner}'s repo: {response}") return str(response) except 
Exception as general_exception: repo.execute(["git", "fetch", "--all"]) @@ -287,20 +302,21 @@ def update_repos(row: Series) -> str: return message -def try_to_kill(file_path: str, CHATTY: bool = False): +def try_to_kill(file_path: str): """Attempt to delete the file specified by file_path.""" try: os.remove(file_path) print(f"deleted {file_path}") - except Exception as e: + except Exception as mystery_error: if CHATTY: - print(file_path, e) + print(file_path, mystery_error) -def pull_all_repos(dirList, CHATTY: bool = False, hardcore_pull: bool = False): +def pull_all_repos(dir_list, hardcore_pull: bool = False): """Pull latest version of all repos.""" - of_total = len(dirList) - for i, student_repo in enumerate(dirList): + + of_total = len(dir_list) + for i, student_repo in enumerate(dir_list): repo_is_here = os.path.join(ROOTDIR, student_repo) try: repo = git.cmd.Git(repo_is_here) @@ -308,16 +324,16 @@ def pull_all_repos(dirList, CHATTY: bool = False, hardcore_pull: bool = False): repo.execute(["git", "fetch", "--all"]) repo.execute(["git", "reset", "--hard", "origin/main"]) repo.pull() # probably not needed, but belt and braces - t = datetime.now().strftime("%H:%M:%S") - print(f"{t}: {i}/{of_total} pulled {student_repo}'s repo") - except Exception as e: - print(student_repo, e) + time_now_str = datetime.now().strftime("%H:%M:%S") + print(f"{time_now_str}: {i}/{of_total} pulled {student_repo}'s repo") + except Exception as mystery_exception: + print(student_repo, mystery_exception) -def csv_of_details(dirList): +def csv_of_details(dir_list): """Make a CSV of all the students.""" results = [] - for student_repo in dirList: + for student_repo in dir_list: path = os.path.join(ROOTDIR, student_repo, "aboutMe.yml") details = open(path).read() # replaces the @ symbol @@ -335,14 +351,14 @@ def csv_of_details(dirList): if details["studentNumber"] == "z1234567": print(student_repo, "hasn't updated") - except Exception as e: + except Exception as mystery_error: print(details) - results.append({"error": e, "repoName": student_repo}) + results.append({"error": mystery_error, "repoName": student_repo}) print("\n\nResults:") - resultsDF = pd.DataFrame(results) + results_df = pd.DataFrame(results) # print(resultsDF) - resultsDF.to_csv(os.path.join(CWD, "csv/studentDetails.csv")) + results_df.to_csv(os.path.join(CWD, "csv/studentDetails.csv")) fix_up_csv() @@ -357,7 +373,7 @@ def fix_up_csv(path="csv/studentDetails.csv"): line = line.replace("^AT^", "@") line = line.replace(",,", ",-,") lines.append(line) - with open(path, "w") as outfile: + with open(path, "w", encoding="utf-8") as outfile: for line in lines: print(line) outfile.write(line) @@ -365,12 +381,14 @@ def fix_up_csv(path="csv/studentDetails.csv"): def log_progress(message, logfile_name): """Write a message to a logfile.""" - completed_students_list = open(logfile_name, "a") + completed_students_list = open(logfile_name, "a", encoding="utf-8") completed_students_list.write(message) completed_students_list.close() -def get_readmes(row, output="mark", print_labbooks=False): +def get_readmes( + row, output="mark", print_labbooks=False +) -> Union[int, str, list[Union[int, str]]]: """Get the text, or the mark, or both related to log books.""" # intro_set = "TODO: Reflect on what you learned this set and what is still unclear." # intro_week = "TODO: Reflect on what you learned this week and what is still unclear." 
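`update_repos` and `pull_all_repos` above share one idea: clone if the repo is new, otherwise fetch everything and hard-reset to `origin/main`, so a student's force-push or local mess can't wedge the marker. A minimal sketch of that clone-or-force-sync pattern, assuming GitPython is installed and the fork tracks `origin/main`; `force_sync` is an invented name for illustration.

```python
# A minimal sketch of the clone-or-force-sync pattern used by update_repos and
# pull_all_repos, assuming GitPython is installed and the fork tracks
# origin/main. force_sync is an invented name for illustration.
import git


def force_sync(https_url: str, path: str) -> str:
    """Clone a repo, or hard-reset an existing clone to origin/main."""
    try:
        git.Repo.clone_from(https_url, path)
        return ":) new"
    except git.GitCommandError:
        # The directory already exists (or the clone failed some other way),
        # so throw away local state and match the remote exactly.
        repo = git.cmd.Git(path)
        repo.execute(["git", "fetch", "--all"])
        repo.execute(["git", "reset", "--hard", "origin/main"])
        return "reset to origin/main"
```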
@@ -380,11 +398,11 @@ def get_readmes(row, output="mark", print_labbooks=False):
     mark = 0
     all_readme = ""
     for i in range(1, 11):
-        p = os.path.join(path, f"set{i}", "readme.md")
-        if os.path.isfile(p):
+        file_path = os.path.join(path, f"set{i}", "readme.md")
+        if os.path.isfile(file_path):
             try:
-                with open(p, "r", encoding="utf-8", errors="ignore") as f:
-                    contents = f.read()
+                with open(file_path, "r", encoding="utf-8", errors="ignore") as file:
+                    contents = file.read()
                 new = re.sub(regex, subst, contents, 0, re.MULTILINE).strip()
                 # print(i,"|", new, "|", len(new))
                 if len(new) > 0:
@@ -404,6 +422,20 @@ def get_readmes(row, output="mark", print_labbooks=False):
     return [mark, all_readme]
 
 
+def get_readme_text(row) -> str:
+    """Get the collected text of all the readme files."""
+    text = get_readmes(row, output="textList", print_labbooks=False)
+    assert isinstance(text, str)
+    return text
+
+
+def get_readme_mark(row) -> int:
+    """Get the number of readme files that are filled in."""
+    mark = get_readmes(row, output="mark", print_labbooks=False)
+    assert isinstance(mark, int)
+    return mark
+
+
 def test_in_clean_environment(
     row: Series,
     set_number: int,
@@ -448,8 +480,8 @@ def get_existing_marks_from_csv(row: Series, set_number: int) -> dict:
     except KeyError as k:
         print(f"no marks for set{set_number}", k)
         return {}
-    except Exception as e:
-        print(e)
+    except Exception as mystery_error:
+        print(mystery_error)
         return {}
 
 
@@ -484,24 +516,35 @@ def mark_a_specific_person_week(
     test_args = [python, path_to_test_shim, path_to_tests, path_to_repo, row.owner]
 
     try:
+        time_in = datetime.now()
         RunCmd(test_args, timeout).Run()  # this is unnecessarily complicated
-
+        time_out = datetime.now()
+        total_time_seconds = (time_out - time_in).total_seconds()
         # full_path = os.path.join(LOCAL, temp_file_path)
         with open(
             temp_file_path, "r", encoding="utf-8", errors="ignore"
         ) as temp_results:
             contents = temp_results.read()
-        # TODO: catch empty string contents, and make the error message better
-        results_dict = json.loads(contents)
-        results_dict["bigerror"] = ":)"
+        practical_timeout = math.floor(timeout * 0.98)
+        if total_time_seconds > practical_timeout:
+            print("\n\nAnnoying timeout ⌛⏳⌛⏳", "\n" * 5)
+            message = (
+                "Execution timed out. "
+                + f"It was given {practical_timeout} seconds to complete."
+            )
+            results_dict = {"bigerror": message, "gh_username": row.owner}
+        else:
+            # TODO: catch empty string contents, and make the error message better
+            results_dict = json.loads(contents)
+            results_dict["bigerror"] = ":)"
         log_progress(f" good for w{set_number}\n", logfile_name)
-    except Exception as e:
+    except json.JSONDecodeError as json_exception:
         results_dict = {
-            "bigerror": str(e).replace(",", "~"),
+            "bigerror": str(json_exception).replace(",", "~"),
             "gh_username": row.owner,
         }  # the comma messes with the csv
-        log_progress(f" bad {e} w{set_number}\n", logfile_name)
+        log_progress(f" bad {json_exception} w{set_number}\n", logfile_name)
 
     elapsed_time = time.time() - start_time
     results_dict["time"] = elapsed_time
@@ -514,50 +557,50 @@ def get_safe_path(*parts):
     return abs_path
 
 
-def prepare_log(logfile_name, firstLine="here we go:\n"):
+def prepare_log(logfile_name, first_line="here we go:\n"):
     """Create or empty the log file."""
-    completed_students_list = open(logfile_name, "w")
-    completed_students_list.write(firstLine)
+    completed_students_list = open(logfile_name, "w", encoding="utf-8")
+    completed_students_list.write(first_line)
     completed_students_list.close()
 
 
-def mark_work(dirList, set_number, root_dir, dfPlease=True, timeout=5):
+def mark_work(dir_list, set_number, root_dir, df_please=True, timeout=5):
     """Mark the set's exercises."""
     logfile_name = "temp_completion_log"
     prepare_log(logfile_name)
-    r = len(dirList)  # for repeat count
+    repeat_count = len(dir_list)  # for repeat count
     results = list(
         map(
             test_in_clean_environment,  # Function name
-            dirList,  # student_repo
-            repeat(root_dir, r),  # root_dir
-            repeat(set_number, r),  # set_number
-            repeat(logfile_name, r),  # logfile_name
-            repeat(timeout, r),  # timeout
+            dir_list,  # student_repo
+            repeat(root_dir, repeat_count),  # root_dir
+            repeat(set_number, repeat_count),  # set_number
+            repeat(logfile_name, repeat_count),  # logfile_name
+            repeat(timeout, repeat_count),  # timeout
         )
     )
-    resultsDF = pd.DataFrame(results)
+    results_df = pd.DataFrame(results)
     csv_path = f"csv/set{set_number}marks.csv"
-    resultsDF.to_csv(os.path.join(CWD, csv_path), index=False)
+    results_df.to_csv(os.path.join(CWD, csv_path), index=False)
     for _ in [1, 2, 3]:
         # this is pretty dirty, but it gets tricky when you have
         # ,,, -> ,-,, because each instance needs to be replaced multiple times
         # TODO: #makeitnice
         fix_up_csv(path=csv_path)
     print("\n+-+-+-+-+-+-+-+\n\n")
-    if dfPlease:
-        return resultsDF
+    if df_please:
+        return results_df
 
 
 def get_details(row: Series) -> dict:
     try:
-        path_to_aboutMe = os.path.abspath(
+        path_to_about_me = os.path.abspath(
             os.path.join(ROOTDIR, row.owner, "aboutMe.yml")
         )
-        with open(path_to_aboutMe, "r", encoding="utf-8") as yf:
-            details_raw_yaml = yf.read()
+        with open(path_to_about_me, "r", encoding="utf-8") as y_file:
+            details_raw_yaml = y_file.read()
         details: dict = dict(yaml.load(details_raw_yaml, yaml.RoundTripLoader))
         details["error"] = "👍"
         details["owner"] = row.owner
@@ -585,7 +628,7 @@ def get_last_commit(row: Series) -> str:
     # TODO: find out why I was returning a no commits error
 
 
-def mark_week(
+def mark_set(
     mark_sheet: DataFrame,
     set_number: int = 1,
     timeout: int = 10,
@@ -594,10 +637,14 @@ def mark_week(
     """Mark a single week for all students.
 
     Args:
-        mark_sheet (Dataframe): A dataframe that describes who's going to get marked
-        set_number (int, optional): The number of the set that we're marking. Defaults to 1.
-        timeout (int, optional): number of seconds to try for before we cut this student off. Defaults to 10.
-        active (bool, optional): Is this week being marked yet?. Defaults to True.
+        mark_sheet (Dataframe): A dataframe that describes who's
+            going to get marked.
+        set_number (int, optional): The number of the set that we're marking.
+            Defaults to 1.
+        timeout (int, optional): number of seconds to try for before we cut
+            this student off. Defaults to 10.
+        active (bool, optional): Is this week being marked yet?
+            Defaults to True.
 
     Returns:
         Series: A series of the marks, or if not active yet, 0
@@ -614,19 +661,39 @@ def mark_week(
 
 
 def do_the_marking(
-    this_year="2023",
-    rootdir="../StudentRepos",
-    chatty=False,
+    this_year: str = "2023",
+    rootdir: str = "../StudentRepos",
+    chatty: bool = False,
     force_marking=False,
-    marking_spreadsheet_id="16tESt_4BUf-9-oD04suTprkd1O0oEl6WjzflF_avSKY",  # 2022
-    marks_csv="marks.csv",
-    mark_w1=True,
-    mark_w2=False,
-    mark_w3=False,
-    mark_w4=False,
-    mark_w5=False,
-    mark_exam=False,
-):
+    marking_spreadsheet_id: str = "16tESt_4BUf-9-oD04suTprkd1O0oEl6WjzflF_avSKY",  # 2022
+    marks_csv: str = "marks.csv",
+    set_1: set_meta = {"timeout": 5, "active": False},
+    set_2: set_meta = {"timeout": 5, "active": False},
+    set_3: set_meta = {"timeout": 5, "active": False},
+    set_4: set_meta = {"timeout": 5, "active": False},
+    set_5: set_meta = {"timeout": 5, "active": False},
+    exam: set_meta = {"timeout": 5, "active": False},
+    test_number_of_students: int = 0,
+    force_repos: list[str] = [],
+) -> None:
+    """Run tests against all student work.
+
+    Args:
+        this_year (str, optional): The year that you want to test. Defaults to "2023".
+        rootdir (str, optional): Where you want to keep all the repos you're
+            working with. Defaults to "../StudentRepos".
+        chatty (bool, optional): Do you want it to be verbose? Defaults to False.
+        force_marking (bool, optional): _description_. Defaults to False.
+        marking_spreadsheet_id (str, optional): _description_. Defaults to
+            "16tESt_4BUf-9-oD04suTprkd1O0oEl6WjzflF_avSKY".
+        set_1 (set_meta, optional): timeout and active flag for set 1. Defaults to {"timeout": 5, "active": False}.
+        set_2 (set_meta, optional): timeout and active flag for set 2. Defaults to {"timeout": 5, "active": False}.
+        set_3 (set_meta, optional): timeout and active flag for set 3. Defaults to {"timeout": 5, "active": False}.
+        set_4 (set_meta, optional): timeout and active flag for set 4. Defaults to {"timeout": 5, "active": False}.
+        set_5 (set_meta, optional): timeout and active flag for set 5. Defaults to {"timeout": 5, "active": False}.
+        exam (set_meta, optional): timeout and active flag for the exam set. Defaults to {"timeout": 5, "active": False}.
+        test_number_of_students (int, optional): _description_. Defaults to 0.
+ """ global THIS_YEAR THIS_YEAR = this_year global ROOTDIR @@ -639,6 +706,8 @@ def do_the_marking( MARKING_SPREADSHEET_ID = marking_spreadsheet_id global MARKS_CSV MARKS_CSV = marks_csv + global FORCE_REPOS + FORCE_REPOS = force_repos start_time = time.time() @@ -649,6 +718,8 @@ def do_the_marking( students = get_student_data() mark_sheet = pd.DataFrame(students) + if test_number_of_students > 0: + mark_sheet = mark_sheet.sample(test_number_of_students) deets = pd.DataFrame(list(mark_sheet.apply(get_details, axis=1))) # temp: @@ -658,42 +729,89 @@ def do_the_marking( mark_sheet["updated"] = mark_sheet.apply(update_repos, axis=1) mark_sheet["last_commit"] = mark_sheet.apply(get_last_commit, axis=1) - mark_sheet["set1"] = mark_week(mark_sheet, set_number=1, timeout=15, active=mark_w1) - mark_sheet["set2"] = mark_week(mark_sheet, set_number=2, timeout=15, active=mark_w2) - mark_sheet["set3"] = mark_week(mark_sheet, set_number=3, timeout=30, active=mark_w3) - mark_sheet["set4"] = mark_week(mark_sheet, set_number=4, timeout=50, active=mark_w4) - mark_sheet["set5"] = mark_week(mark_sheet, set_number=5, timeout=50, active=mark_w5) - mark_sheet["exam"] = mark_week( - mark_sheet, set_number=8, timeout=45, active=mark_exam + mark_sheet["set1"] = mark_set( + mark_sheet, set_number=1, timeout=set_1["timeout"], active=set_1["active"] + ) + mark_sheet["set2"] = mark_set( + mark_sheet, set_number=2, timeout=set_2["timeout"], active=set_2["active"] + ) + mark_sheet["set3"] = mark_set( + mark_sheet, set_number=3, timeout=set_3["timeout"], active=set_3["active"] + ) + mark_sheet["set4"] = mark_set( + mark_sheet, set_number=4, timeout=set_4["timeout"], active=set_4["active"] + ) + mark_sheet["set5"] = mark_set( + mark_sheet, set_number=5, timeout=set_5["timeout"], active=set_5["active"] + ) + mark_sheet["exam"] = mark_set( + mark_sheet, set_number=8, timeout=exam["timeout"], active=exam["active"] ) mark_sheet.drop(["name"], axis=1, errors="ignore", inplace=True) - mark_sheet["readme_mark"] = mark_sheet.apply(get_readmes, args=("mark",), axis=1) - mark_sheet["readme_text"] = mark_sheet.apply( - get_readmes, args=("textList",), axis=1 - ) + mark_sheet["readme_mark"] = mark_sheet.apply(get_readme_mark, axis=1) + mark_sheet["readme_text"] = mark_sheet.apply(get_readme_text, axis=1) + use_nice_spreadsheet_connection = False + if not use_nice_spreadsheet_connection: + convert_result_dicts_to_ints(mark_sheet) mark_sheet.to_csv(MARKS_CSV) - data = [list(x) for x in mark_sheet.to_numpy()] - service = build_spreadsheet_service() - write(service, data=data) + if use_nice_spreadsheet_connection: + data = [list(x) for x in mark_sheet.to_numpy()] + service = build_spreadsheet_service() + write(service, data=data) print("that took", (time.time() - start_time) / 60, "minutes") +def convert_result_dicts_to_ints(mark_sheet): + """Convert the dict of results into a single mark. + + dict looks like this: + { + 'of_total': 2, + 'mark': 0, + 'results': [ + {'value': 0, 'name': 'Exercise 2: debug the file'}, + {'value': 0, 'name': 'Lab book entry completed'} + ], + 'week_number': 2, + 'localError': ':)', + 'repo_owner': 'rhiannon84', + 'bigerror': ':)', + 'time': 4.783979892730713 + } + so we're just pulling the mark out, but it's fraught, so there's some checking. 
+    """
+
+    def convert_one_results_dict_to_an_int(results_dict) -> int:
+        try:
+            return results_dict.get("mark", 0)
+        except AttributeError as attr_err:
+            print(attr_err)
+            return 0
+
+    for i in range(1, 6):
+        mark_sheet[f"set{i}_data"] = mark_sheet[f"set{i}"]
+        mark_sheet[f"set{i}"] = mark_sheet[f"set{i}"].apply(
+            convert_one_results_dict_to_an_int
+        )
+
+    mark_sheet["exam_data"] = mark_sheet["exam"]
+    mark_sheet["exam"] = mark_sheet["exam"].apply(convert_one_results_dict_to_an_int)
+
+
 def get_student_data():
-    # TODO: instead of loading the pickle, load the marks.csv file so that
-    # the dataframe is preloaded with values. Then it doesn't need to mark students
-    # that haven't updated their work.
     students = None
     file_name = "student.json"
     if os.path.exists(file_name):
-        with open(file_name, "r") as data_file:
+        with open(file_name, "r", encoding="utf-8") as data_file:
             students = json.load(data_file)
     else:
-        students = get_forks(force_inclusion_of_these_repos=[])
-        with open("student.json", "w") as data_file:
+        force_repos = FORCE_REPOS
+        students = get_forks(force_inclusion_of_these_repos=force_repos)
+        with open("student.json", "w", encoding="utf-8") as data_file:
             json.dump(students, data_file, indent=2)
 
     return students
diff --git a/marking_and_admin/marker.py b/marking_and_admin/marker.py
new file mode 100644
index 0000000..96c3795
--- /dev/null
+++ b/marking_and_admin/marker.py
@@ -0,0 +1,39 @@
+# -*- coding: UTF-8 -*-
+"""Get the latest copy of all the repos.
+
+This pulls the latest copy of all the repos.
+It can clone new repos if you delete the students pickle.
+"""
+import os
+import sys
+
+from mark_functions import do_the_marking
+
+MARKING_SPREADSHEET_ID = os.getenv("GOOGLE_SHEETS_KEY", "")
+
+
+if __name__ == "__main__" and MARKING_SPREADSHEET_ID != "":
+    sys.path.insert(0, "/workspaces/me/set2")
+
+    do_the_marking(
+        this_year="2023",
+        rootdir="../StudentRepos",
+        chatty=False,
+        force_marking=True,
+        marking_spreadsheet_id=MARKING_SPREADSHEET_ID,
+        marks_csv="marking_and_admin/marks.csv",
+        set_1={"timeout": 15, "active": True},
+        set_2={"timeout": 15, "active": True},
+        set_3={"timeout": 30, "active": True},
+        set_4={"timeout": 50, "active": True},
+        set_5={"timeout": 50, "active": False},
+        exam={"timeout": 45, "active": True},
+        test_number_of_students=0,  # if more than 0, will only mark a sample of N repos
+        force_repos=["lvl-lim", "JeWang"],
+    )
+elif MARKING_SPREADSHEET_ID == "":
+    print(
+        "The MARKING_SPREADSHEET_ID is supposed to come from the env. Either "
+        "Ben hasn't granted you permissions, or the env is broken in some way. "
+        "It's stored in the codespace's secrets."
+    )
diff --git a/marking_and_admin/marking_puller.py b/marking_and_admin/marking_puller.py
deleted file mode 100644
index 9b8aa8c..0000000
--- a/marking_and_admin/marking_puller.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# -*- coding: UTF-8 -*-
-"""Get the latest copy of all the repos.
-
-This pulls the latest copy of all the repos
-It can clone new repos if you delete the students pickle
-"""
-import os
-
-from mark_functions import do_the_marking
-
-# The ID and range of a sample spreadsheet.
-# MARKING_SPREADSHEET_ID = "1wtTAM7A--ka7Lnog43L6jjo9kMCnDElCrTOBllEg4dA" # 2019 -# MARKING_SPREADSHEET_ID = "1AjDu51VX26bIcLNMsr2iHq2BtrNEj91krxWKqjDW5aA" # 2020 -# MARKING_SPREADSHEET_ID = "17KKMNIseRSo9IVNp-iaUCyEqbAR9tTYAcegzcvVgJFM" # 2021 -# MARKING_SPREADSHEET_ID = "16tESt_4BUf-9-oD04suTprkd1O0oEl6WjzflF_avSKY" # 2022 -# MARKING_SPREADSHEET_ID = "1DPBVy9DiVkdFBArOTRtj3L--f62KTnxyFFZrUXrobV0" # 2023 -MARKING_SPREADSHEET_ID = os.getenv("GOOGLE_SHEETS_KEY", "") - - -if __name__ == "__main__": - do_the_marking( - this_year="2023", - rootdir="../StudentRepos", - chatty=False, - force_marking=True, - marking_spreadsheet_id=MARKING_SPREADSHEET_ID, - marks_csv="marks.csv", - mark_w1=True, - mark_w2=True, - mark_w3=False, - mark_w4=False, - mark_w5=False, - mark_exam=False, - ) diff --git a/marking_and_admin/marking_types.py b/marking_and_admin/marking_types.py new file mode 100644 index 0000000..7a9dc84 --- /dev/null +++ b/marking_and_admin/marking_types.py @@ -0,0 +1,8 @@ +from typing import TypedDict + + +class set_meta(TypedDict): + """week is just to keep the typechecker happy.""" + + timeout: int + active: bool diff --git a/marking_and_admin/all week's tests.py b/marking_and_admin/old_code/all week's tests.py similarity index 70% rename from marking_and_admin/all week's tests.py rename to marking_and_admin/old_code/all week's tests.py index e8d40e2..20747d1 100644 --- a/marking_and_admin/all week's tests.py +++ b/marking_and_admin/old_code/all week's tests.py @@ -1,3 +1,5 @@ +"""Run all the weeks of one person's tests. +""" import os for i in range(1, 10): diff --git a/marking_and_admin/gitTest.py b/marking_and_admin/old_code/git_test.py similarity index 100% rename from marking_and_admin/gitTest.py rename to marking_and_admin/old_code/git_test.py diff --git a/marking_and_admin/old_code/google_auth_test.py b/marking_and_admin/old_code/google_auth_test.py new file mode 100644 index 0000000..fb93c92 --- /dev/null +++ b/marking_and_admin/old_code/google_auth_test.py @@ -0,0 +1,76 @@ +# import os + +# from google_auth_oauthlib.flow import Flow + +# with open("temp_spreadsheet_creds.json", "w", encoding="utf-8") as tsc: +# tsc.write(os.getenv("SPREADSHEET_CREDS", "")) +# # Create the flow using the client secrets file from the Google API +# # Console. +# flow = Flow.from_client_secrets_file( +# "temp_spreadsheet_creds.json", +# scopes=["https://www.googleapis.com/auth/spreadsheets"], +# redirect_uri="https://design-computing.github.io/", +# ) + +# # Tell the user to go to the authorization URL. +# auth_url, _ = flow.authorization_url(https://github.com/prompt=%22consent") + +# print(f"Please go to this URL:\n\n{auth_url}\n") + +# # The user will get an authorization code. This code is used to get the +# # access token. +# code = input("Enter the authorization code: ") +# flow.fetch_token(code=code) + +# # You can use flow.credentials, or you can just get a requests session +# # using flow.authorized_session. +# session = flow.authorized_session() +# print(session.get("https://www.googleapis.com/userinfo/v2/me").json()) + + +import os +import pprint + +import google.oauth2.credentials +from google_auth_oauthlib.flow import InstalledAppFlow +from googleapiclient.discovery import build +from googleapiclient.errors import HttpError + +pp = pprint.PrettyPrinter(indent=2) + +# The CLIENT_SECRETS_FILE variable specifies the name of a file that contains +# the OAuth 2.0 information for this application, including its client_id and +# client_secret. 
+with open("temp_spreadsheet_creds.json", "w", encoding="utf-8") as tsc: + tsc.write(os.getenv("SPREADSHEET_CREDS", "")) +CLIENT_SECRETS_FILE = "temp_spreadsheet_creds.json" + +# This access scope grants read-only access to the authenticated user's Drive +# account. +SCOPES = ["https://www.googleapis.com/auth/spreadsheets"] +API_SERVICE_NAME = "spreadsheets" +API_VERSION = "v4" + + +def get_authenticated_service(): + flow = InstalledAppFlow.from_client_secrets_file( + CLIENT_SECRETS_FILE, + SCOPES, + redirect_uri="https://design-computing.github.io/", + ) + credentials = flow.run_console() + return build(API_SERVICE_NAME, API_VERSION, credentials=credentials) + + +def list_drive_files(service, **kwargs): + results = service.files().list(**kwargs).execute() + + pp.pprint(results) + + +if __name__ == "__main__": + # When running locally, disable OAuthlib's HTTPs verification. When + # running in production *do not* leave this option enabled. + os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1" + service = get_authenticated_service() + list_drive_files(service, orderBy="modifiedByMeTime desc", pageSize=5) diff --git a/marking_and_admin/how_did_x_do_y.py b/marking_and_admin/old_code/how_did_x_do_y.py similarity index 100% rename from marking_and_admin/how_did_x_do_y.py rename to marking_and_admin/old_code/how_did_x_do_y.py diff --git a/marking_and_admin/inspector_shim.py b/marking_and_admin/old_code/inspector_shim.py similarity index 100% rename from marking_and_admin/inspector_shim.py rename to marking_and_admin/old_code/inspector_shim.py diff --git a/marking_and_admin/labInspector.py b/marking_and_admin/old_code/lab_inspector.py similarity index 100% rename from marking_and_admin/labInspector.py rename to marking_and_admin/old_code/lab_inspector.py diff --git a/marking_and_admin/lambdaTester.py b/marking_and_admin/old_code/lambda_tester.py similarity index 100% rename from marking_and_admin/lambdaTester.py rename to marking_and_admin/old_code/lambda_tester.py diff --git a/marking_and_admin/old_css.css b/marking_and_admin/old_code/old_css.css similarity index 100% rename from marking_and_admin/old_css.css rename to marking_and_admin/old_code/old_css.css diff --git a/marking_and_admin/quickstart.py b/marking_and_admin/old_code/quickstart.py similarity index 100% rename from marking_and_admin/quickstart.py rename to marking_and_admin/old_code/quickstart.py diff --git a/marking_and_admin/testFixer.py b/marking_and_admin/old_code/test_fixer.py similarity index 100% rename from marking_and_admin/testFixer.py rename to marking_and_admin/old_code/test_fixer.py diff --git a/marking_and_admin/tester.py b/marking_and_admin/old_code/tester.py similarity index 96% rename from marking_and_admin/tester.py rename to marking_and_admin/old_code/tester.py index cf093d0..215c35b 100644 --- a/marking_and_admin/tester.py +++ b/marking_and_admin/old_code/tester.py @@ -3,7 +3,7 @@ import os import time -from marking_puller import RunCmd +from mark_functions import RunCmd LOCAL = os.path.dirname(os.path.realpath(__file__)) week_number = 1 diff --git a/marking_and_admin/test_shim.py b/marking_and_admin/test_shim.py index 79c5817..209c6e7 100644 --- a/marking_and_admin/test_shim.py +++ b/marking_and_admin/test_shim.py @@ -25,14 +25,15 @@ def do_the_test(repo_path): test = importlib.util.module_from_spec(spec) spec.loader.exec_module(test) print("about to test", repo_path) - r = test.theTests(repo_path) - r["localError"] = ":)" - return r - except Exception as e: + results = test.theTests(repo_path) + 
results["localError"] = ":)"
+        return results
+    except Exception as mystery_error:
         return {
             "of_total": 0,
             "mark": 0,
-            "localError": str(e).replace(",", "~"),  # the comma messes with the csv
+            "localError": str(mystery_error).replace(",", "~"),
+            # the comma messes with the csv
         }
 
 
@@ -57,22 +58,22 @@ def results_as_json(repo_path):
 REPO_PATH = os.path.normpath(sys.argv[2])
 OWNER = sys.argv[3]
 
-print("\n In the shim\n◹◸◹◸◹◸◹◸◹◸◹◸\n\nsys.argv:")
-for i, a in list(enumerate(sys.argv)):
-    print(f"{i}: {a}")
+# print("\n In the shim\n◹◸◹◸◹◸◹◸◹◸◹◸\n\nsys.argv:")
+# for i, a in list(enumerate(sys.argv)):
+#     print(f"{i}: {a}")
 
-print(
-    f"""
+# print(
+#     f"""
 
-TEST_PATH: {TEST_PATH}
-REPO_PATH: {os.path.normpath(os.path.abspath(REPO_PATH))}
-OWNER: {OWNER}
-"""
-)
+# TEST_PATH: {TEST_PATH}
+# REPO_PATH: {os.path.normpath(os.path.abspath(REPO_PATH))}
+# OWNER: {OWNER}
+# """
+# )
 
-with open("temp_results.json", "w") as temp_results:
-    results = results_as_json(REPO_PATH)
-    temp_results.write(results)
+with open("temp_results.json", "w", encoding="utf-8") as temp_results:
+    test_results = results_as_json(REPO_PATH)
+    temp_results.write(test_results)
 
 sleep(0.50)
 sleep(0.50)
diff --git a/md/week9.md b/md/week9.md
new file mode 100644
index 0000000..5380e6a
--- /dev/null
+++ b/md/week9.md
@@ -0,0 +1,26 @@
+# The Exam
+
+Pretty simple instructions here:
+
+1. Pull the latest version of the course repo
+1. Rename `set8/exercise1.py` to `set8/exercise1337.py`
+1. Run the tests against set 8
+1. Solve the puzzles
+1. Commit
+1. Push
+1. Relax, or help other people; your time is your own!
+
+
+or if you want that in more detail:
+
+
+1. Pull the latest version of the course repo
+    1. `cd ../course`
+    1. `git pull`
+    1. `cd ../me`
+1. Rename `set8/exercise1.py` to `set8/exercise1337.py` by right clicking on it, and adding the extra numbers in.
+1. Run the tests against set 8: `python ../course/set8/tests.py` (using `python3` if that's what your environment needs).
+1. Solve the puzzles, just like you would in a normal set of exercises. Do a puzzle/function, run the tests (above), check that it works, and move on to the next one.
+1. Commit, commit, commit. Do it early and often.
+1. Push. Don't just push once; push all the time, every half hour or so.
+1. Relax, or help other people; your time is your own!
diff --git a/requirements.in b/requirements.in
index 906a164..ee6e5a4 100644
--- a/requirements.in
+++ b/requirements.in
@@ -3,11 +3,10 @@ colorama
 func_timeout
 GitPython
 google
-google_auth_oauthlib
-google-api-python-client
-google-auth
-google-auth-httplib2
-google-auth-oauthlib
+google-api-python-client==1.7.2
+google-auth==1.8.0
+google-auth-httplib2==0.0.3
+google-auth-oauthlib==0.4.1
 matplotlib
 mock
 mypy
@@ -19,3 +18,5 @@ requests
 ruamel.yaml
 types-requests
 wheel
+
+# pip install --force-reinstall -v "google-api-python-client==1.7.2" "google-auth==1.8.0" "google-auth-httplib2==0.0.3" "google-auth-oauthlib==0.4.1"
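The pins above (and the commented `pip install --force-reinstall` line that restores them) matter because the marking code still calls `flow.run_console()`, a method that newer `google-auth-oauthlib` releases removed, so `0.4.1` is presumably the known-good version. A small sketch of a startup guard that checks the installed packages against these pins, assuming Python 3.8+ for `importlib.metadata`; `check_pins` is an invented name for illustration.

```python
# A sketch of a startup guard for the pinned Google libraries, assuming
# Python 3.8+ (importlib.metadata). The pin list mirrors requirements.in.
from importlib.metadata import PackageNotFoundError, version

PINS = {
    "google-api-python-client": "1.7.2",
    "google-auth": "1.8.0",
    "google-auth-httplib2": "0.0.3",
    "google-auth-oauthlib": "0.4.1",
}


def check_pins() -> None:
    """Warn when an installed package drifts from the pinned version."""
    for name, wanted in PINS.items():
        try:
            found = version(name)
        except PackageNotFoundError:
            print(f"{name} is not installed; wanted {wanted}")
            continue
        if found != wanted:
            print(f"{name} is {found}, but the marking scripts expect {wanted}")


if __name__ == "__main__":
    check_pins()
```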