diff --git a/.gitignore b/.gitignore index 95cd877..2bfacce 100644 --- a/.gitignore +++ b/.gitignore @@ -376,4 +376,6 @@ $RECYCLE.BIN/ # Windows shortcuts *.lnk +cache/* + # End of https://www.toptal.com/developers/gitignore/api/java,pycharm+all,intellij+all,python,macos,windows,linux diff --git a/README.md b/README.md index 0e5a874..30a1a6b 100644 --- a/README.md +++ b/README.md @@ -63,12 +63,52 @@ python3 move_org_projects_under_project_list_then_unfollow.py # Finds repositories given a search term. Under the hood, the script searches Github for repositories that match the provided search term. -python3 follow_repos_by_search_term.py +python3 follow_repos_by_search_term.py (optional) # Finds top repositories that have a minimum 500 stars and use the provided programming language. -python3 follow_top_repos_by_star_count.py +python3 follow_top_repos_by_star_count.py (optional) + +# Unfollows all projects you're currently following that are not in a custom list. +python3 unfollow_all_followed_projects.py ``` +## The Custom Projects Lists Feature +In developing these collection of scripts, we realized that when a user follows thousands of repos in their LGTM account, there is a chance that the LGTM account will break. You won't be able to use the query console and some API +calls will be broken. + +To resolve this, we decided to create a feature users can opt-in. The "Custom Projects Lists" feature does the following: + +- Follows all repos (aka project) in your LGTM account. +- Stores every project you follow in a txt file. +- At a later date (we suggest 24 hours), the user may run a follow-up command that will take the repos followed, add them to a LGTM custom list, and finally unfollow the projects in the user's LGTM account. + +Although these steps are tedious, this is the best work-around we've found. We avoid bricking the LGTM account when projects are placed in custom lists. 
Also, we typically wait 24 hours since if the project is new to LGTM it will want to first process the project and projects being processed can't be added to custom lists. + +Finally, by having custom lists we hope that the security researcher will have an easier time picking which repos they want to test. + +### How To Run The Custom Projects Lists Feature +In some of the commands above, you will see the optional CUSTOM_LIST_NAME argument. This is optional for all +commands. This CUSTOM_LIST_NAME represents the name of a LGTM project list that will be created and used to add projects to. Any projects found from that command will then be added to the LGTM custom list. Let's show an example below to get a better idea of how this works: + +1. Run a command passing in the custom list name. The command below will follow Javascript repos and generate a cache file of every repo you follow for the project list called "big_ole_js_projects". + + `python3 follow_top_repos_by_star_count.py javascript big_ole_js_projects` + +2. Wait 1 - 24 hours. + +3. Run the command below. This will take a cached file you created earlier, create a LGTM custom project list, add the projects to that project list, and finally unfollow the repositories in your LGTM account. + + `python3 move_repos_to_lgtm_lists.py` + +Note: When naming a project custom list name, please use alphanumeric, dashes, and underscore characters only. + +### Build Processes By LGTM +LGTM can't move projects that are being processed into custom lists. To resolve this, we've added a check that confirms whether or not all projects you plan on moving to a custom list are processed. If a project isn't processed, we will not move any projects into the custom list and you'll receive the following error: + +> The <cache file name> can't be processed at this time because a project build is still in progress. + +If you receive this error, wait a few hours and run the script again. 
+ ## Legal The author of this script assumes no liability for your use of this project, including, diff --git a/auto_sort_projects.py b/auto_sort_projects.py index 9c4a82f..929c579 100644 --- a/auto_sort_projects.py +++ b/auto_sort_projects.py @@ -71,7 +71,7 @@ project_list_name = gh_org_to_project_list_name[org] project_list_id = site.get_or_create_project_list(project_list_name) for project in org_to_projects[org]: - if project.is_protoproject: + if project.is_protoproject(): print('Unable to add project to project list since it is a protoproject. %s' % project) continue site.load_into_project_list(project_list_id, [project.key]) diff --git a/follow_repos_by_search_term.py b/follow_repos_by_search_term.py index f1f7688..30f6ca3 100644 --- a/follow_repos_by_search_term.py +++ b/follow_repos_by_search_term.py @@ -1,12 +1,14 @@ from typing import List -from lgtm import LGTMSite +from lgtm import LGTMSite, LGTMDataFilters + +import utils.cacher import utils.github_dates import utils.github_api import sys import time -def save_project_to_lgtm(site: 'LGTMSite', repo_name: str): +def save_project_to_lgtm(site: 'LGTMSite', repo_name: str) -> dict: print("About to save: " + repo_name) # Another throttle. 
Considering we are sending a request to Github # owned properties twice in a small time-frame, I would prefer for @@ -14,16 +16,20 @@ def save_project_to_lgtm(site: 'LGTMSite', repo_name: str): time.sleep(1) repo_url: str = 'https://github.com/' + repo_name - site.follow_repository(repo_url) + project = site.follow_repository(repo_url) print("Saved the project: " + repo_name) + return project -def find_and_save_projects_to_lgtm(language: str, search_term: str): +def find_and_save_projects_to_lgtm(language: str, search_term: str) -> List[str]: github = utils.github_api.create() site = LGTMSite.create_from_file() + saved_project_data: List[str] = [] for date_range in utils.github_dates.generate_dates(): - repos = github.search_repositories(query=f'language:{language} created:{date_range} {search_term}') + repos = github.search_repositories(query=f'stars:>5 language:{language} fork:false created:{date_range} {search_term}') + # TODO: This occasionally returns requests.exceptions.ConnectionError which is annoying as hell. + # It would be nice if we built in exception handling. for repo in repos: # Github has rate limiting in place hence why we add a sleep here. 
More info can be found here: # https://docs.github.com/rest/overview/resources-in-the-rest-api#rate-limiting @@ -32,7 +38,15 @@ def find_and_save_projects_to_lgtm(language: str, search_term: str): if repo.archived or repo.fork: continue - save_project_to_lgtm(site, repo.full_name) + saved_project = save_project_to_lgtm(site, repo.full_name) + + simple_project = LGTMDataFilters.build_simple_project(saved_project) + + if simple_project.is_valid_project: + saved_data = f'{simple_project.display_name},{simple_project.key},{simple_project.project_type}' + saved_project_data.append(saved_data) + + return saved_project_data if len(sys.argv) < 3: print("Please make sure you provided a language and search term") @@ -42,4 +56,9 @@ search_term = sys.argv[2] print(f'Following repos for the {language} language that contain the \'{search_term}\' search term.') -find_and_save_projects_to_lgtm(language, search_term) +saved_project_data = find_and_save_projects_to_lgtm(language, search_term) + +# If the user provided a third arg then they want to create a custom list. +if len(sys.argv) >= 4: + custom_list_name = sys.argv[3] + utils.cacher.write_project_data_to_file(saved_project_data, custom_list_name) diff --git a/follow_top_repos_by_star_count.py b/follow_top_repos_by_star_count.py index 7c7cca3..8d996f9 100644 --- a/follow_top_repos_by_star_count.py +++ b/follow_top_repos_by_star_count.py @@ -1,12 +1,13 @@ from typing import List -from lgtm import LGTMSite +from lgtm import LGTMSite, LGTMDataFilters import utils.github_dates import utils.github_api +import utils.cacher import sys import time -def save_project_to_lgtm(site: 'LGTMSite', repo_name: str): +def save_project_to_lgtm(site: 'LGTMSite', repo_name: str) -> dict: print("Adding: " + repo_name) # Another throttle. 
Considering we are sending a request to Github # owned properties twice in a small time-frame, I would prefer for @@ -14,15 +15,18 @@ def save_project_to_lgtm(site: 'LGTMSite', repo_name: str): time.sleep(1) repo_url: str = 'https://github.com/' + repo_name - site.follow_repository(repo_url) + project = site.follow_repository(repo_url) + print("Saved the project: " + repo_name) + return project -def find_and_save_projects_to_lgtm(language: str): +def find_and_save_projects_to_lgtm(language: str) -> List[str]: github = utils.github_api.create() site = LGTMSite.create_from_file() + saved_project_data: List[str] = [] for date_range in utils.github_dates.generate_dates(): - repos = github.search_repositories(query=f'stars:>500 created:{date_range} sort:stars language:{language}') + repos = github.search_repositories(query=f'stars:>500 created:{date_range} fork:false sort:stars language:{language}') for repo in repos: # Github has rate limiting in place hence why we add a sleep here. More info can be found here: @@ -32,7 +36,14 @@ def find_and_save_projects_to_lgtm(language: str): if repo.archived or repo.fork: continue - save_project_to_lgtm(site, repo.full_name) + saved_project = save_project_to_lgtm(site, repo.full_name) + simple_project = LGTMDataFilters.build_simple_project(saved_project) + + if simple_project.is_valid_project: + saved_data = f'{simple_project.display_name},{simple_project.key},{simple_project.project_type}' + saved_project_data.append(saved_data) + + return saved_project_data if len(sys.argv) < 2: print("Please provide a language you want to search") @@ -41,4 +52,9 @@ def find_and_save_projects_to_lgtm(language: str): language = sys.argv[1].capitalize() print('Following the top repos for %s' % language) -find_and_save_projects_to_lgtm(language) +saved_project_data = find_and_save_projects_to_lgtm(language) + +# If the user provided a second arg then they want to create a custom list. 
+if len(sys.argv) >= 3: + custom_list_name = sys.argv[2] + utils.cacher.write_project_data_to_file(saved_project_data, custom_list_name) diff --git a/lgtm.py b/lgtm.py index 8750c40..6263a80 100644 --- a/lgtm.py +++ b/lgtm.py @@ -3,7 +3,8 @@ import requests import yaml - +from urllib3.util.retry import Retry +from requests.adapters import HTTPAdapter class LGTMRequestException(Exception): pass @@ -36,6 +37,13 @@ def _make_lgtm_get(self, url: str) -> dict: return r.json() def get_my_projects(self) -> List[dict]: + ''' + Returns a user's followed projects that are not in a custom list. + + Returns: + data (List[dict]): Response data from LGTM + ''' + url = 'https://lgtm.com/internal_api/v0.2/getMyProjects?apiVersion=' + self.api_version data = self._make_lgtm_get(url) if data['status'] == 'success': @@ -44,28 +52,57 @@ def get_my_projects_under_org(self, org: str) -> List['SimpleProject']: + ''' + Given an org name, returns a user's projects that are part of an org. + + Parameters: + org (str): An organization + + Returns: + projects (['SimpleProject']): List of SimpleProject objects from LGTM part of an org. + ''' + projects_sorted = LGTMDataFilters.org_to_ids(self.get_my_projects()) return LGTMDataFilters.extract_project_under_org(org, projects_sorted) def _make_lgtm_post(self, url: str, data: dict) -> dict: + ''' + Makes an HTTP POST request to LGTM.com + + Parameters: + url (str): A URL representing where the HTTP request goes + data (dict): Data that will be sent to LGTM.com in the request. + + Returns: + data (dict): Data returned from LGTM.com response. 
+ ''' + api_data = { 'apiVersion': self.api_version } full_data = {**api_data, **data} - print(data) - r = requests.post( + + session = requests.Session() + + retries = Retry(total=3, + backoff_factor=0.1, + status_forcelist=[ 500, 502, 503, 504 ]) + + session.mount('https://', HTTPAdapter(max_retries=retries)) + + r = session.post( url, full_data, cookies=self._cookies(), headers=self._headers() ) + try: data_returned = r.json() except ValueError as e: response_text = r.text raise LGTMRequestException(f'Failed to parse JSON. Response was: {response_text}') from e - print(data_returned) if data_returned['status'] == 'success': if 'data' in data_returned: return data_returned['data'] @@ -75,6 +112,14 @@ def _make_lgtm_post(self, url: str, data: dict) -> dict: raise LGTMRequestException('LGTM POST request failed with response: %s' % str(data_returned)) def load_into_project_list(self, into_project: int, lgtm_project_ids: List[str]): + ''' + Given a project list id and a list of project ids, add the projects to the project list on LGTM.com. + + Parameters: + into_project (int): Project list id + lgtm_project_ids (List[str]): List of project ids + ''' + url = "https://lgtm.com/internal_api/v0.2/updateProjectSelection" # Because LGTM uses some wacky format for it's application/x-www-form-urlencoded data list_serialized = ', '.join([('"' + str(elem) + '"') for elem in lgtm_project_ids]) @@ -89,7 +134,7 @@ def force_rebuild_all_proto_projects(self): org_to_projects = LGTMDataFilters.org_to_ids(self.get_my_projects()) for org in org_to_projects: for project in org_to_projects[org]: - if not project.is_protoproject: + if not project.is_protoproject(): continue self.force_rebuild_project(project) @@ -104,23 +149,44 @@ def force_rebuild_project(self, simple_project: 'SimpleProject'): except LGTMRequestException: print('Failed rebuilding project. This may be because it is already being built. 
`%s`' % simple_project) - def follow_repository(self, repository_url: str): + def follow_repository(self, repository_url: str) -> dict: url = "https://lgtm.com/internal_api/v0.2/followProject" data = { 'url': repository_url, 'apiVersion': self.api_version } - self._make_lgtm_post(url, data) + return self._make_lgtm_post(url, data) def unfollow_repository_by_id(self, project_id: str): + ''' + Given a project id, unfollows a repository. + + Parameters: + project_id (str): A project id + ''' + url = "https://lgtm.com/internal_api/v0.2/unfollowProject" data = { 'project_key': project_id, } self._make_lgtm_post(url, data) + def unfollow_proto_repository_by_id(self, project_id: str): + ''' + Given a project id, unfollows the proto repository. + + Parameters: + project_id (str): A project id + ''' + + url = "https://lgtm.com/internal_api/v0.2/unfollowProtoproject" + data = { + 'protoproject_key': project_id, + } + self._make_lgtm_post(url, data) + def unfollow_repository(self, simple_project: 'SimpleProject'): - url = "https://lgtm.com/internal_api/v0.2/unfollowProject" if not simple_project.is_protoproject \ + url = "https://lgtm.com/internal_api/v0.2/unfollowProject" if not simple_project.is_protoproject() \ else "https://lgtm.com/internal_api/v0.2/unfollowProtoproject" data = simple_project.make_post_data() self._make_lgtm_post(url, data) @@ -128,7 +194,7 @@ def unfollow_repository(self, simple_project: 'SimpleProject'): def unfollow_repository_by_org(self, org: str, include_protoproject: bool = False): projects_under_org = self.get_my_projects_under_org(org) for project in projects_under_org: - if not include_protoproject and project.is_protoproject: + if not include_protoproject and project.is_protoproject(): print("Not unfollowing project since it is a protoproject. 
%s" % project) continue print('Unfollowing project %s' % project.display_name) @@ -180,7 +246,16 @@ def add_org_to_project_list_by_list_name(self, org: str, project_name: str): @staticmethod def retrieve_project(gh_project_path: str): url = "https://lgtm.com/api/v1.0/projects/g/" + gh_project_path - r = requests.get(url) + + session = requests.Session() + + retries = Retry(total=3, + backoff_factor=0.1, + status_forcelist=[ 500, 502, 503, 504 ]) + + session.mount('https://', HTTPAdapter(max_retries=retries)) + + r = session.get(url) return r.json() @staticmethod @@ -205,17 +280,24 @@ def create_from_file() -> 'LGTMSite': @dataclass +# TODO: this SimpleProject is no longer 'simple'. Some refactoring here could be nice. class SimpleProject: display_name: str key: str - is_protoproject: bool + project_type: str + is_valid_project: bool + org: str + state: str def make_post_data(self): - data_dict_key = 'protoproject_key' if self.is_protoproject else 'project_key' + data_dict_key = 'protoproject_key' if self.is_protoproject() else 'project_key' return { data_dict_key: self.key } + def is_protoproject(self): + # The values for project_type should be hardcoded in one central location + return self.project_type == "protoproject" class LGTMDataFilters: @@ -227,43 +309,17 @@ def org_to_ids(projects: List[Dict]) -> Dict[str, List[SimpleProject]]: """ org_to_ids = {} for project in projects: - org: str - display_name: str - key: str - is_protoproject: bool - if 'protoproject' in project: - the_project = project['protoproject'] - if 'https://github.com/' not in the_project['cloneUrl']: - # Not really concerned with BitBucket right now - continue - display_name = the_project['displayName'] - org = display_name.split('/')[0] - key = the_project['key'] - is_protoproject = True - elif 'realProject' in project: - - the_project = project['realProject'][0] - if the_project['repoProvider'] != 'github_apps': - # Not really concerned with BitBucket right now - continue - org = 
str(the_project['slug']).split('/')[1] - display_name = the_project['displayName'] - key = the_project['key'] - is_protoproject = False - else: - raise KeyError('\'realProject\' nor \'protoproject\' in %s' % str(project)) + simple_project = LGTMDataFilters.build_simple_project(project) + if not simple_project.is_valid_project: + continue ids_list: List[SimpleProject] - if org in org_to_ids: - ids_list = org_to_ids[org] + if simple_project.org in org_to_ids: + ids_list = org_to_ids[simple_project.org] else: ids_list = [] - org_to_ids[org] = ids_list - ids_list.append(SimpleProject( - display_name=display_name, - key=key, - is_protoproject=is_protoproject - )) + org_to_ids[simple_project.org] = ids_list + ids_list.append(simple_project) return org_to_ids @@ -273,3 +329,45 @@ def extract_project_under_org(org: str, projects_sorted: Dict[str, List[SimplePr print('org %s not found in projects list' % org) return [] return projects_sorted[org] + + @staticmethod + def build_simple_project(project: dict) -> SimpleProject: + org: str + display_name: str + key: str + project_type: str + is_valid_project: bool = True + state: str = "" + + if 'protoproject' in project: + the_project = project['protoproject'] + if 'https://github.com/' not in the_project['cloneUrl']: + # Not really concerned with BitBucket right now + is_valid_project = False + display_name = the_project['displayName'] + state = the_project['state'] + org = display_name.split('/')[0] + key = the_project['key'] + project_type = 'protoproject' + elif 'realProject' in project: + the_project = project['realProject'][0] + if the_project['repoProvider'] != 'github_apps': + # Not really concerned with BitBucket right now + is_valid_project = False + org = str(the_project['slug']).split('/')[1] + display_name = the_project['displayName'] + key = the_project['key'] + project_type = "realProject" + else: + # We raise this in cases where we can't intrepret the data we get + # back from LGTM. 
+ is_valid_project = False + + return SimpleProject( + display_name=display_name, + key=key, + project_type=project_type, + is_valid_project=is_valid_project, + org=org, + state=state + ) diff --git a/move_repos_to_lgtm_lists.py b/move_repos_to_lgtm_lists.py new file mode 100644 index 0000000..61a8e64 --- /dev/null +++ b/move_repos_to_lgtm_lists.py @@ -0,0 +1,38 @@ +from typing import List +from lgtm import LGTMSite + +import utils.cacher +import os + +def get_project_list_id(cached_file_name: str, site: 'LGTMSite') -> str: + project_list_name = cached_file_name.split(".")[0] + + return site.get_or_create_project_list(project_list_name) + +def process_cached_file(cached_file_name: str, site: 'LGTMSite'): + cached_file = "cache/" + cached_file_name + project_builds = utils.cacher.get_project_builds(cached_file) + followed_projects = site.get_my_projects() + + if project_builds.build_processes_in_progress(followed_projects): + print(f'The {cached_file_name} can\'t be processed at this time because a project build is still in progress.') + return + + project_list_id = get_project_list_id(cached_file_name, site) + + print("Moving followed projects to the project list") + site.load_into_project_list(project_list_id, project_builds.return_successful_project_builds(site)) + + # If a project fails to be processed by LGTM, we still unfollow the project. 
+ print("Unfollowing projects") + project_builds.unfollow_projects(site) + print("Removing the cache file.") + utils.cacher.remove_file(cached_file) + print("Done processing cache file.") + +site = LGTMSite.create_from_file() + +for cached_file_name in os.listdir("cache"): + process_cached_file(cached_file_name, site) + +print("Finished!") diff --git a/unfollow_all_followed_projects.py b/unfollow_all_followed_projects.py new file mode 100644 index 0000000..15a1bb7 --- /dev/null +++ b/unfollow_all_followed_projects.py @@ -0,0 +1,10 @@ +from lgtm import LGTMSite, LGTMDataFilters + +site = LGTMSite.create_from_file() + +projects = site.get_my_projects() + +for project in projects: + simple_project = LGTMDataFilters.build_simple_project(project) + if simple_project.is_valid_project: + site.unfollow_repository(simple_project) diff --git a/utils/cacher.py b/utils/cacher.py new file mode 100644 index 0000000..a2334bd --- /dev/null +++ b/utils/cacher.py @@ -0,0 +1,181 @@ +from typing import List +import os +import time +from lgtm import LGTMSite, LGTMRequestException, LGTMDataFilters, SimpleProject + +# This is very similar to SimpleProject. If I had discovered SimpleProject earlier +# I would have built this code around that. +class ProjectBuild(SimpleProject): + def build_successful(self, followed_projects: List[dict]) -> bool: + if self.is_protoproject(): + # A throttle that although may not be necessary a nice plus. + time.sleep(2) + site = LGTMSite.create_from_file() + data = site.retrieve_project(self.display_name) + + # A failed protoproject build will always be intrepreted to LGTM as a project that can't be found. + if 'code' in data and data['code'] == 404: + return False + + # In this case, the protoproject likely succeeded. To confirm this, + # we check the language status to confirm the build succeeded. 
+ for language in data['languages']: + if language['status'] == "success": + self.key = data['id'] + return True + + return ( + not self.build_in_progress(followed_projects) and + not self.build_failed(followed_projects) + ) + + def build_in_progress(self, followed_projects: List[dict]) -> bool: + return ( + self.project_currently_followed(followed_projects) and + self.project_state("build_attempt_in_progress", followed_projects) + ) + + def build_failed(self, followed_projects: List[dict]) -> bool: + return ( + self.project_currently_followed(followed_projects) and + self.project_state("build_attempt_failed", followed_projects) + ) + + def project_state(self, state: str, followed_projects: List[dict]) -> bool: + in_state = False + + for project in followed_projects: + simple_project = LGTMDataFilters.build_simple_project(project) + + if not simple_project.is_valid_project: + continue + + if not simple_project.display_name == self.display_name: + continue + + if simple_project.is_protoproject() and simple_project.state == state: + in_state = True + break + + # Real projects always have successful builds, or at least as far as I can tell. 
+ if not simple_project.is_protoproject(): + in_state = not (state == "build_attempt_in_progress" or state == "build_attempt_failed") + break + + return in_state + + def project_currently_followed(self, followed_projects: List[dict]) -> bool: + currently_followed = False + for project in followed_projects: + simple_project = LGTMDataFilters.build_simple_project(project) + + if not simple_project.is_valid_project: + continue + + if simple_project.display_name == self.display_name: + currently_followed = True + break + + return currently_followed + +class ProjectBuilds: + def __init__(self, projects: List[ProjectBuild]): + self.projects = projects + + def unfollow_projects(self, site: 'LGTMSite'): + for project in self.projects: + time.sleep(2) + + if project.is_protoproject(): + # Protoprojects are gnarly because I believe LGTM updates the key + # if the protoproject succeeds. In case it does, we retrieve the + # latest id from LGTM then unfollow it. + data = site.retrieve_project(project.display_name) + + # A failed protoproject build will be intrepreted to LGTM + # as a project that can't be found. + if 'code' in data and data['code'] == 404: + continue + + self.unfollow_proto_project(site, data['id']) + else: + self.unfollow_real_project(site, project.key) + + + def unfollow_proto_project(self, site: 'LGTMSite', id: int): + try: + time.sleep(2) + + site.unfollow_proto_repository_by_id(id) + except LGTMRequestException as e: + # In some cases even though we've recorded the project as a protoproject + # it's actually a realproject. So we can't unfollow it via a proto-project + # unfollow API call. We can however unfollow it via the real project API call. 
+ self.unfollow_real_project(site, id) + + def unfollow_real_project(self, site: 'LGTMSite', id: int): + try: + time.sleep(2) + + site.unfollow_repository_by_id(id) + except LGTMRequestException as e: + print(f"An unknown issue occurred unfollowing {id}") + + def return_successful_project_builds(self, site: 'LGTMSite') -> List[str]: + filtered_project_keys: List[str] = [] + followed_projects = site.get_my_projects() + + for project in self.projects: + if project.build_successful(followed_projects): + filtered_project_keys.append(project.key) + + return filtered_project_keys + + def build_processes_in_progress(self, followed_projects: List[dict]) -> bool: + in_progress = False + + for project in self.projects: + if project.build_in_progress(followed_projects): + in_progress = True + break + + return in_progress + +def create_cache_folder(): + if not os.path.exists('cache'): + os.makedirs('cache') + +def write_project_data_to_file(project_keys: List[str], file_name: str): + create_cache_folder() + + file = open("cache/" + file_name + ".txt", "a") + + for project_key in project_keys: + file.write(project_key + "\n") + + file.close() + +def get_project_builds(cached_file: str) -> ProjectBuilds: + file = open(cached_file, "r") + + cached_projects = file.read().split("\n") + + while("" in cached_projects): + cached_projects.remove("") + + for i, project in enumerate(cached_projects): + cached_projects[i] = ProjectBuild( + display_name=project.split(",")[0], + key=project.split(",")[1], + project_type=project.split(",")[2], + is_valid_project=True, + org="", + state="" + ) + + file.close() + + return ProjectBuilds(cached_projects) + +def remove_file(file_name: str): + os.remove(file_name)