import itertools
import json
import math

import requests

import config_parser

# Per https://docs.gitlab.com/ee/api/rest/index.html#offset-based-pagination,
# the GitLab API caps per_page at 100.
MAX_NUMBER_OF_ITEMS_PER_PAGE = 100


def write_to_file(response_array):
    """Append the stringified response array to the job dump file.

    Uses a context manager so the file handle is closed even if the
    write raises.
    """
    with open("output/gitlab-jobs1.txt", "a") as f:
        f.write(str(response_array))


def find_jobs_by_name_that_were_run(list_of_jobs, job_name):
    """Return the jobs named `job_name` whose status is 'success'.

    :param list_of_jobs: list of GitLab job dicts (must contain 'name'
        and 'status' keys)
    :param job_name: exact job name to match
    :return: filtered list of job dicts
    """
    filtered_list_of_jobs = [
        job for job in list_of_jobs
        if job['name'] == job_name and job['status'] == 'success'
    ]
    print(filtered_list_of_jobs)
    return filtered_list_of_jobs


def calc_number_of_pages_number(total_number):
    """Return (items_per_page, number_of_pages) needed to fetch `total_number` items.

    Fix: the original used round(), which truncates up to half a page —
    e.g. 149 items gave round(1.49) == 1 page, silently dropping 49 items.
    Ceiling division guarantees every item is covered.
    """
    if total_number <= MAX_NUMBER_OF_ITEMS_PER_PAGE:
        return total_number, 1
    return (MAX_NUMBER_OF_ITEMS_PER_PAGE,
            math.ceil(total_number / MAX_NUMBER_OF_ITEMS_PER_PAGE))


def filter_relevant_attributes(job):
    """Project a raw GitLab job dict down to the fields this tool cares about.

    :param job: full job dict as returned by the GitLab jobs API
    :return: dict with status, name, ref, started_at and the commit id
    """
    return {
        'status': job['status'],
        'name': job['name'],
        'ref': job['ref'],
        'started_at': job['started_at'],
        'commit_id': job['commit']['id'],
    }


def find_jobs(count, project_id, job_name):
    """Fetch up to `count` jobs of `project_id` from GitLab and return the
    successful runs of `job_name`, reduced to their relevant attributes.

    :param count: total number of jobs to page through
    :param project_id: numeric GitLab project id
    :param job_name: job name to filter for
    :return: list of dicts as produced by filter_relevant_attributes()
    """
    number_of_items_per_page, number_of_pages = calc_number_of_pages_number(count)
    url_template = 'https://gitlab.atb-bremen.de/api/v4/projects/{}/jobs?per_page={}&page={}'
    headers = {
        'PRIVATE-TOKEN': config_parser.token
    }
    print("getting {} jobs from project {}...".format(count, project_id))

    response_array = []
    # Fix: GitLab pagination is 1-based. The original range(0, n) asked for
    # page 0 (which GitLab serves as page 1), so page 1 was fetched twice
    # and the final page was never fetched.
    for page in range(1, number_of_pages + 1):
        response = requests.request(
            "GET",
            url_template.format(project_id, number_of_items_per_page, page),
            headers=headers,
        )
        response_array.append(json.loads(response.text))

    # Flatten the list of per-page job lists into one list of jobs.
    flat_list_of_jobs = list(itertools.chain(*response_array))
    list_of_successful_jobs = find_jobs_by_name_that_were_run(flat_list_of_jobs, job_name)
    # Filter the output down to the relevant fields only.
    return list(map(filter_relevant_attributes, list_of_successful_jobs))