|
| 1 | +import argparse |
| 2 | +import os |
| 3 | +from typing import OrderedDict |
| 4 | +import pyexcel |
| 5 | +import requests |
| 6 | +import sys |
| 7 | + |
| 8 | + |
def get_headers():
    """Build GitHub API auth headers from the GITHUB_ACCESS_TOKEN env var.

    Returns:
        dict | None: an ``Authorization`` header dict when the token is set,
        otherwise ``None`` (``requests`` treats ``headers=None`` as no extra
        headers, so unauthenticated calls still work, just rate-limited).
    """
    access_token_env_var = "GITHUB_ACCESS_TOKEN"
    if access_token_env_var in os.environ:
        access_token = os.environ[access_token_env_var]
        return {"Authorization": f"token {access_token}"}
    # Fixed typo in the warning message: "unathenticated" -> "unauthenticated".
    print(f"{access_token_env_var} not present, performing unauthenticated calls that might hit rate limits.")
    return None
| 17 | + |
| 18 | + |
def fetch_items(url, params, headers):
    """Perform one GET request; return the Response on HTTP 200, else None.

    Failures are reported to stderr rather than raised, so pagination can
    stop gracefully with whatever has been collected so far.
    """
    resp = requests.get(url, params=params, headers=headers)
    if resp.status_code != 200:
        print(f"Failed to fetch items: {resp.status_code}", file=sys.stderr)
        print(f"{resp.content}", file=sys.stderr)
        return None
    return resp
| 27 | + |
| 28 | + |
def extract_next_url(response):
    """Return the URL tagged ``rel="next"`` in the response's Link header.

    GitHub paginates via the Link header; returns None when there is no
    header or no next page.
    """
    link_header = response.headers.get('Link')
    if link_header is None:
        return None
    for part in link_header.split(','):
        if 'rel="next"' in part:
            # Each part looks like: <https://...>; rel="next"
            return part.split(';')[0].strip('<> ')
    return None
| 36 | + |
| 37 | + |
def get_all_items(url, params, limit=None):
    """Follow GitHub pagination from *url*, accumulating the JSON items.

    Stops once more than *limit* items are collected (when given), when no
    next page remains, or when a request fails — in which case whatever was
    gathered so far is returned.
    """
    headers = get_headers()
    collected = []
    next_url = url
    while next_url:
        response = fetch_items(next_url, params, headers)
        if not response:
            break
        collected.extend(response.json())
        print(f"Processing {len(collected)}", file=sys.stderr)
        if limit and len(collected) > limit:
            break
        next_url = extract_next_url(response)
    return collected
| 52 | + |
| 53 | + |
def get_open_issues(repo_url, limit):
    """Fetch open issues (excluding pull requests) for a GitHub repository.

    *repo_url* is a browser URL like https://github.com/owner/repo.
    """
    owner, repo = repo_url.rstrip('/').split('/')[-2:]
    api_url = f"https://api.github.com/repos/{owner}/{repo}/issues"
    query = {'state': 'open', 'per_page': 100}
    fetched = get_all_items(api_url, query, limit)
    # The issues endpoint also returns PRs; those carry a 'pull_request' key.
    return [item for item in fetched if 'pull_request' not in item]
| 61 | + |
| 62 | + |
def get_open_pull_requests(repo_url, limit):
    """Fetch open pull requests for the GitHub repository at *repo_url*."""
    owner, repo = repo_url.rstrip('/').split('/')[-2:]
    api_url = f"https://api.github.com/repos/{owner}/{repo}/pulls"
    query = {'state': 'open', 'per_page': 100}
    return get_all_items(api_url, query, limit)
| 69 | + |
| 70 | + |
def generate_ods(issues, pull_requests, filename, people):
    """Write issues and pull requests to an ODS workbook, one sheet each.

    Each row gets an 'action' owner assigned round-robin from *people*.

    Args:
        issues: list of GitHub issue dicts (API shape).
        pull_requests: list of GitHub PR dicts (API shape).
        filename: destination .ods path.
        people: non-empty list of names to distribute rows among.
    """
    # Plain dicts preserve insertion order (Python 3.7+); the original
    # instantiated typing.OrderedDict, a deprecated alias not meant to be
    # called at runtime.
    book = {
        "Issues": _sheet_rows(issues, people),
        "Pull Requests": _sheet_rows(pull_requests, people),
    }
    pyexcel.save_book_as(bookdict=book, dest_file_name=filename)


def _sheet_rows(items, people):
    """Build a header row plus one row per issue/PR for a spreadsheet sheet.

    Shared by both sheets — the original duplicated this loop verbatim.
    """
    rows = [['url', 'title', 'created_at', 'user', 'assignee', 'action']]
    for n, item in enumerate(items):
        rows.append(
            [
                item['html_url'],
                item['title'],
                item['created_at'],
                item['user']['login'],
                # Unassigned items get the literal string 'None' (original behavior).
                item['assignee']['login'] if item['assignee'] else 'None',
                people[n % len(people)],
            ]
        )
    return rows
| 103 | + |
| 104 | + |
def main():
    """CLI entry point: fetch open issues and PRs, then export them to ODS."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--limit", type=int, default=None, help="minimum number of issues/PRs to pull [Pulls all by default]")
    parser.add_argument("--out", type=str, default="awx_community-triage.ods", help="output file name [awx_community-triage.ods]")
    parser.add_argument("--repository-url", type=str, default="https://github.com/ansible/awx", help="repository url [https://github.com/ansible/awx]")
    parser.add_argument("--people", type=str, default="Alice,Bob", help="comma separated list of names to distribute the issues/PRs among [Alice,Bob]")
    args = parser.parse_args()

    names = str(args.people).split(",")
    issues = get_open_issues(args.repository_url, args.limit)
    prs = get_open_pull_requests(args.repository_url, args.limit)
    print(f"Open issues: {len(issues)}")
    print(f"Open Pull Requests: {len(prs)}")
    generate_ods(issues, prs, args.out, names)
    print(f"Generated {args.out} with open issues and pull requests.")
| 122 | + |
| 123 | + |
# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
0 commit comments