codelion committed
Commit 058ded2
1 Parent(s): 8c3788f

Upload 2 files

Files changed (2)
  1. _script_for_eval.py +125 -224
  2. _script_for_gen.py +403 -124
_script_for_eval.py CHANGED
@@ -1,231 +1,132 @@
1
  import os
2
- import argparse
3
  import json
4
- import numpy as np
5
- from tqdm import tqdm
6
- import nltk
7
- from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
8
- from rouge import Rouge
9
- from sklearn.feature_extraction.text import TfidfVectorizer
10
- from sklearn.metrics.pairwise import cosine_similarity
11
- import re
12
- from textstat import flesch_reading_ease
13
- from datasets import load_dataset
14
- import openai
15
- from datetime import datetime
16
-
17
- nltk.download('punkt', quiet=True)
18
- nltk.download('averaged_perceptron_tagger', quiet=True)
19
-
20
- def preprocess(text):
21
- return nltk.word_tokenize(text.lower())
22
-
23
- def calculate_bleu(reference, candidate):
24
- reference_tokens = preprocess(reference)
25
- candidate_tokens = preprocess(candidate)
26
- smoothie = SmoothingFunction().method1
27
- return sentence_bleu([reference_tokens], candidate_tokens, smoothing_function=smoothie)
28
-
29
- def calculate_rouge(reference, candidate):
30
- rouge = Rouge()
31
- scores = rouge.get_scores(candidate, reference)
32
- return {
33
- 'rouge-1': scores[0]['rouge-1']['f'],
34
- 'rouge-2': scores[0]['rouge-2']['f'],
35
- 'rouge-l': scores[0]['rouge-l']['f']
36
- }
37
-
38
- def calculate_cosine_similarity(reference, candidate):
39
- vectorizer = TfidfVectorizer()
40
- tfidf_matrix = vectorizer.fit_transform([reference, candidate])
41
- return cosine_similarity(tfidf_matrix[0:1], tfidf_matrix[1:2])[0][0]
42
-
43
- def extract_sections(readme):
44
- sections = []
45
- current_section = ""
46
- for line in readme.split('\n'):
47
- if line.strip().startswith('#'):
48
- if current_section:
49
- sections.append(current_section.strip())
50
- current_section = line + "\n"
51
- else:
52
- current_section += line + "\n"
53
- if current_section:
54
- sections.append(current_section.strip())
55
- return sections
56
-
57
- def calculate_structural_similarity(reference, candidate):
58
- ref_sections = extract_sections(reference)
59
- cand_sections = extract_sections(candidate)
60
-
61
- section_diff = abs(len(ref_sections) - len(cand_sections))
62
-
63
- ref_titles = [s.split('\n')[0] for s in ref_sections]
64
- cand_titles = [s.split('\n')[0] for s in cand_sections]
65
- title_similarity = len(set(ref_titles) & set(cand_titles)) / max(len(ref_titles), len(cand_titles))
66
-
67
- return {
68
- 'section_difference': section_diff,
69
- 'title_similarity': title_similarity
70
- }
71
-
72
- def information_retrieval_score(readme):
73
- key_sections = ['installation', 'usage', 'api', 'example', 'license']
74
- found_sections = sum(1 for section in key_sections if section in readme.lower())
75
- return found_sections / len(key_sections)
76
-
77
- def code_readme_consistency(repo_content, readme):
78
- code_elements = set(re.findall(r'def\s+(\w+)', repo_content) +
79
- re.findall(r'class\s+(\w+)', repo_content))
80
-
81
- mentioned_elements = sum(1 for element in code_elements if element in readme)
82
-
83
- return mentioned_elements / len(code_elements) if code_elements else 0
84
-
85
- def calculate_readability(text):
86
- return flesch_reading_ease(text) / 100
87
-
88
- def evaluate_readme(reference_readme, generated_readme, repo_content):
89
- bleu_score = calculate_bleu(reference_readme, generated_readme)
90
- rouge_scores = calculate_rouge(reference_readme, generated_readme)
91
- cosine_sim = calculate_cosine_similarity(reference_readme, generated_readme)
92
- structural_sim = calculate_structural_similarity(reference_readme, generated_readme)
93
- info_retrieval = information_retrieval_score(generated_readme)
94
- code_consistency = code_readme_consistency(repo_content, generated_readme)
95
- readability = calculate_readability(generated_readme)
96
-
97
- weights = {
98
- 'bleu': 0.1,
99
- 'rouge-1': 0.1,
100
- 'rouge-2': 0.1,
101
- 'rouge-l': 0.1,
102
- 'cosine_similarity': 0.1,
103
- 'structural_similarity': 0.1,
104
- 'information_retrieval': 0.15,
105
- 'code_consistency': 0.15,
106
- 'readability': 0.1
107
- }
108
-
109
- weighted_score = (
110
- weights['bleu'] * bleu_score +
111
- weights['rouge-1'] * rouge_scores['rouge-1'] +
112
- weights['rouge-2'] * rouge_scores['rouge-2'] +
113
- weights['rouge-l'] * rouge_scores['rouge-l'] +
114
- weights['cosine_similarity'] * cosine_sim +
115
- weights['structural_similarity'] * structural_sim['title_similarity'] +
116
- weights['information_retrieval'] * info_retrieval +
117
- weights['code_consistency'] * code_consistency +
118
- weights['readability'] * readability
119
- )
120
-
121
- return {
122
- 'bleu': bleu_score,
123
- 'rouge': rouge_scores,
124
- 'cosine_similarity': cosine_sim,
125
- 'structural_similarity': structural_sim,
126
- 'information_retrieval': info_retrieval,
127
- 'code_consistency': code_consistency,
128
- 'readability': readability,
129
- 'weighted_score': weighted_score
130
- }
131
-
132
- def generate_readme(repo_content, model, client):
133
- system_prompt = """You are an AI assistant tasked with creating a README.md file for a GitHub repository.
134
- Your response should contain ONLY the content of the README.md file, without any additional explanations or markdown code blocks.
135
- The README should include the following sections:
136
- 1. Project Title
137
- 2. Description
138
- 3. Installation
139
- 4. Usage
140
- 5. Features
141
- 6. Contributing
142
- 7. License
143
- Ensure that your response is well-structured, informative, and directly usable as a README.md file."""
144
-
145
- user_prompt = f"Here is the content of the repository:\n\n{repo_content}\n\nBased on this content, please generate a README.md file."
146
-
147
- response = client.chat.completions.create(
148
- model=model,
149
- messages=[
150
- {"role": "system", "content": system_prompt},
151
- {"role": "user", "content": user_prompt}
152
- ]
153
- )
154
-
155
- return response.choices[0].message.content
156
-
157
- def main(args):
158
- openai.api_key = os.getenv("OPENAI_API_KEY")
159
- if not openai.api_key:
160
- raise ValueError("OPENAI_API_KEY environment variable is not set")
161
-
162
- client = openai.OpenAI(base_url=args.base_url) if args.base_url else openai.OpenAI()
163
-
164
- dataset = load_dataset("patched-codes/generate-readme-eval")
165
-
166
- results = []
167
-
168
- for item in tqdm(dataset['test'], desc="Processing repos"):
169
- try:
170
- generated_readme = generate_readme(item['repo_content'], args.model, client)
171
- eval_result = evaluate_readme(item['repo_readme'], generated_readme, item['repo_content'])
172
- # Add repo_name to the eval_result
173
- eval_result['repo_name'] = item['repo_name']
174
- results.append(eval_result)
175
- except Exception as e:
176
- print(f"Error processing repo {item['repo_name']}: {e}")
177
  continue
178
 
179
- average_scores = {
180
- 'bleu': np.mean([r['bleu'] for r in results]),
181
- 'rouge-1': np.mean([r['rouge']['rouge-1'] for r in results]),
182
- 'rouge-2': np.mean([r['rouge']['rouge-2'] for r in results]),
183
- 'rouge-l': np.mean([r['rouge']['rouge-l'] for r in results]),
184
- 'cosine_similarity': np.mean([r['cosine_similarity'] for r in results]),
185
- 'title_similarity': np.mean([r['structural_similarity']['title_similarity'] for r in results]),
186
- 'information_retrieval': np.mean([r['information_retrieval'] for r in results]),
187
- 'code_consistency': np.mean([r['code_consistency'] for r in results]),
188
- 'readability': np.mean([r['readability'] for r in results]),
189
- 'weighted_score': np.mean([r['weighted_score'] for r in results])
190
- }
191
-
192
- # Print results to console
193
- print("\nEvaluation Results:")
194
- for metric, score in average_scores.items():
195
- print(f"{metric}: {score:.4f}")
196
-
197
- # Save results to log file
198
- timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
199
- log_filename = f"{args.model}_results_{timestamp}.log"
200
-
201
- with open(log_filename, 'w') as log_file:
202
- log_file.write(f"Evaluation Results for model: {args.model}\n")
203
- log_file.write(f"Timestamp: {timestamp}\n\n")
204
- log_file.write("Average Scores:\n")
205
- for metric, score in average_scores.items():
206
- log_file.write(f"{metric}: {score:.4f}\n")
207
 
208
- log_file.write(f"\nDetailed Results:\n")
209
- for result in results:
210
- log_file.write(f"\nRepository: {result['repo_name']}\n")
211
- log_file.write("Scores:\n")
212
- log_file.write(f" BLEU: {result['bleu']:.4f}\n")
213
- log_file.write(f" ROUGE-1: {result['rouge']['rouge-1']:.4f}\n")
214
- log_file.write(f" ROUGE-2: {result['rouge']['rouge-2']:.4f}\n")
215
- log_file.write(f" ROUGE-L: {result['rouge']['rouge-l']:.4f}\n")
216
- log_file.write(f" Cosine Similarity: {result['cosine_similarity']:.4f}\n")
217
- log_file.write(f" Title Similarity: {result['structural_similarity']['title_similarity']:.4f}\n")
218
- log_file.write(f" Information Retrieval: {result['information_retrieval']:.4f}\n")
219
- log_file.write(f" Code Consistency: {result['code_consistency']:.4f}\n")
220
- log_file.write(f" Readability: {result['readability']:.4f}\n")
221
- log_file.write(f" Weighted Score: {result['weighted_score']:.4f}\n")
222
-
223
- print(f"\nResults saved to {log_filename}")
224
 
225
  if __name__ == "__main__":
226
- parser = argparse.ArgumentParser(description="Generate and evaluate README files using OpenAI API")
227
- parser.add_argument("model", help="OpenAI model to use")
228
- parser.add_argument("--base_url", help="Optional base URL for OpenAI API", default=None)
229
- args = parser.parse_args()
230
-
231
- main(args)
 
1
  import os
2
+ import subprocess
3
  import json
4
+ import logging
5
+ from github import Github
6
+ import tiktoken
7
+ from pathlib import Path
8
+ import shutil
9
+
10
+ # Set up logging
11
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
12
+
13
+ # Initialize GitHub client
14
+ github_token = os.getenv('GITHUB_TOKEN')
15
+ if not github_token:
16
+ raise ValueError("GITHUB_TOKEN environment variable is not set")
17
+ g = Github(github_token)
18
+
19
+ # Initialize tokenizer
20
+ tokenizer = tiktoken.get_encoding("cl100k_base")
21
+
22
+ def clone_repo(repo_url, repo_name):
23
+ tmp_dir = f"/tmp/{repo_name}"
24
+ subprocess.run(["git", "clone", "--depth", "1", repo_url, tmp_dir], check=True)
25
+ return tmp_dir
26
+
27
+ def get_repo_content(repo_dir):
28
+ content = []
29
+ for root, dirs, files in os.walk(repo_dir):
30
+ if "test" in root.lower() or "example" in root.lower():
31
  continue
32
+ for file in files:
33
+ if file.endswith('.py'):
34
+ file_path = os.path.join(root, file)
35
+ relative_path = os.path.relpath(file_path, repo_dir)
36
+ with open(file_path, 'r') as f:
37
+ file_content = f.read()
38
+ content.append(f"File: {relative_path}\n\n{file_content}\n\n")
39
+ return "\n".join(content)
40
+
41
+ def count_tokens(text):
42
+ return len(tokenizer.encode(text))
43
+
44
+ def process_repo(repo):
45
+ repo_name = repo.name
46
+ repo_url = repo.clone_url
47
+ logging.info(f"Processing repository: {repo_name}")
48
+
49
+ try:
50
+ tmp_dir = clone_repo(repo_url, repo_name)
51
+ readme_path = os.path.join(tmp_dir, "README.md")
52
+
53
+ if not os.path.exists(readme_path):
54
+ logging.info(f"README.md not found in {repo_name}")
55
+ return None
56
+
57
+ repo_content = get_repo_content(tmp_dir)
58
+ if count_tokens(repo_content) >= 100000:
59
+ logging.info(f"Repository {repo_name} content exceeds 100k tokens")
60
+ return None
61
+
62
+ with open(readme_path, 'r') as f:
63
+ readme_content = f.read()
64
+
65
+ repo_commit = subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=tmp_dir).decode().strip()
66
+
67
+ return {
68
+ "repo_name": repo_name,
69
+ "repo_commit": repo_commit,
70
+ "repo_content": repo_content,
71
+ "repo_readme": readme_content
72
+ }
73
+ except Exception as e:
74
+ logging.error(f"Error processing repository {repo_name}: {str(e)}")
75
+ return None
76
+ finally:
77
+ if 'tmp_dir' in locals():
78
+ shutil.rmtree(tmp_dir)
79
+
80
+ def load_existing_data(filename):
81
+ existing_data = {}
82
+ if os.path.exists(filename):
83
+ with open(filename, "r") as f:
84
+ for line in f:
85
+ item = json.loads(line)
86
+ existing_data[item['repo_name']] = item
87
+ return existing_data
88
+
89
+ def save_dataset(filename, dataset, mode='a'):
90
+ with open(filename, mode) as f:
91
+ for item in dataset:
92
+ json.dump(item, f)
93
+ f.write("\n")
94
+
95
+ def main():
96
+ g = Github(os.getenv('GITHUB_TOKEN'))
97
+ filename = "generate-readme-eval.jsonl"
98
+ existing_data = load_existing_data(filename)
99
 
100
+ new_dataset = []
101
+ updated_count = 0
102
+ skipped_count = 0
103
+
104
+ repos = g.search_repositories(query="language:python stars:>1000 forks:>100", sort="stars", order="desc")
105
+
106
+ for i, repo in enumerate(repos[200:400]):
107
+ if repo.full_name in existing_data:
108
+ existing_item = existing_data[repo.full_name]
109
+ if existing_item['repo_commit'] == repo.get_commits()[0].sha:
110
+ skipped_count += 1
111
+ logging.info(f"Skipped {repo.full_name}: Already processed with same commit")
112
+ continue
113
+ else:
114
+ logging.info(f"Updating {repo.full_name}: Commit changed")
115
+ updated_count += 1
116
+
117
+ item = process_repo(repo)
118
+ if item:
119
+ new_dataset.append(item)
120
 
121
+ if i % 10 == 0:
122
+ logging.info(f"Processed {i+1} repositories")
123
+
124
+ # Append new and updated items to the file
125
+ save_dataset(filename, new_dataset, mode='a')
126
+
127
+ logging.info(f"Dataset updated with {len(new_dataset)} new/updated items")
128
+ logging.info(f"Skipped {skipped_count} repositories (no changes)")
129
+ logging.info(f"Updated {updated_count} repositories")
130
 
131
  if __name__ == "__main__":
132
+ main()
 
_script_for_gen.py CHANGED
@@ -1,132 +1,411 @@
1
  import os
2
- import subprocess
3
  import json
4
- import logging
5
- from github import Github
6
- import tiktoken
7
- from pathlib import Path
8
- import shutil
9
-
10
- # Set up logging
11
- logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
12
-
13
- # Initialize GitHub client
14
- github_token = os.getenv('GITHUB_TOKEN')
15
- if not github_token:
16
- raise ValueError("GITHUB_TOKEN environment variable is not set")
17
- g = Github(github_token)
18
-
19
- # Initialize tokenizer
20
- tokenizer = tiktoken.get_encoding("cl100k_base")
21
-
22
- def clone_repo(repo_url, repo_name):
23
- tmp_dir = f"/tmp/{repo_name}"
24
- subprocess.run(["git", "clone", "--depth", "1", repo_url, tmp_dir], check=True)
25
- return tmp_dir
26
-
27
- def get_repo_content(repo_dir):
28
- content = []
29
- for root, dirs, files in os.walk(repo_dir):
30
- if "test" in root.lower() or "example" in root.lower():
31
- continue
32
- for file in files:
33
- if file.endswith('.py'):
34
- file_path = os.path.join(root, file)
35
- relative_path = os.path.relpath(file_path, repo_dir)
36
- with open(file_path, 'r') as f:
37
- file_content = f.read()
38
- content.append(f"File: {relative_path}\n\n{file_content}\n\n")
39
- return "\n".join(content)
40
-
41
- def count_tokens(text):
42
- return len(tokenizer.encode(text))
43
-
44
- def process_repo(repo):
45
- repo_name = repo.name
46
- repo_url = repo.clone_url
47
- logging.info(f"Processing repository: {repo_name}")
48
-
49
- try:
50
- tmp_dir = clone_repo(repo_url, repo_name)
51
- readme_path = os.path.join(tmp_dir, "README.md")
52
-
53
- if not os.path.exists(readme_path):
54
- logging.info(f"README.md not found in {repo_name}")
55
- return None
56
-
57
- repo_content = get_repo_content(tmp_dir)
58
- if count_tokens(repo_content) >= 100000:
59
- logging.info(f"Repository {repo_name} content exceeds 100k tokens")
60
- return None
61
-
62
- with open(readme_path, 'r') as f:
63
- readme_content = f.read()
64
-
65
- repo_commit = subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=tmp_dir).decode().strip()
66
-
67
- return {
68
- "repo_name": repo_name,
69
- "repo_commit": repo_commit,
70
- "repo_content": repo_content,
71
- "repo_readme": readme_content
72
- }
73
- except Exception as e:
74
- logging.error(f"Error processing repository {repo_name}: {str(e)}")
75
- return None
76
- finally:
77
- if 'tmp_dir' in locals():
78
- shutil.rmtree(tmp_dir)
79
-
80
- def load_existing_data(filename):
81
- existing_data = {}
82
- if os.path.exists(filename):
83
- with open(filename, "r") as f:
84
- for line in f:
85
- item = json.loads(line)
86
- existing_data[item['repo_name']] = item
87
- return existing_data
88
-
89
- def save_dataset(filename, dataset, mode='a'):
90
- with open(filename, mode) as f:
91
- for item in dataset:
92
- json.dump(item, f)
93
- f.write("\n")
94
-
95
- def main():
96
- g = Github(os.getenv('GITHUB_TOKEN'))
97
- filename = "generate-readme-eval.jsonl"
98
- existing_data = load_existing_data(filename)
99
-
100
- new_dataset = []
101
- updated_count = 0
102
- skipped_count = 0
103
-
104
- repos = g.search_repositories(query="language:python stars:>1000 forks:>100", sort="stars", order="desc")
105
-
106
- for i, repo in enumerate(repos[200:400]):
107
- if repo.full_name in existing_data:
108
- existing_item = existing_data[repo.full_name]
109
- if existing_item['repo_commit'] == repo.get_commits()[0].sha:
110
- skipped_count += 1
111
- logging.info(f"Skipped {repo.full_name}: Already processed with same commit")
112
- continue
113
  else:
114
- logging.info(f"Updating {repo.full_name}: Commit changed")
115
- updated_count += 1
116
-
117
- item = process_repo(repo)
118
- if item:
119
- new_dataset.append(item)
120
 
121
- if i % 10 == 0:
122
- logging.info(f"Processed {i+1} repositories")
123
 
124
- # Append new and updated items to the file
125
- save_dataset(filename, new_dataset, mode='a')
126
 
127
- logging.info(f"Dataset updated with {len(new_dataset)} new/updated items")
128
- logging.info(f"Skipped {skipped_count} repositories (no changes)")
129
- logging.info(f"Updated {updated_count} repositories")
130
 
131
  if __name__ == "__main__":
132
- main()
 
1
  import os
2
+ import argparse
3
  import json
4
+ import numpy as np
5
+ from tqdm import tqdm
6
+ import nltk
7
+ from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
8
+ from rouge import Rouge
9
+ from sklearn.feature_extraction.text import TfidfVectorizer
10
+ from sklearn.metrics.pairwise import cosine_similarity
11
+ import re
12
+ from textstat import flesch_reading_ease
13
+ from datasets import load_dataset
14
+ import openai
15
+ import time
16
+ from datetime import datetime
17
+ import google.generativeai as genai
18
+ import traceback
19
+
20
+ SLEEP_INTERVAL = 30
21
+
22
+ nltk.download('punkt', quiet=True)
23
+ nltk.download('averaged_perceptron_tagger', quiet=True)
24
+
25
+ def create_client(model_name, base_url):
26
+ if model_name.lower().startswith('gemini'):
27
+ api_key = os.getenv("GOOGLE_API_KEY")
28
+ if not api_key:
29
+ raise ValueError("GOOGLE_API_KEY environment variable is not set")
30
+ genai.configure(api_key=api_key)
31
+ return 'gemini'
32
+ else:
33
+ api_key = os.getenv("OPENAI_API_KEY")
34
+ if not api_key:
35
+ raise ValueError("OPENAI_API_KEY environment variable is not set")
36
+ return openai.OpenAI(api_key=api_key) if base_url is None else openai.OpenAI(api_key=api_key, base_url=base_url)
37
+
38
+ def preprocess(text):
39
+ return nltk.word_tokenize(text.lower())
40
+
41
+ def calculate_bleu(reference, candidate):
42
+ reference_tokens = preprocess(reference)
43
+ candidate_tokens = preprocess(candidate)
44
+ smoothie = SmoothingFunction().method1
45
+ return sentence_bleu([reference_tokens], candidate_tokens, smoothing_function=smoothie)
46
+
47
+ def calculate_rouge(reference, candidate):
48
+ rouge = Rouge()
49
+ scores = rouge.get_scores(candidate, reference)
50
+ return {
51
+ 'rouge-1': scores[0]['rouge-1']['f'],
52
+ 'rouge-2': scores[0]['rouge-2']['f'],
53
+ 'rouge-l': scores[0]['rouge-l']['f']
54
+ }
55
+
56
+ def calculate_cosine_similarity(reference, candidate):
57
+ vectorizer = TfidfVectorizer()
58
+ tfidf_matrix = vectorizer.fit_transform([reference, candidate])
59
+ return cosine_similarity(tfidf_matrix[0:1], tfidf_matrix[1:2])[0][0]
60
+
61
+ def extract_sections(readme):
62
+ sections = []
63
+ current_section = ""
64
+ for line in readme.split('\n'):
65
+ if line.strip().startswith('#'):
66
+ if current_section:
67
+ sections.append(current_section.strip())
68
+ current_section = line + "\n"
69
+ else:
70
+ current_section += line + "\n"
71
+ if current_section:
72
+ sections.append(current_section.strip())
73
+ return sections
74
+
75
+ def calculate_structural_similarity(reference, candidate):
76
+ ref_sections = extract_sections(reference)
77
+ cand_sections = extract_sections(candidate)
78
+
79
+ # Calculate section difference
80
+ max_sections = max(len(ref_sections), len(cand_sections))
81
+ section_diff = abs(len(ref_sections) - len(cand_sections))
82
+ section_similarity = 1 - (section_diff / max_sections) if max_sections > 0 else 0
83
+
84
+ # Calculate title similarity
85
+ ref_titles = [s.split('\n')[0] for s in ref_sections]
86
+ cand_titles = [s.split('\n')[0] for s in cand_sections]
87
+ title_similarity = len(set(ref_titles) & set(cand_titles)) / max(len(ref_titles), len(cand_titles)) if ref_titles or cand_titles else 0
88
+
89
+ # Combine section and title similarity
90
+ structural_similarity = (section_similarity + title_similarity) / 2
91
+
92
+ return structural_similarity
93
+
94
+ def information_retrieval_score(readme):
95
+ key_sections = ['installation', 'usage', 'api', 'example', 'license']
96
+ found_sections = sum(1 for section in key_sections if section in readme.lower())
97
+ return found_sections / len(key_sections)
98
+
99
+ def code_readme_consistency(repo_content, readme):
100
+ code_elements = set(re.findall(r'def\s+(\w+)', repo_content) +
101
+ re.findall(r'class\s+(\w+)', repo_content))
102
+
103
+ mentioned_elements = sum(1 for element in code_elements if element in readme)
104
+
105
+ return mentioned_elements / len(code_elements) if code_elements else 0
106
+
107
+ def calculate_readability(text):
108
+ return flesch_reading_ease(text) / 100
109
+
110
+ def evaluate_readme(reference_readme, generated_readme, repo_content):
111
+ bleu_score = calculate_bleu(reference_readme, generated_readme)
112
+ rouge_scores = calculate_rouge(reference_readme, generated_readme)
113
+ cosine_sim = calculate_cosine_similarity(reference_readme, generated_readme)
114
+ structural_sim = calculate_structural_similarity(reference_readme, generated_readme)
115
+ info_retrieval = information_retrieval_score(generated_readme)
116
+ code_consistency = code_readme_consistency(repo_content, generated_readme)
117
+ readability = calculate_readability(generated_readme)
118
+
119
+ weights = {
120
+ 'bleu': 0.1,
121
+ 'rouge-1': 0.033,
122
+ 'rouge-2': 0.033,
123
+ 'rouge-l': 0.034,
124
+ 'cosine_similarity': 0.1,
125
+ 'structural_similarity': 0.1,
126
+ 'information_retrieval': 0.2,
127
+ 'code_consistency': 0.2,
128
+ 'readability': 0.2
129
+ }
130
+
131
+ weighted_score = (
132
+ weights['bleu'] * bleu_score +
133
+ weights['rouge-1'] * rouge_scores['rouge-1'] +
134
+ weights['rouge-2'] * rouge_scores['rouge-2'] +
135
+ weights['rouge-l'] * rouge_scores['rouge-l'] +
136
+ weights['cosine_similarity'] * cosine_sim +
137
+ weights['structural_similarity'] * structural_sim +
138
+ weights['information_retrieval'] * info_retrieval +
139
+ weights['code_consistency'] * code_consistency +
140
+ weights['readability'] * readability
141
+ )
142
+
143
+ return {
144
+ 'bleu': bleu_score,
145
+ 'rouge': rouge_scores,
146
+ 'cosine_similarity': cosine_sim,
147
+ 'structural_similarity': structural_sim,
148
+ 'information_retrieval': info_retrieval,
149
+ 'code_consistency': code_consistency,
150
+ 'readability': readability,
151
+ 'weighted_score': weighted_score
152
+ }
153
+
154
+ def generate_readme_openai(repo_content, model, client):
155
+ system_prompt = """You are an AI assistant tasked with creating a README.md file for a GitHub repository.
156
+ Your response should contain ONLY the content of the README.md file, without any additional explanations or markdown code blocks.
157
+ The README should include the following sections:
158
+ 1. Project Title
159
+ 2. Description
160
+ 3. Installation
161
+ 4. Usage
162
+ 5. Features
163
+ 6. Contributing
164
+ 7. License
165
+ Ensure that your response is well-structured, informative, and directly usable as a README.md file."""
166
+
167
+ user_prompt = f"Here is the content of the repository:\n\n{repo_content}\n\nBased on this content, please generate a README.md file."
168
+
169
+ response = client.chat.completions.create(
170
+ model=model,
171
+ messages=[
172
+ {"role": "system", "content": system_prompt},
173
+ {"role": "user", "content": user_prompt}
174
+ ]
175
+ )
176
+
177
+ return response.choices[0].message.content
178
+
179
+ def generate_readme_gemini(repo_content, model):
180
+ safe = [
181
+ {
182
+ "category": "HARM_CATEGORY_HARASSMENT",
183
+ "threshold": "BLOCK_NONE",
184
+ },
185
+ {
186
+ "category": "HARM_CATEGORY_HATE_SPEECH",
187
+ "threshold": "BLOCK_NONE",
188
+ },
189
+ {
190
+ "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
191
+ "threshold": "BLOCK_NONE",
192
+ },
193
+ {
194
+ "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
195
+ "threshold": "BLOCK_NONE",
196
+ },
197
+ ]
198
+ prompt = f"""Create a README.md file for a GitHub repository based on the following repository content.
199
+ Your response should contain ONLY the content of the README.md file, without any additional explanations or markdown code blocks.
200
+ The README should include the following sections:
201
+ 1. Project Title
202
+ 2. Description
203
+ 3. Installation
204
+ 4. Usage
205
+ 5. Features
206
+ 6. Contributing
207
+ 7. License
208
+ Ensure that your response is well-structured, informative, and directly usable as a README.md file.
209
+
210
+ Repository content:
211
+
212
+ {repo_content}
213
+ """
214
+
215
+ model = genai.GenerativeModel(model,safety_settings=safe)
216
+ response = model.generate_content(prompt)
217
+
218
+ return response.text
219
+
220
+ def generate_readme(repo_content, model_name, client):
221
+ if client == 'gemini':
222
+ return generate_readme_gemini(repo_content, model_name)
223
+ else:
224
+ return generate_readme_openai(repo_content, model_name, client)
225
+
226
+ def main(args):
227
+ dataset = load_dataset("patched-codes/generate-readme-eval")
228
+
229
+ results = []
230
+
231
+ if args.generate_fine_tune_jsonl:
232
+ output_file = f"fine_tune_data_{datetime.now().strftime('%Y%m%d_%H%M%S')}.jsonl"
233
+ generate_fine_tune_jsonl(dataset, output_file)
234
+ print(f"Fine-tune JSONL file generated: {output_file}")
235
+ return
236
+
237
+ if args.oracle:
238
+ model_name = "oracle"
239
+ else:
240
+ model_name = args.model
241
+ client = create_client(model_name, args.base_url)
242
+
243
+ for item in tqdm(dataset['test'], desc="Processing repos"):
244
+ try:
245
+ if args.oracle:
246
+ # Use the existing README as both reference and generated
247
+ generated_readme = item['repo_readme']
248
+ elif args.n_shot > 0:
249
+ generated_readme = generate_readme_n_shot(item['repo_content'], model_name, client, dataset['train'], args.n_shot)
250
  else:
251
+ generated_readme = generate_readme(item['repo_content'], model_name, client)
252
+
253
+ eval_result = evaluate_readme(item['repo_readme'], generated_readme, item['repo_content'])
254
+ eval_result['repo_name'] = item['repo_name']
255
+ results.append(eval_result)
256
+ except Exception as e:
257
+ print(f"Error processing repo {item['repo_name']}: {e}")
258
+ continue
259
+ if model_name.lower().startswith('gemini'):
260
+ time.sleep(SLEEP_INTERVAL)
261
+
262
+ average_scores = {
263
+ 'bleu': np.mean([r['bleu'] for r in results]),
264
+ 'rouge-1': np.mean([r['rouge']['rouge-1'] for r in results]),
265
+ 'rouge-2': np.mean([r['rouge']['rouge-2'] for r in results]),
266
+ 'rouge-l': np.mean([r['rouge']['rouge-l'] for r in results]),
267
+ 'cosine_similarity': np.mean([r['cosine_similarity'] for r in results]),
268
+ 'structural_similarity': np.mean([r['structural_similarity'] for r in results]),
269
+ 'information_retrieval': np.mean([r['information_retrieval'] for r in results]),
270
+ 'code_consistency': np.mean([r['code_consistency'] for r in results]),
271
+ 'readability': np.mean([r['readability'] for r in results]),
272
+ 'weighted_score': np.mean([r['weighted_score'] for r in results])
273
+ }
274
+
275
+ # Print results to console
276
+ print("\nEvaluation Results:")
277
+ for metric, score in average_scores.items():
278
+ print(f"{metric}: {score:.4f}")
279
+
280
+ # Save results to log file
281
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
282
+ log_filename = f"{model_name}_results_{timestamp}.log"
283
+
284
+ with open(log_filename, 'w') as log_file:
285
+ log_file.write(f"Evaluation Results for model: {model_name}\n")
286
+ log_file.write(f"Timestamp: {timestamp}\n\n")
287
+ log_file.write("Average Scores:\n")
288
+ for metric, score in average_scores.items():
289
+ log_file.write(f"{metric}: {score:.4f}\n")
290
 
291
+ log_file.write(f"\nDetailed Results:\n")
292
+ for result in results:
293
+ log_file.write(f"\nRepository: {result['repo_name']}\n")
294
+ log_file.write("Scores:\n")
295
+ log_file.write(f" BLEU: {result['bleu']:.4f}\n")
296
+ log_file.write(f" ROUGE-1: {result['rouge']['rouge-1']:.4f}\n")
297
+ log_file.write(f" ROUGE-2: {result['rouge']['rouge-2']:.4f}\n")
298
+ log_file.write(f" ROUGE-L: {result['rouge']['rouge-l']:.4f}\n")
299
+ log_file.write(f" Cosine Similarity: {result['cosine_similarity']:.4f}\n")
300
+ log_file.write(f" Structural Similarity: {result['structural_similarity']:.4f}\n")
301
+ log_file.write(f" Information Retrieval: {result['information_retrieval']:.4f}\n")
302
+ log_file.write(f" Code Consistency: {result['code_consistency']:.4f}\n")
303
+ log_file.write(f" Readability: {result['readability']:.4f}\n")
304
+ log_file.write(f" Weighted Score: {result['weighted_score']:.4f}\n")
305
 
306
+ print(f"\nResults saved to {log_filename}")
307
 
308
+ def generate_fine_tune_jsonl(dataset, output_file):
309
+ system_prompt = """You are an AI assistant tasked with creating a README.md file for a GitHub repository.
310
+ Your response should contain ONLY the content of the README.md file, without any additional explanations or markdown code blocks.
311
+ The README should include the following sections:
312
+ 1. Project Title
313
+ 2. Description
314
+ 3. Installation
315
+ 4. Usage
316
+ 5. Features
317
+ 6. Contributing
318
+ 7. License
319
+ Ensure that your response is well-structured, informative, and directly usable as a README.md file."""
320
+
321
+ with open(output_file, 'w') as f:
322
+ for item in tqdm(dataset['train'], desc="Generating fine-tune data"):
323
+ user_prompt = f"Here is the content of the repository:\n\n{item['repo_content']}\n\nBased on this content, please generate a README.md file."
324
+
325
+ messages = [
326
+ {"role": "system", "content": system_prompt},
327
+ {"role": "user", "content": user_prompt},
328
+ {"role": "assistant", "content": item['repo_readme']}
329
+ ]
330
+
331
+ json.dump({"messages": messages}, f)
332
+ f.write('\n')
333
+
334
+ def find_similar_examples(repo_content, train_dataset, n):
335
+ vectorizer = TfidfVectorizer()
336
+ train_contents = [item['repo_content'] for item in train_dataset]
337
+ train_vectors = vectorizer.fit_transform(train_contents)
338
+ query_vector = vectorizer.transform([repo_content])
339
+
340
+ similarities = cosine_similarity(query_vector, train_vectors).flatten()
341
+ top_n_indices = similarities.argsort()[-n:][::-1]
342
+
343
+ return [train_dataset[int(i)] for i in top_n_indices]
344
+
345
+ def generate_readme_n_shot(repo_content, model_name, client, train_dataset, n_shot):
346
+ similar_examples = find_similar_examples(repo_content, train_dataset, n_shot)
347
+
348
+ system_prompt = """You are an AI assistant tasked with creating a README.md file for a GitHub repository.
349
+ Your response should contain ONLY the content of the README.md file, without any additional explanations or markdown code blocks.
350
+ The README should include the following sections:
351
+ 1. Project Title
352
+ 2. Description
353
+ 3. Installation
354
+ 4. Usage
355
+ 5. Features
356
+ 6. Contributing
357
+ 7. License
358
+ Ensure that your response is well-structured, informative, and directly usable as a README.md file."""
359
+
360
+ few_shot_examples = ""
361
+ for example in similar_examples:
362
+ few_shot_examples += f"Repository content:\n\n{example['repo_content']}\n\n"
363
+ few_shot_examples += f"Generated README:\n\n{example['repo_readme']}\n\n---\n\n"
364
+
365
+ user_prompt = f"""Here are some examples of repository contents and their corresponding README files:
366
+
367
+ {few_shot_examples}
368
+ Now, here is the content of the repository you need to create a README for:
369
+
370
+ {repo_content}
371
+
372
+ Based on this content and the examples provided, please generate a README.md file."""
373
+
374
+ if client == 'gemini':
375
+ safe = [
376
+ {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
377
+ {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
378
+ {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
379
+ {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
380
+ ]
381
+ model = genai.GenerativeModel(model_name, safety_settings=safe)
382
+ response = model.generate_content(user_prompt)
383
+ return response.text
384
+ else:
385
+ response = client.chat.completions.create(
386
+ model=model_name,
387
+ messages=[
388
+ {"role": "system", "content": system_prompt},
389
+ {"role": "user", "content": user_prompt}
390
+ ]
391
+ )
392
+ return response.choices[0].message.content
393
 
394
  if __name__ == "__main__":
395
+ parser = argparse.ArgumentParser(description="Generate and evaluate README files using OpenAI or Gemini API, or compute oracle scores")
396
+ parser.add_argument("model", nargs='?', help="Model to use (e.g., 'gpt-4o-mini' for OpenAI or 'gemini-1.5-flash' for Google)")
397
+ parser.add_argument("--base_url", help="Optional base URL for OpenAI API", default=None)
398
+ parser.add_argument("--oracle", action="store_true", help="Compute oracle scores using existing READMEs")
399
+ parser.add_argument("--generate-fine-tune-jsonl", action="store_true", help="Generate a JSONL file for fine-tuning")
400
+ parser.add_argument("--n_shot", type=int, default=0, help="Number of examples to use for few-shot learning")
401
+ args = parser.parse_args()
402
+
403
+ if args.generate_fine_tune_jsonl:
404
+ if args.oracle or args.model:
405
+ parser.error("--generate-fine-tune-jsonl flag cannot be used with --oracle or model specification")
406
+ elif args.oracle and args.model:
407
+ parser.error("--oracle flag cannot be used with a model specification")
408
+ elif not args.oracle and not args.model:
409
+ parser.error("Either --oracle flag or a model name must be provided")
410
+
411
+ main(args)