4 changes: 2 additions & 2 deletions robin/analyses.py
@@ -53,7 +53,7 @@ async def data_analysis(

     # Step 1: Gating, MFI and statistical analysis
     analysis_step = Step(
-        name="job-futurehouse-data-analysis-crow-high",
+        name=configuration.agent_settings.data_analysis_agent,
         prompt_template=analysis_prompt,
         cot_prompt=False,
         input_files={data_path: "flow_250508/"},  # change this to your input folder
@@ -65,7 +65,7 @@ async def data_analysis(

     # Step 2: Consensus Analysis
     consensus_step = Step(
-        name="job-futurehouse-data-analysis-crow-high",
+        name=configuration.agent_settings.data_analysis_agent,
         prompt_template=consensus_prompt,
         cot_prompt=False,
         input_files={
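Both analysis steps now take the agent name from configuration instead of hardcoding the Crow job id. A minimal sketch of the settings shape this assumes (the AgentSettings class name and its default are illustrative; only the data_analysis_agent attribute is confirmed by the diff):

from dataclasses import dataclass, field

@dataclass
class AgentSettings:
    # Illustrative default mirroring the job id that was previously hardcoded.
    data_analysis_agent: str = "job-futurehouse-data-analysis-crow-high"

@dataclass
class RobinConfiguration:
    # Hypothetical subset of the real configuration object.
    agent_settings: AgentSettings = field(default_factory=AgentSettings)

With this shape, both Step definitions resolve configuration.agent_settings.data_analysis_agent, so a different data-analysis agent can be swapped in per run without editing robin/analyses.py.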
15 changes: 7 additions & 8 deletions robin/assays.py
@@ -38,7 +38,7 @@ async def generate_assay_queries(
         A dictionary of queries for the literature search, where keys and values
         are the query strings.
     """
-    logger.info("\n\nStep 1: Formulating relevant queries for literature search...")
+    logger.info("Step 1: Formulating relevant queries for literature search...")
 
     assay_literature_system_message = (
         configuration.prompts.assay_literature_system_message.format(
@@ -87,7 +87,7 @@ async def experimental_assay_lit_review(
     Returns:
         A string containing the summarized literature review.
     """
-    logger.info("\n\nStep 2: Conducting literature search with Edison platform...")
+    logger.info("Step 2: Conducting literature search with Edison platform...")
 
     assay_lit_review = await call_platform(
         queries=experimental_assay_queries_dict,
@@ -122,7 +122,7 @@ async def propose_experimental_assay(
     Returns:
         A list of formatted strings, where each string represents a proposed assay.
     """
-    logger.info("\n\nStep 3: Generating ideas for relevant experimental assays...")
+    logger.info("Step 3: Generating ideas for relevant experimental assays...")
 
     assay_proposal_system_message = (
         configuration.prompts.assay_proposal_system_message.format(
@@ -165,7 +165,7 @@ async def propose_experimental_assay(

await f.write(f"Assay Candidate {i + 1}:\n")
await f.write(f"{strategy}\n")
await f.write(f"{reasoning}\n\n")
await f.write(f"{reasoning}")

logger.info(f"Successfully exported to {assay_list_export_file}")

@@ -188,7 +188,7 @@ async def experimental_assay_detailed_reports(
     Returns:
         A dictionary containing the raw results from the platform call.
     """
-    logger.info("\n\nStep 4: Detailed investigation and evaluation for each assay...")
+    logger.info("Step 4: Detailed investigation and evaluation for each assay...")
 
     def create_assay_hypothesis_queries(assay_idea_list: list[str]) -> dict[str, str]:
         assay_hypothesis_system_prompt = (
@@ -247,7 +247,7 @@ async def select_top_experimental_assay(
     Returns:
         The name (hypothesis) of the top-ranked experimental assay.
     """
-    logger.info("\n\nStep 5: Selecting the top experimental assay...")
+    logger.info("Step 5: Selecting the top experimental assay...")
 
     assay_hypothesis_df = pd.DataFrame(assay_hypotheses["results"])
     assay_hypothesis_df["index"] = assay_hypothesis_df.index
@@ -271,7 +271,7 @@ async def select_top_experimental_assay(

     await run_comparisons(
         pairs_list=assay_pairs_list,
-        client=configuration.llm_client,
+        client=configuration.llm_judge,
         system_prompt=assay_ranking_system_prompt,
         ranking_prompt_format=assay_ranking_prompt_format,
         assay_hypothesis_df=assay_hypothesis_df,
@@ -353,7 +353,6 @@ async def experimental_assay(configuration: RobinConfiguration) -> str:
         The synthesized candidate generation goal, or None if the process fails.
     """
     logger.info("Starting selection of a relevant experimental assay.")
-    logger.info("————————————————————————————————————————————————————")
 
     # Step 1: Generate queries for Crow
     experimental_assay_queries_dict = await generate_assay_queries(configuration)
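Both ranking helpers in this file now pass a dedicated judge client (configuration.llm_judge) to run_comparisons instead of the general-purpose llm_client. A rough sketch of the split this implies, with placeholder names wherever the repo is not shown:

from dataclasses import dataclass

@dataclass
class LLMClient:
    # Placeholder for the real client type; illustrative only.
    model_name: str

    async def call_single(self, messages, **kwargs):
        raise NotImplementedError  # the real client calls the provider API

@dataclass
class RobinConfiguration:
    # Hypothetical subset: one client drafts hypotheses, another judges them.
    llm_client: LLMClient  # generation: queries, assay and candidate proposals
    llm_judge: LLMClient  # pairwise ranking inside run_comparisons

Keeping the judge separate lets pairwise comparisons run on a different model than generation (for example a cheaper or lower-temperature one) without touching call sites such as select_top_experimental_assay.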
33 changes: 13 additions & 20 deletions robin/candidates.py
@@ -43,7 +43,7 @@ async def generate_candidate_queries(
     Returns:
         A dictionary of queries for the literature search.
     """
-    logger.info("\n\nStep 1: Formulating relevant queries for literature search...")
+    logger.info("Step 1: Formulating relevant queries for literature search...")
 
     candidate_query_generation_system_message = (
         configuration.prompts.candidate_query_generation_system_message.format(
@@ -112,7 +112,7 @@ async def candidate_lit_review(
     Returns:
         A string containing the summarized literature review.
     """
-    logger.info("\n\nStep 2: Conducting literature search with Edison platform...")
+    logger.info("Step 2: Conducting literature search with Edison platform...")
 
     run_folder_name = str(configuration.run_folder_name)

@@ -157,7 +157,7 @@ async def propose_therapeutic_candidates(  # noqa: PLR0912
         A list of formatted strings, each representing a proposed candidate.
     """
     logger.info(
-        f"\n\nStep 3: Generating {configuration.num_candidates} ideas for therapeutic"
+        f"Step 3: Generating {configuration.num_candidates} ideas for therapeutic"
         " candidates..."
     )

@@ -199,19 +199,12 @@ async def propose_therapeutic_candidates(  # noqa: PLR0912
Message(role="user", content=candidate_generation_user_message),
]

if "claude" in configuration.llm_name:
candidate_generation_result = await configuration.llm_client.call_single(
messages,
timeout=600,
temperature=1,
max_tokens=32000,
reasoning_effort="high",
)
else:
candidate_generation_result = await configuration.llm_client.call_single(
messages,
temperature=1,
)
candidate_generation_result = await configuration.llm_client.call_single(
messages,
timeout=600,
temperature=1,
reasoning_effort="high",
)

llm_raw_output = cast(str, candidate_generation_result.text)
candidate_ideas_json = []
@@ -301,7 +294,7 @@ async def propose_therapeutic_candidates(  # noqa: PLR0912
await f.write(f"Therapeutic Candidate {i + 1}:\n")
await f.write(f"{parts[0]}\n") # Candidate
await f.write(f"{parts[1]}\n") # Hypothesis
await f.write(f"{parts[2]}\n\n") # Reasoning
await f.write(f"{parts[2]}") # Reasoning

logger.info(f"Successfully exported to {export_file}")
return candidate_idea_list
@@ -320,7 +313,7 @@ async def candidate_detailed_reports(
         candidate_idea_list: The list of proposed candidate strings.
         experimental_insights: Optional insights from experimental data analysis.
     """
-    logger.info("\n\nStep 4: Detailed investigation and evaluation for candidates...")
+    logger.info("Step 4: Detailed investigation and evaluation for candidates...")
     run_folder_name = str(configuration.run_folder_name)
 
     def create_therapeutic_candidate_queries(
@@ -380,7 +373,7 @@ async def rank_therapeutic_candidates(  # noqa: PLR0912
         configuration: The RobinConfiguration object for the run.
         experimental_insights: Optional insights from experimental data analysis.
     """
-    logger.info("\n\nStep 5: Ranking the strength of the therapeutic candidates...")
+    logger.info("Step 5: Ranking the strength of the therapeutic candidates...")
 
     run_folder_name = str(configuration.run_folder_name)
     hypotheses_folder = (
@@ -412,7 +405,7 @@ async def rank_therapeutic_candidates(  # noqa: PLR0912

     await run_comparisons(
         pairs_list=pairs_list,
-        client=configuration.llm_client,
+        client=configuration.llm_judge,
         system_prompt=system_prompt,
         ranking_prompt_format=prompt_format,
         assay_hypothesis_df=candidate_information_df,
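The largest change in this file replaces the Claude-specific branch in propose_therapeutic_candidates with a single call path: every model now gets timeout=600, temperature=1, and reasoning_effort="high", and the explicit max_tokens=32000 cap is dropped. A hedged sketch of that unified path with a stub client (the stub and its result shape are invented for illustration; the diff implies, but does not show, that the real client tolerates reasoning_effort for models without reasoning support):

import asyncio
from types import SimpleNamespace

class StubClient:
    # Stand-in for configuration.llm_client; a real client would forward
    # these keyword arguments to the provider API.
    async def call_single(self, messages, timeout=None, temperature=None,
                          reasoning_effort=None):
        return SimpleNamespace(text="candidate ideas...")

async def main():
    client = StubClient()
    result = await client.call_single(
        [{"role": "user", "content": "Propose therapeutic candidates."}],
        timeout=600,  # generous cap for long generations
        temperature=1,
        reasoning_effort="high",  # assumed ignored by non-reasoning models
    )
    print(result.text)

asyncio.run(main())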