-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcoding_logic_34.py
More file actions
61 lines (51 loc) · 2.78 KB
/
coding_logic_34.py
File metadata and controls
61 lines (51 loc) · 2.78 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
import json
import time
from google import genai
from google.colab import userdata
from preprocessing_util import clean_raw_text, AI_CONFIG, MODEL_NAME
# ---------------------------------------------------------------------------
# Module-level setup: shared Gemini client, codebook, and the two prompts.
# ---------------------------------------------------------------------------

# One client for the whole session; every call below reuses it.
api_key = userdata.get('GEMINI_API_KEY')
client = genai.Client(api_key=api_key)

# Theme codebook lives on Drive as JSON; loaded once at import time.
CODEBOOK_PATH = '/content/drive/MyDrive/34Batch/codebook_theme.json'
with open(CODEBOOK_PATH, 'r') as f:
    CODEBOOK_DICT = json.load(f)

# Coder prompt — the entire codebook is inlined so the model only ever
# sees (and can only pick from) the codes we actually defined.
SYSTEM_PROMPT = f"""
You are an expert qualitative researcher. Code the transcript using the JSON codebook below.
### CODEBOOK:
{json.dumps(CODEBOOK_DICT, indent=2)}
"""

# Verifier prompt — deliberately permissive: it should only veto clearly
# wrong or out-of-codebook codes, not second-guess reasonable ones.
VERIFIER_PROMPT = """
You are a Quality Assurance assistant. Your job is to prevent overcoding hallucinations.
Review the selected codes and ensure that only codes with direct, strong conceptual alignment.
ONLY REJECT (is_valid: false) if:
1. a code is a weak or tangential match
2. The code applied is not in the Codebook JSON at all.
3. Ensure exact string matches for codes.
Otherwise, return is_valid: true. Trust the Coder's domain expertise for library service categories.
OUTPUT: Return JSON {"is_valid": true/false, "feedback": "reason"}
"""
def _strip_model_formatting(text):
    """Remove Markdown code fences (``` / ```json) from a model reply.

    Only the fence markup is removed — unlike the old
    ``.replace("json", "")`` approach, the payload itself is untouched,
    so a code name that happens to contain "json" survives intact.
    Handles a ``None`` reply (blocked/empty response) by returning "".
    """
    cleaned = (text or "").strip()
    if cleaned.startswith("```"):
        # Drop the opening fence line, which may carry a language tag.
        _, _, cleaned = cleaned.partition("\n")
    if cleaned.endswith("```"):
        cleaned = cleaned[:-3]
    return cleaned.strip()


def _extract_is_valid(reply_text):
    """Return the verifier's boolean verdict from its JSON reply.

    Tries a real JSON parse first; if the reply is not valid JSON, falls
    back to a whitespace-insensitive substring check so both
    ``{"is_valid": true}`` and ``{"is_valid":true}`` are recognized
    (the old check required the exact one-space spelling).
    """
    try:
        verdict = json.loads(_strip_model_formatting(reply_text))
        # Per the prompt, missing verdict defaults to "trust the coder".
        return bool(verdict.get("is_valid", True))
    except (json.JSONDecodeError, AttributeError, TypeError):
        return '"is_valid":true' in (reply_text or "").lower().replace(" ", "")


def code_transcript_with_verify(transcript):
    """Code one transcript with a coder -> verifier -> optional-revision loop.

    Args:
        transcript: Raw transcript text; cleaned via ``clean_raw_text``.

    Returns:
        A ``(codes, feedback, thoughts)`` tuple of strings:
        codes     -- ' | '-separated code names from the final accepted pass;
        feedback  -- "PASS", "N/A" (too short), or the verifier's raw reply;
        thoughts  -- the coder's recorded thought part, prefixed with
                     "REVISED | " when the verifier forced a second pass.
    """
    cleaned_input = clean_raw_text(transcript)
    if len(str(cleaned_input)) < 10:
        # Not enough material to code meaningfully.
        return "Abandoned Chat", "N/A", "Insufficient data"

    # --- STEP 1: INITIAL ATTEMPT (demanding pipe-separated string format) ---
    initial_prompt = f"{SYSTEM_PROMPT}\n\nTranscript: {cleaned_input}\n\nOUTPUT FORMAT: Provide ONLY the code names separated by ' | '. Do not use JSON."
    res = client.models.generate_content(model=MODEL_NAME, contents=initial_prompt, config=AI_CONFIG)
    initial_code = _strip_model_formatting(res.text)
    # 'thought' part may be absent depending on model/config — default gracefully.
    initial_thoughts = getattr(res.candidates[0].content.parts[0], 'thought', "No thoughts recorded")

    # --- STEP 2: SURGICAL VERIFICATION ---
    # Deliberately run without AI_CONFIG: a light touch avoids the
    # over-correction seen when the verifier reasons too aggressively.
    v_prompt = f"{VERIFIER_PROMPT}\n\nTRANSCRIPT: {cleaned_input}\n\nPROPOSED CODES: {initial_code}"
    v_res = client.models.generate_content(model=MODEL_NAME, contents=v_prompt)
    feedback = v_res.text or ""
    is_valid = _extract_is_valid(feedback)

    # --- STEP 3: REVISION (only when the verifier rejected the codes) ---
    if not is_valid:
        revision_prompt = f"{SYSTEM_PROMPT}\n\nTranscript: {cleaned_input}\n\nAUDIT FEEDBACK: {feedback}\n\nREVISE AND PROVIDE ONLY THE CODE NAMES SEPARATED BY ' | '."
        rev_res = client.models.generate_content(model=MODEL_NAME, contents=revision_prompt, config=AI_CONFIG)
        final_code = _strip_model_formatting(rev_res.text)
        return final_code, feedback, f"REVISED | {initial_thoughts}"
    return initial_code, "PASS", initial_thoughts