#!/usr/bin/env python3
"""
Debug script to test translation with actual prompt
"""
import openai

from config import Config
from models import TranslationBatch, TranslationItem


def test_translation():
    """Test translation with actual prompt"""
    config = Config()
    client = openai.OpenAI(
        base_url=config.llm_config['base_url'],
        api_key=config.llm_config.get('api_key', 'not-needed')
    )

    # Create a test batch
    items = [
        TranslationItem(name="app_name", value="Test App"),
        TranslationItem(name="welcome", value="Welcome to our app!")
    ]
    batch = TranslationBatch(
        items=items,
        target_language="values-de-rDE",
        target_file="strings.xml"
    )

    # Build the actual prompt
    instruction = (
        "Translate to German (Germany). Use informal 'du' form for user-facing text. "
        "Keep technical terms in English if commonly used."
    )
    prompt = f"Translate the following Android strings to {batch.target_language}.\n\n"
    prompt += f"Instructions: {instruction}\n\n"
    prompt += "Format your response as a JSON array with the same order as input:\n"
    prompt += "[\"translation1\", \"translation2\", ...]\n\n"
    prompt += "Strings to translate:\n"
    for i, item in enumerate(batch.items):
        prompt += f"{i + 1}. {item.value}\n"

    print("=== PROMPT ===")
    print(prompt)
    print("=== END PROMPT ===\n")

    try:
        response = client.chat.completions.create(
            model=config.llm_config['model'],
            messages=[
                {"role": "system", "content": instruction},
                {"role": "user", "content": prompt}
            ],
            temperature=0.3,
            timeout=30
        )

        print(f"Response type: {type(response)}")
        if hasattr(response, 'choices') and response.choices:
            choice = response.choices[0]
            if hasattr(choice, 'message'):
                content = choice.message.content
                print(f"Content type: {type(content)}")
                print(f"Content: {repr(content)}")
                if content is None:
                    print("CONTENT IS NONE!")
                else:
                    print(f"Content length: {len(content)}")
                    print(f"Content stripped: '{content.strip()}'")

    except Exception as e:
        print(f"Error: {e}")
        import traceback
        traceback.print_exc()


if __name__ == "__main__":
    test_translation()
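

# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original debug flow: the prompt above
# asks the model to reply with a JSON array in the same order as the input
# strings, so a natural next check is whether that array actually parses.
# The helper name `parse_translations` and the bracket-extraction approach
# are assumptions for illustration, not an existing API in this project.
# ---------------------------------------------------------------------------
import json
import re


def parse_translations(content: str) -> list:
    """Extract the JSON array of translations from raw model output.

    Grabs the outermost [...] span, which also covers the common case where
    the model wraps the array in extra text or a Markdown code fence.
    """
    match = re.search(r"\[.*\]", content, re.DOTALL)
    if match is None:
        raise ValueError(f"No JSON array found in response: {content!r}")
    translations = json.loads(match.group(0))
    if not isinstance(translations, list):
        raise ValueError("Expected a JSON array of translations")
    return translations

# Example use inside test_translation(), right after printing the content:
#     translations = parse_translations(content)
#     print(f"Parsed {len(translations)} translations: {translations}")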