diff --git a/langchain/openai-translator/ai_translator/gradio_server.py b/langchain/openai-translator/ai_translator/gradio_server.py index 8f7d8569..5c4228bf 100644 --- a/langchain/openai-translator/ai_translator/gradio_server.py +++ b/langchain/openai-translator/ai_translator/gradio_server.py @@ -8,14 +8,28 @@ from translator import PDFTranslator, TranslationConfig -def translation(input_file, source_language, target_language): - LOG.debug(f"[翻译任务]\n源文件: {input_file.name}\n源语言: {source_language}\n目标语言: {target_language}") +def translation(input_file, source_language, target_language, style, writer): + LOG.debug(f"[翻译任务]\n源文件: {input_file.name}\n源语言: {source_language}\n目标语言: {target_language}\n文体类型: {style}\n作家名字: {writer}") output_file_path = Translator.translate_pdf( - input_file.name, source_language=source_language, target_language=target_language) + input_file.name, source_language=source_language, target_language=target_language, style=style, writer=writer) return output_file_path +def change_writer(choice): + if choice == "input": + return gr.Textbox.update(visible=True, value="") + else: + return gr.Textbox.update(visible=False, value=choice) + +def change_style(choice): + if choice == "custom": + return gr.Textbox.update(visible=True, value="") + elif choice == "novel" or choice == "news": + return gr.Textbox.update(visible=False, value=choice) + else: + return gr.Textbox.update(visible=False, value="none") + def launch_gradio(): iface = gr.Interface( @@ -24,7 +38,8 @@ def launch_gradio(): inputs=[ gr.File(label="上传PDF文件"), gr.Textbox(label="源语言(默认:英文)", placeholder="English", value="English"), - gr.Textbox(label="目标语言(默认:中文)", placeholder="Chinese", value="Chinese") + gr.Textbox(label="目标语言(默认:中文)", placeholder="Chinese", value="Chinese"), + gr.Radio(["none", "novel", "news", "custom"], label="文体类型", info="选择文体类型,custom 自定义输入") ], outputs=[ gr.File(label="下载翻译文件") ], @@ -34,6 +49,31 @@ iface.launch(share=True, server_name="0.0.0.0") +def 
launch_gradio_by_blocks(): + with gr.Blocks() as blocks: + input_file = gr.File(label="上传PDF文件") + source_language = gr.Textbox( + label="源语言(默认:英文)", placeholder="English", value="English" + ) + target_language = gr.Textbox( + label="目标语言(默认:中文)", placeholder="Chinese", value="Chinese" + ) + style_radio = gr.Radio(["none", "novel", "news", "custom"], label="文体类型", info="选择文体类型,custom 自定义输入", value="none") + style_text = gr.Textbox(show_label=False, lines=1, visible=False, placeholder="输入文体类型", value="none") + style_radio.change(fn=change_style, inputs=style_radio, outputs=style_text) + writer_radio = gr.Radio(["none", "input"], label="作家风格", info="input 自定义输入作家名字", value="none") + writer_text = gr.Textbox(show_label=False, lines=1, visible=False, placeholder="输入作家名字", value="none") + writer_radio.change(fn=change_writer, inputs=writer_radio, outputs=writer_text) + output_file = gr.File(label="下载翻译文件") + clear = gr.ClearButton(components=[input_file, source_language, target_language, style_radio, writer_radio, writer_text]) + submit = gr.Button("Submit") + submit.click( + translation, + inputs=[input_file, source_language, target_language, style_text, writer_text], + outputs=[output_file], + ) + blocks.launch(share=True, server_name="0.0.0.0") + def initialize_translator(): # 解析命令行 argument_parser = ArgumentParser() @@ -51,4 +91,5 @@ def initialize_translator(): # 初始化 translator initialize_translator() # 启动 Gradio 服务 - launch_gradio() + # launch_gradio() + launch_gradio_by_blocks() diff --git a/langchain/openai-translator/ai_translator/translator/pdf_translator.py b/langchain/openai-translator/ai_translator/translator/pdf_translator.py index 873dee77..36e063b2 100644 --- a/langchain/openai-translator/ai_translator/translator/pdf_translator.py +++ b/langchain/openai-translator/ai_translator/translator/pdf_translator.py @@ -15,14 +15,22 @@ def translate_pdf(self, output_file_format: str = 'markdown', source_language: str = "English", target_language: str = 'Chinese' 
+ style: str = 'none', + writer: str = 'none', pages: Optional[int] = None): self.book = self.pdf_parser.parse_pdf(input_file, pages) + style_template = "" + if style != 'none': + style_template += f"The translation is in {style} style." + if writer != 'none': + style_template += f" Refer to the style of writer {writer}." + for page_idx, page in enumerate(self.book.pages): for content_idx, content in enumerate(page.contents): # Translate content.original - translation, status = self.translate_chain.run(content, source_language, target_language) + translation, status = self.translate_chain.run(content, source_language, target_language, style_template) # Update the content in self.book.pages directly self.book.pages[page_idx].contents[content_idx].set_translation(translation, status) diff --git a/langchain/openai-translator/ai_translator/translator/translation_chain.py b/langchain/openai-translator/ai_translator/translator/translation_chain.py index e5c85880..e030f3ab 100644 --- a/langchain/openai-translator/ai_translator/translator/translation_chain.py +++ b/langchain/openai-translator/ai_translator/translator/translation_chain.py @@ -10,7 +10,7 @@ def __init__(self, model_name: str = "gpt-3.5-turbo", verbose: bool = True): # 翻译任务指令始终由 System 角色承担 template = ( """You are a translation expert, proficient in various languages. 
\n - Translates {source_language} to {target_language}.""" + Translates {source_language} to {target_language}.{style_template}""" ) system_message_prompt = SystemMessagePromptTemplate.from_template(template) @@ -24,17 +24,18 @@ def __init__(self, model_name: str = "gpt-3.5-turbo", verbose: bool = True): ) # 为了翻译结果的稳定性,将 temperature 设置为 0 - chat = ChatOpenAI(model_name=model_name, temperature=0, verbose=verbose) + chat = ChatOpenAI(model_name=model_name, base_url= os.getenv("OPENAI_BASE_URL"),api_key=os.getenv("OPENAI_API_KEY"),temperature=0, verbose=verbose) self.chain = LLMChain(llm=chat, prompt=chat_prompt_template, verbose=verbose) - def run(self, text: str, source_language: str, target_language: str) -> (str, bool): + def run(self, text: str, source_language: str, target_language: str, style_template: str) -> (str, bool): result = "" try: result = self.chain.run({ "text": text, "source_language": source_language, "target_language": target_language, + "style_template": style_template, }) except Exception as e: LOG.error(f"An error occurred during translation: {e}") diff --git a/openai-translator/ai_translator/app.py b/openai-translator/ai_translator/app.py new file mode 100644 index 00000000..738342d1 --- /dev/null +++ b/openai-translator/ai_translator/app.py @@ -0,0 +1,46 @@ +import os, json +import sys +# not sure why can't import module without it. +sys.path.append(os.path.dirname(os.path.abspath(__file__))) + +from flask import Flask,request +# open ai +from translator import PDFTranslator +from model import OpenAIModel + +app = Flask(__name__) + +# Upload path +UPLOAD_FOLDER = '/Users/Kelven/TempUpload/' +app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER + +# translate the file from local dir +@app.route('/api/v1/translate/', methods=['POST']) +def file_translate(filename): + + data = request.get_data() + # parse request data + json_data = json.loads(data.decode('utf-8')) + file_type = json_data.get('file_type') + filefullname = filename + '.' 
+ file_type + # support request language + target_language = json_data.get('target_language') + target_format = json_data.get('target_format') + + newModel = OpenAIModel(model='gpt-3.5-turbo', api_key=os.getenv("OPENAI_API_KEY")) + translator = PDFTranslator(newModel) + + # target_language not support yet + translator.translate_pdf(app.config['UPLOAD_FOLDER'] + filefullname, target_format) + + output_filename = app.config['UPLOAD_FOLDER'] + os.path.basename(app.config['UPLOAD_FOLDER'] + filename + "_translated.md") + + return { + 'status': 0, + 'msg': '', + 'data': output_filename + } + + +if __name__ == '__main__': + app.run(debug=True) \ No newline at end of file diff --git a/openai-translator/ai_translator/model/openai_model.py b/openai-translator/ai_translator/model/openai_model.py index 3d2d4bef..aac8b8d0 100644 --- a/openai-translator/ai_translator/model/openai_model.py +++ b/openai-translator/ai_translator/model/openai_model.py @@ -11,7 +11,7 @@ class OpenAIModel(Model): def __init__(self, model: str, api_key: str): self.model = model - self.client = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) + self.client = OpenAI(base_url= os.getenv("OPENAI_BASE_URL"),api_key=os.getenv("OPENAI_API_KEY")) def make_request(self, prompt): attempts = 0