From 56bcc8d3c39b524e29ff2db4d70e58c55797406c Mon Sep 17 00:00:00 2001 From: Moemu Date: Tue, 6 May 2025 00:59:51 +0800 Subject: [PATCH 01/29] Update Translation --- pages/applications.zh.mdx | 2 + pages/index.zh.mdx | 10 +- pages/introduction.zh.mdx | 4 +- pages/models.zh.mdx | 2 + pages/papers.zh.mdx | 597 ++++++++++++++++++++++++++++---------- pages/prompts.zh.mdx | 7 +- pages/readings.zh.mdx | 56 ++-- pages/research.en.mdx | 6 +- pages/research.zh.mdx | 2 + pages/risks.zh.mdx | 2 + pages/techniques.zh.mdx | 2 + pages/tools.zh.mdx | 23 +- 12 files changed, 520 insertions(+), 193 deletions(-) diff --git a/pages/applications.zh.mdx b/pages/applications.zh.mdx index 8c0263808..918b31d8e 100644 --- a/pages/applications.zh.mdx +++ b/pages/applications.zh.mdx @@ -1,6 +1,8 @@ # 提示应用 import { Callout } from 'nextra-theme-docs' +import {Cards, Card} from 'nextra-theme-docs' +import {FilesIcon} from 'components/icons' import ContentFileNames from 'components/ContentFileNames' diff --git a/pages/index.zh.mdx b/pages/index.zh.mdx index a938afe60..64b4028e8 100644 --- a/pages/index.zh.mdx +++ b/pages/index.zh.mdx @@ -10,11 +10,9 @@ import { Callout } from 'nextra/components' 基于对大语言模型的浓厚兴趣,我们编写了这份全新的提示工程指南,介绍了大语言模型相关的论文研究、学习指南、模型、讲座、参考资料、大语言模型能力以及其他与提示工程相关的工具。 - -We are excited to launch two new prompt engineering courses. Get access by joining our DAIR.AI Academy. [Join now!](https://dair-ai.thinkific.com/) - -Use code PROMPTING20 to get an extra 20% off. - -IMPORTANT: The discount is limited to the first 500 students. +## 想要学得更多? + +了解我们最新推出的人工智能课程中关于高级提示工程技术与最佳实践的内容。 [立即加入!](https://dair-ai.thinkific.com/) +使用优惠码 PROMPTING20 可额外享受 20% 折扣优惠。 diff --git a/pages/introduction.zh.mdx b/pages/introduction.zh.mdx index dcaaf2011..1a083d23f 100644 --- a/pages/introduction.zh.mdx +++ b/pages/introduction.zh.mdx @@ -1,5 +1,7 @@ # 提示工程简介 +import {Cards, Card} from 'nextra-theme-docs' +import { CardsIcon, OneIcon, WarningIcon, FilesIcon} from 'components/icons' import ContentFileNames from 'components/ContentFileNames' @@ -7,6 +9,6 @@ import ContentFileNames from 'components/ContentFileNames' 本指南介绍了提示词相关的基础知识,帮助用户了解如何通过提示词和大语言模型进行交互并提供指导建议。 -除非另有说明,本指南默认所有示例都是在 OpenAI 的 Playground 上使用 `gpt-3.5-turbo` 进行测试。模型使用默认配置,即 temperature=1 和 top_p=1 。这些提示也应适用于具有类似功能的其他模型,如gpt-3.5-turbo,但模型响应可能会有所不同。 +除非另有说明,本指南默认所有示例都是在 OpenAI 的 [Playground](https://platform.openai.com/playground) 上使用 `gpt-3.5-turbo` 进行测试。模型使用默认配置,即 `temperature=1` 和 `top_p=1` 。这些提示也应适用于具有类似功能的其他模型,如 `gpt-3.5-turbo` ,但模型响应可能会有所不同。 \ No newline at end of file diff --git a/pages/models.zh.mdx b/pages/models.zh.mdx index 39cce47b3..2feba9820 100644 --- a/pages/models.zh.mdx +++ b/pages/models.zh.mdx @@ -1,6 +1,8 @@ # 模型 import { Callout } from 'nextra-theme-docs' +import {Cards, Card} from 'nextra-theme-docs' +import {FilesIcon} from 'components/icons' import ContentFileNames from 'components/ContentFileNames' diff --git a/pages/papers.zh.mdx b/pages/papers.zh.mdx index 843c02c80..bf6fded10 100644 --- a/pages/papers.zh.mdx +++ b/pages/papers.zh.mdx @@ -4,171 +4,446 @@ ## 综述 - - [Nature Language Reasoning, A Survey](https://arxiv.org/abs/2303.14725) (March 2023) - - [Augmented Language Models: a Survey](https://arxiv.org/abs/2302.07842) (Feb 2023) - - [A Survey for In-context Learning](https://arxiv.org/abs/2301.00234) (Dec 2022) - - [Towards Reasoning in Large Language Models: A Survey](https://arxiv.org/abs/2212.10403) (Dec 2022) - - [Reasoning with Language Model Prompting: A Survey](https://arxiv.org/abs/2212.09597) (Dec 2022) - - [Emergent Abilities
of Large Language Models](https://arxiv.org/abs/2206.07682) (Jun 2022) - - [A Taxonomy of Prompt Modifiers for Text-To-Image Generation](https://arxiv.org/abs/2204.13988) (Apr 2022) - - [Pre-train, Prompt, and Predict: A Systematic Survey of Prompting Methods in Natural Language Processing](https://arxiv.org/abs/2107.13586) (Jul 2021) - [The Prompt Report: A Systematic Survey of Prompting Techniques](https://arxiv.org/abs/2406.06608) (June 2024) - [Prompt Design and Engineering: Introduction and Advanced Methods](https://arxiv.org/abs/2401.14423) (January 2024) - [A Survey on Hallucination in Large Language Models: Principles, Taxonomy, Challenges, and Open Questions](https://arxiv.org/abs/2311.05232) (November 2023) - [An RL Perspective on RLHF, Prompting, and Beyond](https://arxiv.org/abs/2310.06147) (October 2023) - [Few-shot Fine-tuning vs. In-context Learning: A Fair Comparison and Evaluation](https://arxiv.org/abs/2305.16938) (May 2023) - [Jailbreaking ChatGPT via Prompt Engineering: An Empirical Study](https://arxiv.org/abs/2305.13860) (May 2023) - [Harnessing the Power of LLMs in Practice: A Survey on ChatGPT and Beyond](https://arxiv.org/abs/2304.13712) (April 2023) - [Tool Learning with Foundation Models](https://arxiv.org/abs/2304.08354) (April 2023) - [One Small Step for Generative AI, One Giant Leap for AGI: A Complete Survey on ChatGPT in AIGC Era](https://arxiv.org/abs/2304.06488) (April 2023) - [A Bibliometric Review of Large Language Models Research from 2017 to 2023](https://arxiv.org/abs/2304.02020) (April 2023) - [A Survey of Large Language Models](https://arxiv.org/abs/2303.18223) (April 2023) - [Natural Language Reasoning, A Survey](https://arxiv.org/abs/2303.14725) (March 2023) - [Augmented Language Models: a Survey](https://arxiv.org/abs/2302.07842) (February 2023) - [A Survey for In-context Learning](https://arxiv.org/abs/2301.00234) (December 2022) - [Towards Reasoning in Large Language Models: A Survey](https://arxiv.org/abs/2212.10403) (December 2022) - [Reasoning with Language Model Prompting: A Survey](https://arxiv.org/abs/2212.09597) (December 2022) - [Emergent Abilities of Large Language Models](https://arxiv.org/abs/2206.07682) (June 2022) - [A Taxonomy of Prompt Modifiers for Text-To-Image Generation](https://arxiv.org/abs/2204.13988) (April 2022) - [Pre-train, Prompt, and Predict: A Systematic Survey of Prompting Methods in Natural Language Processing](https://arxiv.org/abs/2107.13586) (July 2021) ## 方法 - - [Enhancing Zero-Shot Chain-of-Thought Reasoning in Large Language Models through Logic](https://arxiv.org/abs/2309.13339) (February 2024) - - [Self-Refine: Iterative Refinement with Self-Feedback](https://arxiv.org/abs/2303.17651v1) (Mar 2023) - - [kNN Prompting: Beyond-Context Learning with Calibration-Free Nearest Neighbor Inference](https://arxiv.org/abs/2303.13824) (Mar 2023) - - [Visual-Language Prompt Tuning with Knowledge-guided Context Optimization](https://arxiv.org/abs/2303.13283) (Mar 2023) - - [Fairness-guided Few-shot Prompting for Large Language Models](https://arxiv.org/abs/2303.13217) (Mar 2023) - - [Context-faithful Prompting for Large Language Models](https://arxiv.org/abs/2303.11315) (Mar 2023) - - [Is Prompt All You Need? No.
A Comprehensive and Broader View of Instruction Learning](https://arxiv.org/abs/2303.10475) (Mar 2023) - - [UPRISE: Universal Prompt Retrieval for Improving Zero-Shot Evaluation](https://arxiv.org/abs/2303.08518) (Mar 2023) - - [Model-tuning Via Prompts Makes NLP Models Adversarially Robust](https://arxiv.org/abs/2303.07320) (Mar 2023) - - [Structure Pretraining and Prompt Tuning for Knowledge Graph Transfer](https://arxiv.org/abs/2303.03922) (March 2023) - - [CoTEVer: Chain of Thought Prompting Annotation Toolkit for Explanation Verification](https://arxiv.org/abs/2303.03628) (March 2023) - - [Larger language models do in-context learning differently](https://arxiv.org/abs/2303.03846) (March 2023) - - [OpenICL: An Open-Source Framework for In-context Learning](https://arxiv.org/abs/2303.02913) (March 2023) - - [Dynamic Prompting: A Unified Framework for Prompt Tuning](https://arxiv.org/abs/2303.02909) (March 2023) - - [Multitask Prompt Tuning Enables Parameter-Efficient Transfer Learning](https://arxiv.org/abs/2303.02861) (March 2023) - - [Effectiveness of Data Augmentation for Prefix Tuning with Limited Data](https://arxiv.org/abs/2303.02577) (March 2023) - - [Mixture of Soft Prompts for Controllable Data Generation](https://arxiv.org/abs/2303.01580) (March 2023) - - [Prompt, Generate, then Cache: Cascade of Foundation Models makes Strong Few-shot Learners](https://arxiv.org/abs/2303.02151) (March 2023) - - [How Robust is GPT-3.5 to Predecessors? A Comprehensive Study on Language Understanding Tasks](https://arxiv.org/abs/2303.00293) (March 2023) - - [Can ChatGPT Understand Too? A Comparative Study on ChatGPT and Fine-tuned BERT](https://arxiv.org/pdf/2302.10198.pdf) (Feb 2023) - - [EvoPrompting: Language Models for Code-Level Neural Architecture Search](https://arxiv.org/abs/2302.14838) (Feb 2023) - - [In-Context Instruction Learning](https://arxiv.org/abs/2302.14691) (Feb 2023) - - [Chain of Hindsight Aligns Language Models with Feedback](https://arxiv.org/abs/2302.02676) (Feb 2023) - - [Language Is Not All You Need: Aligning Perception with Language Models](https://arxiv.org/abs/2302.14045) (Feb 2023) - - [Automatic Prompt Augmentation and Selection with Chain-of-Thought from Labeled Data](https://arxiv.org/abs/2302.12822) (Feb 2023) - - [Active Prompting with Chain-of-Thought for Large Language Models](https://arxiv.org/abs/2302.12246) (Feb 2023) - - [More than you've asked for: A Comprehensive Analysis of Novel Prompt Injection Threats to Application-Integrated Large Language Models](https://arxiv.org/abs/2302.12173) (Feb 2023) - - [A Prompt Pattern Catalog to Enhance Prompt Engineering with ChatGPT](https://arxiv.org/abs/2302.11382) (Feb 2023) - - [Guiding Large Language Models via Directional Stimulus Prompting](https://arxiv.org/abs/2302.11520) (Feb 2023) - - [How Does In-Context Learning Help Prompt Tuning?](https://arxiv.org/abs/2302.11521) (Feb 2023) - - [Scalable Prompt Generation for Semi-supervised Learning with Language Models](https://arxiv.org/abs/2302.09236) (Feb 2023) - - [Bounding the Capabilities of Large Language Models in Open Text Generation with Prompt Constraints](https://arxiv.org/abs/2302.09185) (Feb 2023) - - [À-la-carte Prompt Tuning (APT): Combining Distinct Data Via Composable Prompting](https://arxiv.org/abs/2302.07994) (Feb 2023) - - [GraphPrompt: Unifying Pre-Training and Downstream Tasks for Graph Neural Networks](https://arxiv.org/abs/2302.08043) (Feb 2023) - - [The Capacity for Moral Self-Correction in Large Language 
Models](https://arxiv.org/abs/2302.07459) (Feb 2023) - - [SwitchPrompt: Learning Domain-Specific Gated Soft Prompts for Classification in Low-Resource Domains](https://arxiv.org/abs/2302.06868) (Feb 2023) - - [Evaluating the Robustness of Discrete Prompts](https://arxiv.org/abs/2302.05619) (Feb 2023) - - [Compositional Exemplars for In-context Learning](https://arxiv.org/abs/2302.05698) (Feb 2023) - - [Hard Prompts Made Easy: Gradient-Based Discrete Optimization for Prompt Tuning and Discovery](https://arxiv.org/abs/2302.03668) (Feb 2023) - - [Multimodal Chain-of-Thought Reasoning in Language Models](https://arxiv.org/abs/2302.00923) (Feb 2023) - - [Large Language Models Can Be Easily Distracted by Irrelevant Context](https://arxiv.org/abs/2302.00093) (Feb 2023) - - [Synthetic Prompting: Generating Chain-of-Thought Demonstrations for Large Language Models](https://arxiv.org/abs/2302.00618) (Feb 2023) - - [Progressive Prompts: Continual Learning for Language Models](https://arxiv.org/abs/2301.12314) (Jan 2023) - - [Batch Prompting: Efficient Inference with LLM APIs](https://arxiv.org/abs/2301.08721) (Jan 2023) - - [Demonstrate-Search-Predict: Composing retrieval and language models for knowledge-intensive NLP](https://arxiv.org/abs/2212.14024) (Dec 2022) - - [On Second Thought, Let's Not Think Step by Step! Bias and Toxicity in Zero-Shot Reasoning](https://arxiv.org/abs/2212.08061) (Dec 2022) - - [Constitutional AI: Harmlessness from AI Feedback](https://arxiv.org/abs/2212.08073) (Dec 2022) - - [Successive Prompting for Decomposing Complex Questions](https://arxiv.org/abs/2212.04092) (Dec 2022) - - [Large Language Models are reasoners with Self-Verification](https://arxiv.org/abs/2212.09561v1) (Dec 2022) - - [Discovering Language Model Behaviors with Model-Written Evaluations](https://arxiv.org/abs/2212.09251) (Dec 2022) - - [Structured Prompting: Scaling In-Context Learning to 1,000 Examples](https://arxiv.org/abs/2212.06713) (Dec 2022) - - [PAL: Program-aided Language Models](https://arxiv.org/abs/2211.10435) (Nov 2022) - - [Large Language Models Are Human-Level Prompt Engineers](https://arxiv.org/abs/2211.01910) (Nov 2022) - - [Ignore Previous Prompt: Attack Techniques For Language Models](https://arxiv.org/abs/2211.09527) (Nov 2022) - - [Machine Generated Text: A Comprehensive Survey of Threat Models and Detection Methods](https://arxiv.org/abs/2210.07321) (Nov 2022) - - [Teaching Algorithmic Reasoning via In-context Learning](https://arxiv.org/abs/2211.09066) (Nov 2022) - - [Enhancing Self-Consistency and Performance of Pre-Trained Language Models through Natural Language Inference](https://arxiv.org/abs/2211.11875) (Nov 2022) - - [Ask Me Anything: A simple strategy for prompting language models](https://paperswithcode.com/paper/ask-me-anything-a-simple-strategy-for) (Oct 2022) - - [Recitation-Augmented Language Models](https://arxiv.org/abs/2210.01296) (Oct 2022) - - [ReAct: Synergizing Reasoning and Acting in Language Models](https://arxiv.org/abs/2210.03629) (Oct 2022) - - [Prompting GPT-3 To Be Reliable](https://arxiv.org/abs/2210.09150) (Oct 2022) - - [Decomposed Prompting: A Modular Approach for Solving Complex Tasks](https://arxiv.org/abs/2210.02406) (Oct 2022) - - [Language Models Are Greedy Reasoners: A Systematic Formal Analysis of Chain-of-Thought](https://arxiv.org/abs/2210.01240v3) (Oct 2022) - - [Evaluating the Susceptibility of Pre-Trained Language Models via Handcrafted Adversarial Examples](https://arxiv.org/abs/2209.02128) (Sep 2022) - - [Dynamic Prompt Learning via 
Policy Gradient for Semi-structured Mathematical Reasoning](https://arxiv.org/abs/2209.14610) (Sep 2022) - - [Promptagator: Few-shot Dense Retrieval From 8 Examples](https://arxiv.org/abs/2209.11755) (Sep 2022) - - [Atlas: Few-shot Learning with Retrieval Augmented Language Models](https://arxiv.org/abs/2208.03299) (Nov 2022) - - [DocPrompting: Generating Code by Retrieving the Docs](https://arxiv.org/abs/2207.05987) (July 2022) - - [On the Advance of Making Language Models Better Reasoners](https://arxiv.org/abs/2206.02336) (June 2022) - - [Large Language Models are Zero-Shot Reasoners](https://arxiv.org/abs/2205.11916) (May 2022) - - [Maieutic Prompting: Logically Consistent Reasoning with Recursive Explanations](https://arxiv.org/abs/2205.11822) (May 2022) - - [MRKL Systems: A modular, neuro-symbolic architecture that combines large language models, external knowledge sources and discrete reasoning](https://arxiv.org/abs/2205.00445) (May 2022) - - [PPT: Pre-trained Prompt Tuning for Few-shot Learning](https://aclanthology.org/2022.acl-long.576/) (Mqy 2022) - - [Toxicity Detection with Generative Prompt-based Inference](https://arxiv.org/abs/2205.12390) (May 2022) - - [Learning to Transfer Prompts for Text Generation](https://arxiv.org/abs/2205.01543) (May 2022) - - [The Unreliability of Explanations in Few-shot Prompting for Textual Reasoning](https://arxiv.org/abs/2205.03401) (May 2022) - - [A Taxonomy of Prompt Modifiers for Text-To-Image Generation](https://arxiv.org/abs/2204.13988) (Apr 2022) - - [PromptChainer: Chaining Large Language Model Prompts through Visual Programming](https://arxiv.org/abs/2203.06566) (Mar 2022) - - [Self-Consistency Improves Chain of Thought Reasoning in Language Models](https://arxiv.org/abs/2203.11171) (March 2022) - - [Training language models to follow instructions with human feedback](https://arxiv.org/abs/2203.02155) - - [Rethinking the Role of Demonstrations: What Makes In-Context Learning Work?](https://arxiv.org/abs/2202.12837) (Feb 2022) - - [Chain of Thought Prompting Elicits Reasoning in Large Language Models](https://arxiv.org/abs/2201.11903) (Jan 2022) - - [Show Your Work: Scratchpads for Intermediate Computation with Language Models](https://arxiv.org/abs/2112.00114) (Nov 2021) - - [AI Chains: Transparent and Controllable Human-AI Interaction by Chaining Large Language Model Prompts](https://arxiv.org/abs/2110.01691) (Oct 2021) - - [Generated Knowledge Prompting for Commonsense Reasoning](https://arxiv.org/abs/2110.08387) (Oct 2021) - - [Multitask Prompted Training Enables Zero-Shot Task Generalization](https://arxiv.org/abs/2110.08207) (Oct 2021) - - [Reframing Instructional Prompts to GPTk's Language](https://arxiv.org/abs/2109.07830) (Sep 2021) - - [Design Guidelines for Prompt Engineering Text-to-Image Generative Models](https://arxiv.org/abs/2109.06977) (Sep 2021) - - [Making Pre-trained Language Models Better Few-shot Learners](https://aclanthology.org/2021.acl-long.295) (Aug 2021) - - [Fantastically Ordered Prompts and Where to Find Them: Overcoming Few-Shot Prompt Order Sensitivity](https://arxiv.org/abs/2104.08786) (April 2021) - - [BERTese: Learning to Speak to BERT](https://aclanthology.org/2021.eacl-main.316) (April 2021) - - [The Power of Scale for Parameter-Efficient Prompt Tuning](https://arxiv.org/abs/2104.08691) (April 2021) - - [Prompt Programming for Large Language Models: Beyond the Few-Shot Paradigm](https://arxiv.org/abs/2102.07350) (Feb 2021) - - [Calibrate Before Use: Improving Few-Shot Performance of Language 
Models](https://arxiv.org/abs/2102.09690) (Feb 2021) - - [Prefix-Tuning: Optimizing Continuous Prompts for Generation](https://arxiv.org/abs/2101.00190) (Jan 2021) - - [Learning to Generate Task-Specific Adapters from Task Description](https://arxiv.org/abs/2101.00420) (Jan 2021) - - [Making Pre-trained Language Models Better Few-shot Learners](https://arxiv.org/abs/2012.15723) (Dec 2020) - - [Learning from Task Descriptions](https://aclanthology.org/2020.emnlp-main.105/) (Nov 2020) - - [AutoPrompt: Eliciting Knowledge from Language Models with Automatically Generated Prompts](https://arxiv.org/abs/2010.15980) (Oct 2020) - - [Language Models are Few-Shot Learners](https://arxiv.org/abs/2005.14165) (May 2020) - - [How Can We Know What Language Models Know?](https://direct.mit.edu/tacl/article/doi/10.1162/tacl_a_00324/96460/How-Can-We-Know-What-Language-Models-Know) (July 2020) - - [Scaling Laws for Neural Language Models](https://arxiv.org/abs/2001.08361) (Jan 2020) +- [Enhancing Zero-Shot Chain-of-Thought Reasoning in Large Language Models through Logic](https://arxiv.org/abs/2309.13339) (February 2024) +- [Principled Instructions Are All You Need for Questioning LLaMA-1/2, GPT-3.5/4](https://arxiv.org/abs/2312.16171v1) (December 2023) +- [Walking Down the Memory Maze: Beyond Context Limit through Interactive Reading](https://arxiv.org/abs/2310.05029) (October 2023) +- [Large Language Models as Analogical Reasoners](https://arxiv.org/abs/2310.01714) (October 2023) +- [LLMLingua: Compressing Prompts for Accelerated Inference of Large Language Models](https://arxiv.org/abs/2310.05736) (October 2023) +- [Query-Dependent Prompt Evaluation and Optimization with Offline Inverse RL](https://arxiv.org/abs/2309.06653) (September 2023) +- [Chain-of-Verification Reduces Hallucination in Large Language Models](https://arxiv.org/abs/2309.11495) (September 2023) +- [Connecting Large Language Models with Evolutionary Algorithms Yields Powerful Prompt Optimizers](https://arxiv.org/abs/2309.08532) (September 2023) +- [From Sparse to Dense: GPT-4 Summarization with Chain of Density Prompting](https://arxiv.org/abs/2309.04269) (September 2023) +- [Re-Reading Improves Reasoning in Language Models](https://arxiv.org/abs/2309.06275) (September 2023) +- [Graph of Thoughts: Solving Elaborate Problems with Large Language Models](https://arxiv.org/abs/2308.09687v2) (August 2023) +- [Skeleton-of-Thought: Large Language Models Can Do Parallel Decoding](https://arxiv.org/abs/2307.15337) (July 2023) +- [Focused Prefix Tuning for Controllable Text Generation](https://arxiv.org/abs/2306.00369) (June 2023) +- [Exploring Lottery Prompts for Pre-trained Language Models](https://arxiv.org/abs/2305.19500) (May 2023) +- [Less Likely Brainstorming: Using Language Models to Generate Alternative Hypotheses](https://arxiv.org/abs/2305.19339) (May 2023) +- [Let's Verify Step by Step](https://arxiv.org/abs/2305.20050) (May 2023) +- [Universality and Limitations of Prompt Tuning](https://arxiv.org/abs/2305.18787) (May 2023) +- [MultiTool-CoT: GPT-3 Can Use Multiple External Tools with Chain of Thought Prompting](https://arxiv.org/abs/2305.16896) (May 2023) +- [PEARL: Prompting Large Language Models to Plan and Execute Actions Over Long Documents](https://arxiv.org/abs/2305.14564v1) (May 2023) +- [Reasoning with Language Model is Planning with World Model](https://arxiv.org/abs/2305.14992v1) (May 2023) +- [Self-Critique Prompting with Large Language Models for Inductive Instructions](https://arxiv.org/abs/2305.13733) (May 2023) +- 
[Better Zero-Shot Reasoning with Self-Adaptive Prompting](https://arxiv.org/abs/2305.14106) (May 2023) - [Hierarchical Prompting Assists Large Language Model on Web Navigation](https://arxiv.org/abs/2305.14257) (May 2023) - [Interactive Natural Language Processing](https://arxiv.org/abs/2305.13246) (May 2023) - [Can We Edit Factual Knowledge by In-Context Learning?](https://arxiv.org/abs/2305.12740) (May 2023) - [In-Context Learning of Large Language Models Explained as Kernel Regression](https://arxiv.org/abs/2305.12766) (May 2023) - [Plan-and-Solve Prompting: Improving Zero-Shot Chain-of-Thought Reasoning by Large Language Models](https://arxiv.org/abs/2305.04091v3) (May 2023) - [Meta-in-context learning in large language models](https://arxiv.org/abs/2305.12907) (May 2023) - [Let's Sample Step by Step: Adaptive-Consistency for Efficient Reasoning with LLMs](https://arxiv.org/abs/2305.11860) (May 2023) - [Post Hoc Explanations of Language Models Can Improve Language Models](https://arxiv.org/abs/2305.11426) (May 2023) - [Compress, Then Prompt: Improving Accuracy-Efficiency Trade-off of LLM Inference with Transferable Prompt](https://arxiv.org/abs/2305.11186) (May 2023) - [TreePrompt: Learning to Compose Tree Prompts for Explainable Visual Grounding](https://arxiv.org/abs/2305.11497) (May 2023) - [TELeR: A General Taxonomy of LLM Prompts for Benchmarking Complex Tasks](https://arxiv.org/abs/2305.11430) (May 2023) - [Efficient Prompting via Dynamic In-Context Learning](https://arxiv.org/abs/2305.11170) (May 2023) - [The Web Can Be Your Oyster for Improving Large Language Models](https://arxiv.org/abs/2305.10998) (May 2023) - [Flatness-Aware Prompt Selection Improves Accuracy and Sample Efficiency](https://arxiv.org/abs/2305.10713) (May 2023) - [Tree of Thoughts: Deliberate Problem Solving with Large Language Models](https://arxiv.org/abs/2305.10601) (May 2023) - [ZeroPrompt: Streaming Acoustic Encoders are Zero-Shot Masked LMs](https://arxiv.org/abs/2305.10649) (May 2023) - [Chain-of-Symbol Prompting Elicits Planning in Large Language Models](https://arxiv.org/abs/2305.10276) (May 2023) - [CooK: Empowering General-Purpose Language Models with Modular and Collaborative Knowledge](https://arxiv.org/abs/2305.09955) (May 2023) - [What In-Context Learning "Learns" In-Context: Disentangling Task Recognition and Task Learning](https://arxiv.org/abs/2305.09731) (May 2023) - [Reprompting: Automated Chain-of-Thought Prompt Inference Through Gibbs Sampling](https://arxiv.org/abs/2305.09993) (May 2023) - [Satisfiability-Aided Language Models Using Declarative Prompting](https://arxiv.org/abs/2305.09656) (May 2023) - [Pre-Training to Learn in Context](https://arxiv.org/abs/2305.09137) (May 2023) - [Boosted Prompt Ensembles for Large Language Models](https://arxiv.org/abs/2304.05970) (April 2023) - [Global Prompt Cell: A Portable Control Module for Effective Prompt](https://arxiv.org/abs/2304.05642) (April 2023) - [Why think step-by-step?
Reasoning emerges from the locality of experience](https://arxiv.org/abs/2304.03843) (April 2023) +- [Revisiting Automated Prompting: Are We Actually Doing Better?](https://arxiv.org/abs/2304.03609) (April 2023) +- [REFINER: Reasoning Feedback on Intermediate Representations](https://arxiv.org/abs/2304.01904) (April 2023) +- [Reflexion: an autonomous agent with dynamic memory and self-reflection](https://arxiv.org/abs/2303.11366) (March 2023) +- [CAMEL: Communicative Agents for "Mind" Exploration of Large Scale Language Model Society](https://arxiv.org/abs/2303.17760) (March 2023) +- [Self-Refine: Iterative Refinement with Self-Feedback](https://arxiv.org/abs/2303.17651v1) (March 2023) +- [kNN Prompting: Beyond-Context Learning with Calibration-Free Nearest Neighbor Inference](https://arxiv.org/abs/2303.13824) (March 2023) +- [Visual-Language Prompt Tuning with Knowledge-guided Context Optimization](https://arxiv.org/abs/2303.13283) (March 2023) +- [Fairness-guided Few-shot Prompting for Large Language Models](https://arxiv.org/abs/2303.13217) (March 2023) +- [Context-faithful Prompting for Large Language Models](https://arxiv.org/abs/2303.11315) (March 2023) +- [Is Prompt All You Need? No. A Comprehensive and Broader View of Instruction Learning](https://arxiv.org/abs/2303.10475) (March 2023) +- [UPRISE: Universal Prompt Retrieval for Improving Zero-Shot Evaluation](https://arxiv.org/abs/2303.08518) (March 2023) +- [Model-tuning Via Prompts Makes NLP Models Adversarially Robust](https://arxiv.org/abs/2303.07320) (March 2023) +- [Structure Pretraining and Prompt Tuning for Knowledge Graph Transfer](https://arxiv.org/abs/2303.03922) (March 2023) +- [CoTEVer: Chain of Thought Prompting Annotation Toolkit for Explanation Verification](https://arxiv.org/abs/2303.03628) (March 2023) +- [Larger language models do in-context learning differently](https://arxiv.org/abs/2303.03846) (March 2023) +- [OpenICL: An Open-Source Framework for In-context Learning](https://arxiv.org/abs/2303.02913) (March 2023) +- [Dynamic Prompting: A Unified Framework for Prompt Tuning](https://arxiv.org/abs/2303.02909) (March 2023) +- [ART: Automatic multi-step reasoning and tool-use for large language models](https://arxiv.org/abs/2303.09014) (March 2023) +- [Multitask Prompt Tuning Enables Parameter-Efficient Transfer Learning](https://arxiv.org/abs/2303.02861) (March 2023) +- [Effectiveness of Data Augmentation for Prefix Tuning with Limited Data](https://arxiv.org/abs/2303.02577) (March 2023) +- [Mixture of Soft Prompts for Controllable Data Generation](https://arxiv.org/abs/2303.01580) (March 2023) +- [Prompt, Generate, then Cache: Cascade of Foundation Models makes Strong Few-shot Learners](https://arxiv.org/abs/2303.02151) (March 2023) +- [How Robust is GPT-3.5 to Predecessors? A Comprehensive Study on Language Understanding Tasks](https://arxiv.org/abs/2303.00293) (March 2023) +- [Can ChatGPT Understand Too? 
A Comparative Study on ChatGPT and Fine-tuned BERT](https://arxiv.org/pdf/2302.10198.pdf) (February 2023) +- [EvoPrompting: Language Models for Code-Level Neural Architecture Search](https://arxiv.org/abs/2302.14838) (February 2023) +- [In-Context Instruction Learning](https://arxiv.org/abs/2302.14691) (February 2023) +- [Chain of Hindsight Aligns Language Models with Feedback](https://arxiv.org/abs/2302.02676) (February 2023) +- [Language Is Not All You Need: Aligning Perception with Language Models](https://arxiv.org/abs/2302.14045) (February 2023) +- [Automatic Prompt Augmentation and Selection with Chain-of-Thought from Labeled Data](https://arxiv.org/abs/2302.12822) (February 2023) +- [Active Prompting with Chain-of-Thought for Large Language Models](https://arxiv.org/abs/2302.12246) (February 2023) +- [More than you've asked for: A Comprehensive Analysis of Novel Prompt Injection Threats to Application-Integrated Large Language Models](https://arxiv.org/abs/2302.12173) (February 2023) +- [A Prompt Pattern Catalog to Enhance Prompt Engineering with ChatGPT](https://arxiv.org/abs/2302.11382) (February 2023) +- [Guiding Large Language Models via Directional Stimulus Prompting](https://arxiv.org/abs/2302.11520) (February 2023) +- [How Does In-Context Learning Help Prompt Tuning?](https://arxiv.org/abs/2302.11521) (February 2023) +- [Scalable Prompt Generation for Semi-supervised Learning with Language Models](https://arxiv.org/abs/2302.09236) (February 2023) +- [Bounding the Capabilities of Large Language Models in Open Text Generation with Prompt Constraints](https://arxiv.org/abs/2302.09185) (February 2023) +- [À-la-carte Prompt Tuning (APT): Combining Distinct Data Via Composable Prompting](https://arxiv.org/abs/2302.07994) (February 2023) +- [GraphPrompt: Unifying Pre-Training and Downstream Tasks for Graph Neural Networks](https://arxiv.org/abs/2302.08043) (February 2023) +- [The Capacity for Moral Self-Correction in Large Language Models](https://arxiv.org/abs/2302.07459) (February 2023) +- [SwitchPrompt: Learning Domain-Specific Gated Soft Prompts for Classification in Low-Resource Domains](https://arxiv.org/abs/2302.06868) (February 2023) +- [Evaluating the Robustness of Discrete Prompts](https://arxiv.org/abs/2302.05619) (February 2023) +- [Compositional Exemplars for In-context Learning](https://arxiv.org/abs/2302.05698) (February 2023) +- [Hard Prompts Made Easy: Gradient-Based Discrete Optimization for Prompt Tuning and Discovery](https://arxiv.org/abs/2302.03668) (February 2023) +- [Multimodal Chain-of-Thought Reasoning in Language Models](https://arxiv.org/abs/2302.00923) (February 2023) +- [Large Language Models Can Be Easily Distracted by Irrelevant Context](https://arxiv.org/abs/2302.00093) (February 2023) +- [Synthetic Prompting: Generating Chain-of-Thought Demonstrations for Large Language Models](https://arxiv.org/abs/2302.00618) (February 2023) +- [Progressive Prompts: Continual Learning for Language Models](https://arxiv.org/abs/2301.12314) (January 2023) +- [Batch Prompting: Efficient Inference with LLM APIs](https://arxiv.org/abs/2301.08721) (January 2023) +- [Demonstrate-Search-Predict: Composing retrieval and language models for knowledge-intensive NLP](https://arxiv.org/abs/2212.14024) (December 2022) +- [On Second Thought, Let's Not Think Step by Step! 
Bias and Toxicity in Zero-Shot Reasoning](https://arxiv.org/abs/2212.08061) (December 2022) - [Constitutional AI: Harmlessness from AI Feedback](https://arxiv.org/abs/2212.08073) (December 2022) - [Successive Prompting for Decomposing Complex Questions](https://arxiv.org/abs/2212.04092) (December 2022) - [Large Language Models are reasoners with Self-Verification](https://arxiv.org/abs/2212.09561v1) (December 2022) - [Discovering Language Model Behaviors with Model-Written Evaluations](https://arxiv.org/abs/2212.09251) (December 2022) - [Structured Prompting: Scaling In-Context Learning to 1,000 Examples](https://arxiv.org/abs/2212.06713) (December 2022) - [PAL: Program-aided Language Models](https://arxiv.org/abs/2211.10435) (November 2022) - [Large Language Models Are Human-Level Prompt Engineers](https://arxiv.org/abs/2211.01910) (November 2022) - [Ignore Previous Prompt: Attack Techniques For Language Models](https://arxiv.org/abs/2211.09527) (November 2022) - [Machine Generated Text: A Comprehensive Survey of Threat Models and Detection Methods](https://arxiv.org/abs/2210.07321) (November 2022) - [Teaching Algorithmic Reasoning via In-context Learning](https://arxiv.org/abs/2211.09066) (November 2022) - [Enhancing Self-Consistency and Performance of Pre-Trained Language Models through Natural Language Inference](https://arxiv.org/abs/2211.11875) (November 2022) - [Ask Me Anything: A simple strategy for prompting language models](https://paperswithcode.com/paper/ask-me-anything-a-simple-strategy-for) (October 2022) - [Recitation-Augmented Language Models](https://arxiv.org/abs/2210.01296) (October 2022) - [ReAct: Synergizing Reasoning and Acting in Language Models](https://arxiv.org/abs/2210.03629) (October 2022) - [Prompting GPT-3 To Be Reliable](https://arxiv.org/abs/2210.09150) (October 2022) - [Decomposed Prompting: A Modular Approach for Solving Complex Tasks](https://arxiv.org/abs/2210.02406) (October 2022) - [Automatic Chain of Thought Prompting in Large Language Models](https://arxiv.org/abs/2210.03493) (October 2022) - [Language Models Are Greedy Reasoners: A Systematic Formal Analysis of Chain-of-Thought](https://arxiv.org/abs/2210.01240v3) (October 2022) - [Evaluating the Susceptibility of Pre-Trained Language Models via Handcrafted Adversarial Examples](https://arxiv.org/abs/2209.02128) (September 2022) - [Dynamic Prompt Learning via Policy Gradient for Semi-structured Mathematical Reasoning](https://arxiv.org/abs/2209.14610) (September 2022) - [Promptagator: Few-shot Dense Retrieval From 8 Examples](https://arxiv.org/abs/2209.11755) (September 2022) - [Atlas: Few-shot Learning with Retrieval Augmented Language Models](https://arxiv.org/abs/2208.03299) (November 2022) - [DocPrompting: Generating Code by Retrieving the Docs](https://arxiv.org/abs/2207.05987) (July 2022) - [On the Advance of Making Language Models Better Reasoners](https://arxiv.org/abs/2206.02336) (June 2022) - [Large Language Models are Zero-Shot Reasoners](https://arxiv.org/abs/2205.11916) (May 2022) - [Maieutic Prompting: Logically Consistent Reasoning with Recursive Explanations](https://arxiv.org/abs/2205.11822) (May 2022) - [MRKL Systems: A modular, neuro-symbolic architecture that combines large language models, external knowledge sources and discrete reasoning](https://arxiv.org/abs/2205.00445) (May 2022) - [PPT: Pre-trained Prompt Tuning for Few-shot Learning](https://aclanthology.org/2022.acl-long.576/) (May 2022) - [Toxicity Detection with Generative Prompt-based
Inference](https://arxiv.org/abs/2205.12390) (May 2022) - [Learning to Transfer Prompts for Text Generation](https://arxiv.org/abs/2205.01543) (May 2022) - [The Unreliability of Explanations in Few-shot Prompting for Textual Reasoning](https://arxiv.org/abs/2205.03401) (May 2022) - [A Taxonomy of Prompt Modifiers for Text-To-Image Generation](https://arxiv.org/abs/2204.13988) (April 2022) - [PromptChainer: Chaining Large Language Model Prompts through Visual Programming](https://arxiv.org/abs/2203.06566) (March 2022) - [Self-Consistency Improves Chain of Thought Reasoning in Language Models](https://arxiv.org/abs/2203.11171) (March 2022) - [Training language models to follow instructions with human feedback](https://arxiv.org/abs/2203.02155) (March 2022) - [Rethinking the Role of Demonstrations: What Makes In-Context Learning Work?](https://arxiv.org/abs/2202.12837) (February 2022) - [Chain of Thought Prompting Elicits Reasoning in Large Language Models](https://arxiv.org/abs/2201.11903) (January 2022) - [Show Your Work: Scratchpads for Intermediate Computation with Language Models](https://arxiv.org/abs/2112.00114) (November 2021) - [AI Chains: Transparent and Controllable Human-AI Interaction by Chaining Large Language Model Prompts](https://arxiv.org/abs/2110.01691) (October 2021) - [Generated Knowledge Prompting for Commonsense Reasoning](https://arxiv.org/abs/2110.08387) (October 2021) - [Multitask Prompted Training Enables Zero-Shot Task Generalization](https://arxiv.org/abs/2110.08207) (October 2021) - [Reframing Instructional Prompts to GPTk's Language](https://arxiv.org/abs/2109.07830) (September 2021) - [Design Guidelines for Prompt Engineering Text-to-Image Generative Models](https://arxiv.org/abs/2109.06977) (September 2021) - [Making Pre-trained Language Models Better Few-shot Learners](https://aclanthology.org/2021.acl-long.295) (August 2021) - [Fantastically Ordered Prompts and Where to Find Them: Overcoming Few-Shot Prompt Order Sensitivity](https://arxiv.org/abs/2104.08786) (April 2021) - [BERTese: Learning to Speak to BERT](https://aclanthology.org/2021.eacl-main.316) (April 2021) - [The Power of Scale for Parameter-Efficient Prompt Tuning](https://arxiv.org/abs/2104.08691) (April 2021) - [Prompt Programming for Large Language Models: Beyond the Few-Shot Paradigm](https://arxiv.org/abs/2102.07350) (February 2021) - [Calibrate Before Use: Improving Few-Shot Performance of Language Models](https://arxiv.org/abs/2102.09690) (February 2021) - [Prefix-Tuning: Optimizing Continuous Prompts for Generation](https://arxiv.org/abs/2101.00190) (January 2021) - [Learning to Generate Task-Specific Adapters from Task Description](https://arxiv.org/abs/2101.00420) (January 2021) - [Making Pre-trained Language Models Better Few-shot Learners](https://arxiv.org/abs/2012.15723) (December 2020) - [Learning from Task Descriptions](https://aclanthology.org/2020.emnlp-main.105/) (November 2020) - [AutoPrompt: Eliciting Knowledge from Language Models with Automatically Generated Prompts](https://arxiv.org/abs/2010.15980) (October 2020) - [Language Models are Few-Shot Learners](https://arxiv.org/abs/2005.14165) (May 2020) - [How Can We Know What Language Models Know?](https://direct.mit.edu/tacl/article/doi/10.1162/tacl_a_00324/96460/How-Can-We-Know-What-Language-Models-Know) (July 2020) - [Scaling Laws for Neural Language Models](https://arxiv.org/abs/2001.08361) (January 2020) ## 应用 - - [PaLM 2 Technical Report](https://ai.google/static/documents/palm2techreport.pdf) (May 2023)
- - [BloombergGPT: A Large Language Model for Finance](https://arxiv.org/abs/2303.17564) (March 2023) - - [Medical Intervention Duration Estimation Using Language-enhanced Transformer Encoder with Medical Prompts](https://arxiv.org/abs/2303.17408) (March 2023) - - [Soft-prompt tuning to predict lung cancer using primary care free-text Dutch medical notes](https://arxiv.org/abs/2303.15846) (March 2023) - - [TaskMatrix.AI: Completing Tasks by Connecting Foundation Models with Millions of APIs](https://arxiv.org/abs/2303.16434) (March 2023) - - [Larger Probes Tell a Different Story: Extending Psycholinguistic Datasets Via In-Context Learning](https://arxiv.org/abs/2303.16445) (March 2023) - - [Linguistically Informed ChatGPT Prompts to Enhance Japanese-Chinese Machine Translation: A Case Study on Attributive Clauses](https://arxiv.org/abs/2303.15587) (March 2023) - - [Knowledge-augmented Frame Semantic Parsing with Hybrid Prompt-tuning](https://arxiv.org/abs/2303.14375) (March 2023) - - [Debiasing Scores and Prompts of 2D Diffusion for Robust Text-to-3D Generation](https://arxiv.org/abs/2303.15413) (March 2023) - - [Zero-shot Model Diagnosis](https://arxiv.org/abs/2303.15441#) (March 2023) - - [Prompting Large Language Models to Generate Code-Mixed Texts: The Case of South East Asian Languages](https://arxiv.org/abs/2303.13592) (March 2023) - - [SPeC: A Soft Prompt-Based Calibration on Mitigating Performance Variability in Clinical Notes Summarization](https://arxiv.org/abs/2303.13035) (March 2023) - - [Large Language Models and Simple, Stupid Bugs](https://arxiv.org/abs/2303.11455) (March 2023) - - [Can Generative Pre-trained Transformers (GPT) Pass Assessments in Higher Education Programming Courses?](https://arxiv.org/abs/2303.09325) (Mar 2023) - - [SelfCheckGPT: Zero-Resource Black-Box Hallucination Detection for Generative Large Language Models](https://arxiv.org/abs/2303.08896) (Mar 2023) - - [ICL-D3IE: In-Context Learning with Diverse Demonstrations Updating for Document Information Extraction](https://arxiv.org/abs/2303.05063) (March 2023) - - [MathPrompter: Mathematical Reasoning using Large Language Models](https://arxiv.org/abs/2303.05398) (March 2023) - - [Prompt-Based Learning for Thread Structure Prediction in Cybersecurity Forums](https://arxiv.org/abs/2303.05400) (March 2023) - - [Choice Over Control: How Users Write with Large Language Models using Diegetic and Non-Diegetic Prompting](https://arxiv.org/abs/2303.03199) (March 2023) - - [Prompting Large Language Models with Answer Heuristics for Knowledge-based Visual Question Answering](https://arxiv.org/abs/2303.01903) (March 2023) - - [Soft Prompt Guided Joint Learning for Cross-Domain Sentiment Analysis](https://arxiv.org/abs/2303.00815) (March 2023) - - [SpeechPrompt v2: Prompt Tuning for Speech Classification Tasks](https://arxiv.org/abs/2303.00733) (March 2023) - - [Goal Driven Discovery of Distributional Differences via Language Descriptions](https://arxiv.org/abs/2302.14233) (Feb 2023) - - [Navigating the Grey Area: Expressions of Overconfidence and Uncertainty in Language Models](https://arxiv.org/abs/2302.13439) (Feb 2023) - - [TabGenie: A Toolkit for Table-to-Text Generation](https://arxiv.org/abs/2302.14169) (Feb 2023) - - [SGL-PT: A Strong Graph Learner with Graph Prompt Tuning](https://arxiv.org/abs/2302.12449) (Feb 2023) - - [Few-Shot Table-to-Text Generation with Prompt-based Adapter](https://arxiv.org/abs/2302.12468) (Feb 2023) - - [Language Models Are Few-shot Learners for Prognostic 
Prediction](https://arxiv.org/abs/2302.12692) (Feb 2023) - - [STA: Self-controlled Text Augmentation for Improving Text Classifications](https://arxiv.org/abs/2302.12784) (Feb 2023) - - [Check Your Facts and Try Again: Improving Large Language Models with External Knowledge and Automated Feedback](https://arxiv.org/abs/2302.12813) (Feb 2023) - - [How Generative AI models such as ChatGPT can be (Mis)Used in SPC Practice, Education, and Research? An Exploratory Study](https://arxiv.org/abs/2302.10916) (Feb 2023) - - [Grimm in Wonderland: Prompt Engineering with Midjourney to Illustrate Fairytales](https://arxiv.org/abs/2302.08961) (Feb 2023) - - [LabelPrompt: Effective Prompt-based Learning for Relation Classification](https://arxiv.org/abs/2302.08068) (Feb 2023) - - [Language Model Crossover: Variation through Few-Shot Prompting](https://arxiv.org/abs/2302.09236) (Feb 2023) - - [Prompt Tuning of Deep Neural Networks for Speaker-adaptive Visual Speech Recognition](https://arxiv.org/abs/2302.08102) (Feb 2023) - - [The Capacity for Moral Self-Correction in Large Language Models](https://arxiv.org/abs/2302.07459) (Feb 2023) - - [Prompting for Multimodal Hateful Meme Classification](https://arxiv.org/abs/2302.04156) (Feb 2023) - - [PLACES: Prompting Language Models for Social Conversation Synthesis](https://arxiv.org/abs/2302.03269) (Feb 2023) - - [Commonsense-Aware Prompting for Controllable Empathetic Dialogue Generation](https://arxiv.org/abs/2302.01441) (Feb 2023) - - [Crawling the Internal Knowledge-Base of Language Models](https://arxiv.org/abs/2301.12810) (Jan 2023) - - [Legal Prompt Engineering for Multilingual Legal Judgement Prediction](https://arxiv.org/abs/2212.02199) (Dec 2022) - - [Investigating Prompt Engineering in Diffusion Models](https://arxiv.org/abs/2211.15462) (Nov 2022) - - [Learn to Explain: Multimodal Reasoning via Thought Chains for Science Question Answering](https://arxiv.org/abs/2209.09513v2) (Sep 2022) - - [Conversing with Copilot: Exploring Prompt Engineering for Solving CS1 Problems Using Natural Language](https://arxiv.org/abs/2210.15157) (Oct 2022) - - [Piloting Copilot and Codex: Hot Temperature, Cold Prompts, or Black Magic?](https://arxiv.org/abs/2210.14699) (Oct 2022) - - [Plot Writing From Scratch Pre-Trained Language Models](https://aclanthology.org/2022.inlg-main.5) (July 2022) - - [Survey of Hallucination in Natural Language Generation](https://arxiv.org/abs/2202.03629) (Feb 2022) - [PromptRE: Weakly-Supervised Document-Level Relation Extraction via Prompting-Based Data Programming](https://arxiv.org/abs/2310.09265) (October 2023) - [Prompting Large Language Models with Chain-of-Thought for Few-Shot Knowledge Base Question Generation](https://arxiv.org/abs/2310.08395) (October 2023) - [Who Wrote it and Why?
Prompting Large-Language Models for Authorship Verification](https://arxiv.org/abs/2310.08123) (October 2023) +- [Promptor: A Conversational and Autonomous Prompt Generation Agent for Intelligent Text Entry Techniques](https://arxiv.org/abs/2310.08101) (October 2023) +- [Thought Propagation: An Analogical Approach to Complex Reasoning with Large Language Models](https://arxiv.org/abs/2310.03965) (October 2023) +- [From Sparse to Dense: GPT-4 Summarization with Chain of Density Prompting](https://arxiv.org/abs/2309.04269) (September 2023) +- [Self-Taught Optimizer (STOP): Recursively Self-Improving Code Generation](https://arxiv.org/abs/2310.02304) (October 2023) +- [Think before you speak: Training Language Models With Pause Tokens](https://arxiv.org/abs/2310.02226) (October 2023) +- [(Dynamic) Prompting might be all you need to repair Compressed LLMs](https://arxiv.org/abs/2310.00867) (October 2023) +- [In-Context Learning in Large Language Models: A Neuroscience-inspired Analysis of Representations](https://arxiv.org/abs/2310.00313) (September 2023) +- [Understanding In-Context Learning from Repetitions](https://arxiv.org/abs/2310.00297) (September 2023) +- [Investigating the Efficacy of Large Language Models in Reflective Assessment Methods through Chain of Thoughts Prompting](https://arxiv.org/abs/2310.00272) (September 2023) +- [Automatic Prompt Rewriting for Personalized Text Generation](https://arxiv.org/abs/2310.00152) (September 2023) +- [Efficient Streaming Language Models with Attention Sinks](https://arxiv.org/abs/2309.17453) (September 2023) +- [The Dawn of LMMs: Preliminary Explorations with GPT-4V(ision)](https://arxiv.org/abs/2309.17421) (September 2023) +- [Graph Neural Prompting with Large Language Models](https://arxiv.org/abs/2309.15427) (September 2023) +- [Large Language Model Alignment: A Survey](https://arxiv.org/abs/2309.15025) (September 2023) +- [Enhancing Zero-Shot Chain-of-Thought Reasoning in Large Language Models through Logic](https://arxiv.org/abs/2309.13339) (September 2023) +- [A Practical Survey on Zero-shot Prompt Design for In-context Learning](https://arxiv.org/abs/2309.13205) (September 2023) +- [EchoPrompt: Instructing the Model to Rephrase Queries for Improved In-context Learning](https://arxiv.org/abs/2309.10687) (September 2023) +- [Prompt, Condition, and Generate: Classification of Unsupported Claims with In-Context Learning](https://arxiv.org/abs/2309.10359) (September 2023) +- [PolicyGPT: Automated Analysis of Privacy Policies with Large Language Models](https://arxiv.org/abs/2309.10238) (September 2023) +- [LLM4Jobs: Unsupervised occupation extraction and standardization leveraging Large Language Models](https://arxiv.org/abs/2309.09708) (September 2023) +- [Summarization is (Almost) Dead](https://arxiv.org/abs/2309.09558) (September 2023) +- [Investigating Zero- and Few-shot Generalization in Fact Verification](https://arxiv.org/abs/2309.09444) (September 2023) +- [Performance of the Pre-Trained Large Language Model GPT-4 on Automated Short Answer Grading](https://arxiv.org/abs/2309.09338) (September 2023) +- [Contrastive Decoding Improves Reasoning in Large Language Models](https://arxiv.org/abs/2309.09117) (September 2023) +- [Struc-Bench: Are Large Language Models Really Good at Generating Complex Structured Data?](https://arxiv.org/abs/2309.08963) (September 2023) +- [Neural Machine Translation Models Can Learn to be Few-shot Learners](https://arxiv.org/abs/2309.08590) (September 2023) +- [Chain-of-Thought Reasoning is a Policy 
Improvement Operator](https://arxiv.org/abs/2309.08589) (September 2023) - [ICLEF: In-Context Learning with Expert Feedback for Explainable Style Transfer](https://arxiv.org/abs/2309.08583) (September 2023) - [When do Generative Query and Document Expansions Fail? A Comprehensive Study Across Methods, Retrievers, and Datasets](https://arxiv.org/abs/2309.08541) (September 2023) - [Using Large Language Models for Knowledge Engineering (LLMKE): A Case Study on Wikidata](https://arxiv.org/abs/2309.08491) (September 2023) - [Self-Consistent Narrative Prompts on Abductive Natural Language Inference](https://arxiv.org/abs/2309.08303) (September 2023) - [Investigating Answerability of LLMs for Long-Form Question Answering](https://arxiv.org/abs/2309.08210) (September 2023) - [PromptTTS++: Controlling Speaker Identity in Prompt-Based Text-to-Speech Using Natural Language Descriptions](https://arxiv.org/abs/2309.08140) (September 2023) - [An Empirical Evaluation of Prompting Strategies for Large Language Models in Zero-Shot Clinical Natural Language Processing](https://arxiv.org/abs/2309.08008) (September 2023) - [Leveraging Contextual Information for Effective Entity Salience Detection](https://arxiv.org/abs/2309.07990) (September 2023) - [Prompting4Debugging: Red-Teaming Text-to-Image Diffusion Models by Finding Problematic Prompts](https://arxiv.org/abs/2309.06135) (September 2023) - [PACE: Prompting and Augmentation for Calibrated Confidence Estimation with GPT-4 in Cloud Incident Root Cause Analysis](https://arxiv.org/abs/2309.05833) (September 2023) - [Measuring and Improving Chain-of-Thought Reasoning in Vision-Language Models](https://arxiv.org/abs/2309.04461) (September 2023) - [Zero-Resource Hallucination Prevention for Large Language Models](https://arxiv.org/abs/2309.02654) (September 2023) - [Certifying LLM Safety against Adversarial Prompting](https://arxiv.org/abs/2309.02772) (September 2023) - [Improving Code Generation by Dynamic Temperature Sampling](https://arxiv.org/abs/2309.02772) (September 2023) - [Prompting a Large Language Model to Generate Diverse Motivational Messages: A Comparison with Human-Written Messages](https://arxiv.org/abs/2308.13479) (August 2023) - [Financial News Analytics Using Fine-Tuned Llama 2 GPT Model](https://arxiv.org/abs/2308.13032) (August 2023) - [A Study on Robustness and Reliability of Large Language Model Code Generation](https://arxiv.org/abs/2308.10335) (August 2023) - [Large Language Models Vote: Prompting for Rare Disease Identification](https://arxiv.org/abs/2308.12890) (August 2023) - [WizardMath: Empowering Mathematical Reasoning for Large Language Models via Reinforced Evol-Instruct](https://arxiv.org/abs/2308.09583) (August 2023) - [Tree-of-Mixed-Thought: Combining Fast and Slow Thinking for Multi-hop Visual Reasoning](https://arxiv.org/abs/2308.09658) (August 2023) - [Graph of Thoughts: Solving Elaborate Problems with Large Language Models](https://arxiv.org/abs/2308.09687) (August 2023) - [Red-Teaming Large Language Models using Chain of Utterances for Safety-Alignment](https://arxiv.org/abs/2308.09662) (August 2023) - [Boosting Logical Reasoning in Large Language Models through a New Framework: The Graph of Thought](https://arxiv.org/abs/2308.08614) (August 2023) - [You Only Prompt Once: On the Capabilities of Prompt Learning on Large Language Models to Tackle Toxic
Content](https://arxiv.org/abs/2308.05596) (August 2023) - [LLM As DBA](https://arxiv.org/abs/2308.05481) (August 2023) - [Interpretable Math Word Problem Solution Generation Via Step-by-step Planning](https://arxiv.org/abs/2306.00784) (June 2023) - [In-Context Learning User Simulators for Task-Oriented Dialog Systems](https://arxiv.org/abs/2306.00774) (June 2023) - [SQL-PaLM: Improved Large Language Model Adaptation for Text-to-SQL](https://arxiv.org/abs/2306.00739) (June 2023) - [Effective Structured Prompting by Meta-Learning and Representative Verbalizer](https://arxiv.org/abs/2306.00618) (June 2023) - [Layout and Task Aware Instruction Prompt for Zero-shot Document Image Question Answering](https://arxiv.org/abs/2306.00526) (June 2023) - [Chain-Of-Thought Prompting Under Streaming Batch: A Case Study](https://arxiv.org/abs/2306.00550) (June 2023) - [Red Teaming Language Model Detectors with Language Models](https://arxiv.org/abs/2305.19713) (May 2023) - [Gorilla: Large Language Model Connected with Massive APIs](https://shishirpatil.github.io/gorilla/) (May 2023) - [Deliberate then Generate: Enhanced Prompting Framework for Text Generation](https://arxiv.org/abs/2305.19835) (May 2023) - [What does the Failure to Reason with "Respectively" in Zero/Few-Shot Settings Tell Us about Language Models?](https://arxiv.org/abs/2305.19597) (May 2023) - [ScoNe: Benchmarking Negation Reasoning in Language Models With Fine-Tuning and In-Context Learning](https://arxiv.org/abs/2305.19426) (May 2023) - [SheetCopilot: Bringing Software Productivity to the Next Level through Large Language Models](https://arxiv.org/abs/2305.19308) (May 2023) - [Grammar Prompting for Domain-Specific Language Generation with Large Language Models](https://arxiv.org/abs/2305.19234) (May 2023) - [Mitigating Label Biases for In-context Learning](https://arxiv.org/abs/2305.19148) (May 2023) - [Short Answer Grading Using One-shot Prompting and Text Similarity Scoring Model](https://arxiv.org/abs/2305.18638) (May 2023) - [Strategic Reasoning with Language Models](https://arxiv.org/abs/2305.19165) (May 2023) - [Dissecting Chain-of-Thought: A Study on Compositional In-Context Learning of MLPs](https://arxiv.org/abs/2305.18869) (May 2023) - [Marked Personas: Using Natural Language Prompts to Measure Stereotypes in Language Models](https://arxiv.org/abs/2305.18189) (May 2023) - [Leveraging Training Data in Few-Shot Prompting for Numerical Reasoning](https://arxiv.org/abs/2305.18170) (May 2023) - [Exploring Effectiveness of GPT-3 in Grammatical Error Correction: A Study on Performance and Controllability in Prompt-Based Methods](https://arxiv.org/abs/2305.18156) (May 2023) - [NOTABLE: Transferable Backdoor Attacks Against Prompt-based NLP Models](https://arxiv.org/abs/2305.17826) (May 2023) - [Tab-CoT: Zero-shot Tabular Chain of Thought](https://arxiv.org/abs/2305.17812) (May 2023) - [Evaluating GPT-3 Generated Explanations for Hateful Content Moderation](https://arxiv.org/abs/2305.17680) (May 2023) - [Prompt-Guided Retrieval Augmentation for Non-Knowledge-Intensive Tasks](https://arxiv.org/abs/2305.17653) (May 2023) - [Zero- and Few-Shot Event Detection via Prompt-Based Meta Learning](https://arxiv.org/abs/2305.17373) (May 2023) - [Chain-of-Thought Hub: A Continuous Effort to Measure Large Language Models' Reasoning Performance](https://arxiv.org/abs/2305.17306) (May 2023) - [Large Language Models Can be Lazy Learners: Analyze Shortcuts in In-Context Learning](https://arxiv.org/abs/2305.17256) (May 2023)
+- [Heterogeneous Value Evaluation for Large Language Models](https://arxiv.org/abs/2305.17147) (May 2023) +- [PromptNER: Prompt Locating and Typing for Named Entity Recognition](https://arxiv.org/abs/2305.17104) (May 2023) +- [Small Language Models Improve Giants by Rewriting Their Outputs](https://arxiv.org/abs/2305.13514v1) (May 2023) +- [On the Planning Abilities of Large Language Models -- A Critical Investigation](https://arxiv.org/abs/2305.15771v1) (May 2023) +- [Beyond Chain-of-Thought, Effective Graph-of-Thought Reasoning in Large Language Models](https://arxiv.org/abs/2305.16582) (May 2023) +- [PRODIGY: Enabling In-context Learning Over Graphs](https://arxiv.org/abs/2305.12600v1) (May 2023) +- [Large Language Models are Few-Shot Health Learners](https://arxiv.org/abs/2305.15525v1) (May 2023) +- [Role-Play with Large Language Models](https://arxiv.org/abs/2305.16367) (May 2023) +- [Measuring Inductive Biases of In-Context Learning with Underspecified Demonstrations](https://arxiv.org/abs/2305.13299v1) (May 2023) +- [Fact-Checking Complex Claims with Program-Guided Reasoning](https://arxiv.org/abs/2305.12744v1) (May 2023) +- [Large Language Models as Tool Makers](https://arxiv.org/abs/2305.17126v1) (May 2023) +- [Iterative Forward Tuning Boosts In-context Learning in Language Models](https://arxiv.org/abs/2305.13016v2) (May 2023) +- [SwiftSage: A Generative Agent with Fast and Slow Thinking for Complex Interactive Tasks](https://arxiv.org/abs/2305.17390v1) (May 2023) +- [Interactive Natural Language Processing](https://arxiv.org/abs/2305.13246v1) (May 2023) +- [An automatically discovered chain-of-thought prompt generalizes to novel models and datasets](https://arxiv.org/abs/2305.02897v1) (May 2023) +- [Large Language Model Guided Tree-of-Thought](https://arxiv.org/abs/2305.08291v1) (May 2023) +- [Active Retrieval Augmented Generation](https://arxiv.org/abs/2305.06983v1) (May 2023) +- [A PhD Student's Perspective on Research in NLP in the Era of Very Large Language Models](https://arxiv.org/abs/2305.12544v1) (May 2023) +- [Visual Chain of Thought: Bridging Logical Gaps with Multimodal Infillings](https://arxiv.org/abs/2305.02317v1) (May 2023) +- [Mirages: On Anthropomorphism in Dialogue Systems](https://arxiv.org/abs/2305.09800v1) (May 2023) +- [Model evaluation for extreme risks](https://arxiv.org/abs/2305.15324v1) (May 2023) +- [Language Models Don't Always Say What They Think: Unfaithful Explanations in Chain-of-Thought Prompting](https://arxiv.org/abs/2305.04388v1) (May 2023) +- [Cognitive Reframing of Negative Thoughts through Human-Language Model Interaction](https://arxiv.org/abs/2305.02466v1) (May 2023) +- [PromptClass: Weakly-Supervised Text Classification with Prompting Enhanced Noise-Robust Self-Training](https://arxiv.org/abs/2305.13723) (May 2023) +- [Augmented Large Language Models with Parametric Knowledge Guiding](https://arxiv.org/abs/2305.04757v2) (May 2023) +- [Aligning Large Language Models through Synthetic Feedback](https://arxiv.org/abs/2305.13735) (May 2023) +- [Concept-aware Training Improves In-context Learning Ability of Language Models](https://arxiv.org/abs/2305.13775) (May 2023) +- [FrugalGPT: How to Use Large Language Models While Reducing Cost and Improving Performance](https://arxiv.org/abs/2305.05176v1) (May 2023) +- [Enhancing Black-Box Few-Shot Text Classification with Prompt-Based Data Augmentation](https://arxiv.org/abs/2305.13785) (May 2023) +- [Detecting automatically the layout of clinical documents to enhance the performances of downstream 
natural language processing](https://arxiv.org/abs/2305.13817) (May 2023) +- ["Is the Pope Catholic?" Applying Chain-of-Thought Reasoning to Understanding Conversational Implicatures](https://arxiv.org/abs/2305.13826) (May 2023) +- [Let's Think Frame by Frame: Evaluating Video Chain of Thought with Video Infilling and Prediction](https://arxiv.org/abs/2305.13903) (May 2023) +- [Generating Data for Symbolic Language with Large Language Models](https://arxiv.org/abs/2305.13917) (May 2023) +- [Make a Choice! Knowledge Base Question Answering with In-Context Learning](https://arxiv.org/abs/2305.13972) (May 2023) +- [Improving Language Models via Plug-and-Play Retrieval Feedback](https://arxiv.org/abs/2305.14002) (May 2023) +- [Multi-Granularity Prompts for Topic Shift Detection in Dialogue](https://arxiv.org/abs/2305.14006) (May 2023) +- [The CoT Collection: Improving Zero-shot and Few-shot Learning of Language Models via Chain-of-Thought Fine-Tuning](https://arxiv.org/abs/2305.14045) (May 2023) +- [Can Language Models Understand Physical Concepts?](https://arxiv.org/abs/2305.14057) (May 2023) +- [Evaluating Factual Consistency of Summaries with Large Language Models](https://arxiv.org/abs/2305.14069) (May 2023) +- [Dr.ICL: Demonstration-Retrieved In-context Learning](https://arxiv.org/abs/2305.14128) (May 2023) +- [Probing in Context: Toward Building Robust Classifiers via Probing Large Language Models](https://arxiv.org/abs/2305.14171) (May 2023) +- [Skill-Based Few-Shot Selection for In-Context Learning](https://arxiv.org/abs/2305.14210) (May 2023) +- [Exploring Chain-of-Thought Style Prompting for Text-to-SQL](https://arxiv.org/abs/2305.14215) (May 2023) +- [Enhancing Chat Language Models by Scaling High-quality Instructional Conversations](https://arxiv.org/abs/2305.14233) (May 2023) +- [On Learning to Summarize with Large Language Models as References](https://arxiv.org/abs/2305.14239) (May 2023) +- [Learning to Generate Novel Scientific Directions with Contextualized Literature-based Discovery](https://arxiv.org/abs/2305.14259) (May 2023) +- [Active Learning Principles for In-Context Learning with Large Language Models](https://arxiv.org/abs/2305.14264) (May 2023) +- [Two Failures of Self-Consistency in the Multi-Step Reasoning of LLMs](https://arxiv.org/abs/2305.14279) (May 2023) +- [Improving Factuality and Reasoning in Language Models through Multiagent Debate](https://arxiv.org/abs/2305.14325) (May 2023) +- [ChatCoT: Tool-Augmented Chain-of-Thought Reasoning on\\ Chat-based Large Language Models](https://arxiv.org/abs/2305.14323) (May 2023) +- [WikiChat: A Few-Shot LLM-Based Chatbot Grounded with Wikipedia](https://arxiv.org/abs/2305.14292) (May 2023) +- [Query Rewriting for Retrieval-Augmented Large Language Models](https://arxiv.org/abs/2305.14283) (May 2023) +- [Discrete Prompt Optimization via Constrained Generation for Zero-shot Re-ranker](https://arxiv.org/abs/2305.13729) (May 2023) +- [Element-aware Summarization with Large Language Models: Expert-aligned Evaluation and Chain-of-Thought Method](https://arxiv.org/abs/2305.13412) (May 2023) +- [Small Language Models Improve Giants by Rewriting Their Outputs](https://arxiv.org/abs/2305.13514) (May 2023) +- [Prompting and Evaluating Large Language Models for Proactive Dialogues: Clarification, Target-guided, and Non-collaboration](https://arxiv.org/abs/2305.13626) (May 2023) +- [Prompt-Based Monte-Carlo Tree Search for Goal-Oriented Dialogue Policy Planning](https://arxiv.org/abs/2305.13660) (May 2023) +- [Mitigating Language 
Model Hallucination with Interactive Question-Knowledge Alignment](https://arxiv.org/abs/2305.13669) (May 2023)
+- [Making Language Models Better Tool Learners with Execution Feedback](https://arxiv.org/abs/2305.13068) (May 2023)
+- [Text-to-SQL Error Correction with Language Models of Code](https://arxiv.org/abs/2305.13073) (May 2023)
+- [Decomposed Prompting for Machine Translation Between Related Languages using Large Language Models](https://arxiv.org/abs/2305.13085) (May 2023)
+- [SPARSEFIT: Few-shot Prompting with Sparse Fine-tuning for Jointly Generating Predictions and Natural Language Explanations](https://arxiv.org/abs/2305.13235) (May 2023)
+- ["According to ..." Prompting Language Models Improves Quoting from Pre-Training Data](https://arxiv.org/abs/2305.13252) (May 2023)
+- [Prompt-based methods may underestimate large language models' linguistic generalizations](https://arxiv.org/abs/2305.13264) (May 2023)
+- [Chain of Knowledge: A Framework for Grounding Large Language Models with Structured Knowledge Bases](https://arxiv.org/abs/2305.13269) (May 2023)
+- [Measuring Inductive Biases of In-Context Learning with Underspecified Demonstrations](https://arxiv.org/abs/2305.13299) (May 2023)
+- [Automated Few-shot Classification with Instruction-Finetuned Language Models](https://arxiv.org/abs/2305.12576) (May 2023)
+- [Enhancing Few-shot Text-to-SQL Capabilities of Large Language Models: A Study on Prompt Design Strategies](https://arxiv.org/abs/2305.12586) (May 2023)
+- [MvP: Multi-view Prompting Improves Aspect Sentiment Tuple Prediction](https://arxiv.org/abs/2305.12627) (May 2023)
+- [Learning Interpretable Style Embeddings via Prompting LLMs](https://arxiv.org/abs/2305.12696) (May 2023)
+- [Enhancing Small Medical Learners with Privacy-preserving Contextual Prompting](https://arxiv.org/abs/2305.12723) (May 2023)
+- [Fact-Checking Complex Claims with Program-Guided Reasoning](https://arxiv.org/abs/2305.12744) (May 2023)
+- [A Benchmark on Extremely Weakly Supervised Text Classification: Reconcile Seed Matching and Prompting Approaches](https://arxiv.org/abs/2305.12749) (May 2023)
+- [This Prompt is Measuring \<mask\>: Evaluating Bias Evaluation in Language Models](https://arxiv.org/abs/2305.12757) (May 2023)
+- [Enhancing Cross-lingual Natural Language Inference by Soft Prompting with Multilingual Verbalizer](https://arxiv.org/abs/2305.12761) (May 2023)
+- [Evaluating Prompt-based Question Answering for Object Prediction in the Open Research Knowledge Graph](https://arxiv.org/abs/2305.12900) (May 2023)
+- [Explaining How Transformers Use Context to Build Predictions](https://arxiv.org/abs/2305.12535) (May 2023)
+- [PiVe: Prompting with Iterative Verification Improving Graph-based Generative Capability of LLMs](https://arxiv.org/abs/2305.12392) (May 2023)
+- [PromptNER: A Prompting Method for Few-shot Named Entity Recognition via k Nearest Neighbor Search](https://arxiv.org/abs/2305.12217) (May 2023)
+- [Logic-LM: Empowering Large Language Models with Symbolic Solvers for Faithful Logical Reasoning](https://arxiv.org/abs/2305.12295) (May 2023)
+- [Enhancing Few-shot NER with Prompt Ordering based Data Augmentation](https://arxiv.org/abs/2305.11791) (May 2023)
+- [Chain-of-thought prompting for responding to in-depth dialogue questions with LLM](https://arxiv.org/abs/2305.11792) (May 2023)
+- [How to Prompt LLMs for Text-to-SQL: A Study in Zero-shot, Single-domain, and Cross-domain Settings](https://arxiv.org/abs/2305.11853) (May 2023)
+- [Evaluation of medium-large Language Models at 
zero-shot closed book generative question answering](https://arxiv.org/abs/2305.11991) (May 2023) +- [Few-Shot Dialogue Summarization via Skeleton-Assisted Prompt Transfer](https://arxiv.org/abs/2305.12077) (May 2023) +- [Can NLP Models Correctly Reason Over Contexts that Break the Common Assumptions?](https://arxiv.org/abs/2305.12096) (May 2023) +- [Reasoning Implicit Sentiment with Chain-of-Thought Prompting](https://arxiv.org/abs/2305.11255) (May 2023) +- [Writing your own book: A method for going from closed to open book QA to improve robustness and performance of smaller LLMs](https://arxiv.org/abs/2305.11334) (May 2023) +- [AutoTrial: Prompting Language Models for Clinical Trial Design](https://arxiv.org/abs/2305.11366) (May 2023) +- [CRITIC: Large Language Models Can Self-Correct with Tool-Interactive Critiquing](https://arxiv.org/abs/2305.11738) (May 2023) +- [Controlling the Extraction of Memorized Data from Large Language Models via Prompt-Tuning](https://arxiv.org/abs/2305.11759) (May 2023) +- [Prompting with Pseudo-Code Instructions](https://arxiv.org/abs/2305.11790) (May 2023) +- [TrueTeacher: Learning Factual Consistency Evaluation with Large Language Models](https://arxiv.org/abs/2305.11171) (May 2023) +- [Aligning Instruction Tasks Unlocks Large Language Models as Zero-Shot Relation Extractors](https://arxiv.org/abs/2305.11159) (May 2023) +- [Exploiting Biased Models to De-bias Text: A Gender-Fair Rewriting Model](https://arxiv.org/abs/2305.11140) (May 2023) +- [Learning In-context Learning for Named Entity Recognition](https://arxiv.org/abs/2305.11038) (May 2023) +- [Take a Break in the Middle: Investigating Subgoals towards Hierarchical Script Generation](https://arxiv.org/abs/2305.10907) (May 2023) +- [TEPrompt: Task Enlightenment Prompt Learning for Implicit Discourse Relation Recognition](https://arxiv.org/abs/2305.10866) (May 2023) +- [Large Language Models can be Guided to Evade AI-Generated Text Detection](https://arxiv.org/abs/2305.10847) (May 2023) +- [Temporal Knowledge Graph Forecasting Without Knowledge Using In-Context Learning](https://arxiv.org/abs/2305.10613) (May 2023) +- [Prompting the Hidden Talent of Web-Scale Speech Models for Zero-Shot Task Generalization](https://arxiv.org/abs/2305.11095) (May 2023) +- [Think Outside the Code: Brainstorming Boosts Large Language Models in Code Generation](https://arxiv.org/abs/2305.10679) (May 2023) +- [Improving Language Model Negotiation with Self-Play and In-Context Learning from AI Feedback](https://arxiv.org/abs/2305.10142) (May 2023) +- [ConvXAI: Delivering Heterogeneous AI Explanations via Conversations to Support Human-AI Scientific Writing](https://arxiv.org/abs/2305.09770) (May 2023) +- [StructGPT: A General Framework for Large Language Model to Reason over Structured Data](https://arxiv.org/abs/2305.09645) (May 2023) +- [Towards Expert-Level Medical Question Answering with Large Language Models](https://arxiv.org/abs/2305.09617) (May 2023) +- [Large Language Models are Built-in Autoregressive Search Engines](https://arxiv.org/abs/2305.09612) (May 2023) +- [MsPrompt: Multi-step Prompt Learning for Debiasing Few-shot Event Detection](https://arxiv.org/abs/2305.09335) (May 2023) +- [Exploring the Impact of Layer Normalization for Zero-shot Neural Machine Translation](https://arxiv.org/abs/2305.09312) (May 2023) +- [SGP-TOD: Building Task Bots Effortlessly via Schema-Guided LLM Prompting](https://arxiv.org/abs/2305.09067) (May 2023) +- [Multi-modal Visual Understanding with Prompts for Semantic Information 
Disentanglement of Image](https://arxiv.org/abs/2305.09333) (May 2023) +- [Soft Prompt Decoding for Multilingual Dense Retrieval](https://arxiv.org/abs/2305.09025) (May 2023) +- [PaLM 2 Technical Report](https://ai.google/static/documents/palm2techreport.pdf) (May 2023) +- [Are LLMs All You Need for Task-Oriented Dialogue?](https://arxiv.org/abs/2304.06556) (April 2023) +- [HiPrompt: Few-Shot Biomedical Knowledge Fusion via Hierarchy-Oriented Prompting](https://arxiv.org/abs/2304.05973) (April 2023) +- [Approximating Human Evaluation of Social Chatbots with Prompting](https://arxiv.org/abs/2304.05253) (April 2023) +- [Automated Reading Passage Generation with OpenAI's Large Language Model](https://arxiv.org/abs/2304.04616) (April 2023) +- [WebBrain: Learning to Generate Factually Correct Articles for Queries by Grounding on Large Web Corpus](https://arxiv.org/abs/2304.04358) (April 2023) +- [Prompt Pre-Training with Twenty-Thousand Classes for Open-Vocabulary Visual Recognition](https://arxiv.org/abs/2304.04704) (April 2023) +- [GPT detectors are biased against non-native English writers](https://arxiv.org/abs/2304.02819) (April 2023) +- [Zero-Shot Next-Item Recommendation using Large Pretrained Language Models](https://arxiv.org/abs/2304.03153) (April 2023) +- [Large Language Models as Master Key: Unlocking the Secrets of Materials Science with GPT](https://arxiv.org/abs/2304.02213) (April 2023) +- [Efficiently Aligned Cross-Lingual Transfer Learning for Conversational Tasks using Prompt-Tuning](https://arxiv.org/abs/2304.01295) (April 2023) +- [Better Language Models of Code through Self-Improvement](https://arxiv.org/abs/2304.01228) (April 2023) +- [PromptORE -- A Novel Approach Towards Fully Unsupervised Relation Extraction](https://arxiv.org/abs/2304.01209) (April 2023) +- [Assessing Language Model Deployment with Risk Cards]() (April 2023) +- [Enhancing Large Language Models with Climate Resources](https://arxiv.org/abs/2304.00116) (March 2023) +- [BloombergGPT: A Large Language Model for Finance](https://arxiv.org/abs/2303.17564) (March 2023) +- [Medical Intervention Duration Estimation Using Language-enhanced Transformer Encoder with Medical Prompts](https://arxiv.org/abs/2303.17408) (March 2023) +- [Soft-prompt tuning to predict lung cancer using primary care free-text Dutch medical notes](https://arxiv.org/abs/2303.15846) (March 2023) +- [TaskMatrix.AI: Completing Tasks by Connecting Foundation Models with Millions of APIs](https://arxiv.org/abs/2303.16434) (March 2023) +- [Larger Probes Tell a Different Story: Extending Psycholinguistic Datasets Via In-Context Learning](https://arxiv.org/abs/2303.16445) (March 2023) +- [Linguistically Informed ChatGPT Prompts to Enhance Japanese-Chinese Machine Translation: A Case Study on Attributive Clauses](https://arxiv.org/abs/2303.15587) (March 2023) +- [Knowledge-augmented Frame Semantic Parsing with Hybrid Prompt-tuning](https://arxiv.org/abs/2303.14375) (March 2023) +- [Debiasing Scores and Prompts of 2D Diffusion for Robust Text-to-3D Generation](https://arxiv.org/abs/2303.15413) (March 2023) +- [Zero-shot Model Diagnosis](https://arxiv.org/abs/2303.15441#) (March 2023) +- [Prompting Large Language Models to Generate Code-Mixed Texts: The Case of South East Asian Languages](https://arxiv.org/abs/2303.13592) (March 2023) +- [SPeC: A Soft Prompt-Based Calibration on Mitigating Performance Variability in Clinical Notes Summarization](https://arxiv.org/abs/2303.13035) (March 2023) +- [Large Language Models and Simple, Stupid 
Bugs](https://arxiv.org/abs/2303.11455) (March 2023) +- [Can Generative Pre-trained Transformers (GPT) Pass Assessments in Higher Education Programming Courses?](https://arxiv.org/abs/2303.09325) (March 2023) +- [SelfCheckGPT: Zero-Resource Black-Box Hallucination Detection for Generative Large Language Models](https://arxiv.org/abs/2303.08896) (March 2023) +- [Large Language Models in the Workplace: A Case Study on Prompt Engineering for Job Type Classification](https://arxiv.org/abs/2303.07142) (March 2023) +- [ICL-D3IE: In-Context Learning with Diverse Demonstrations Updating for Document Information Extraction](https://arxiv.org/abs/2303.05063) (March 2023) +- [MathPrompter: Mathematical Reasoning using Large Language Models](https://arxiv.org/abs/2303.05398) (March 2023) +- [Prompt-Based Learning for Thread Structure Prediction in Cybersecurity Forums](https://arxiv.org/abs/2303.05400) (March 2023) +- [Choice Over Control: How Users Write with Large Language Models using Diegetic and Non-Diegetic Prompting](https://arxiv.org/abs/2303.03199) (March 2023) +- [Prompting Large Language Models with Answer Heuristics for Knowledge-based Visual Question Answering](https://arxiv.org/abs/2303.01903) (March 2023) +- [Soft Prompt Guided Joint Learning for Cross-Domain Sentiment Analysis](https://arxiv.org/abs/2303.00815) (March 2023) +- [SpeechPrompt v2: Prompt Tuning for Speech Classification Tasks](https://arxiv.org/abs/2303.00733) (March 2023) +- [Goal Driven Discovery of Distributional Differences via Language Descriptions](https://arxiv.org/abs/2302.14233) (February 2023) +- [Navigating the Grey Area: Expressions of Overconfidence and Uncertainty in Language Models](https://arxiv.org/abs/2302.13439) (February 2023) +- [TabGenie: A Toolkit for Table-to-Text Generation](https://arxiv.org/abs/2302.14169) (February 2023) +- [SGL-PT: A Strong Graph Learner with Graph Prompt Tuning](https://arxiv.org/abs/2302.12449) (February 2023) +- [Few-Shot Table-to-Text Generation with Prompt-based Adapter](https://arxiv.org/abs/2302.12468) (February 2023) +- [Language Models Are Few-shot Learners for Prognostic Prediction](https://arxiv.org/abs/2302.12692) (February 2023) +- [STA: Self-controlled Text Augmentation for Improving Text Classifications](https://arxiv.org/abs/2302.12784) (February 2023) +- [Check Your Facts and Try Again: Improving Large Language Models with External Knowledge and Automated Feedback](https://arxiv.org/abs/2302.12813) (February 2023) +- [How Generative AI models such as ChatGPT can be (Mis)Used in SPC Practice, Education, and Research? 
An Exploratory Study](https://arxiv.org/abs/2302.10916) (February 2023) +- [Grimm in Wonderland: Prompt Engineering with Midjourney to Illustrate Fairytales](https://arxiv.org/abs/2302.08961) (February 2023) +- [LabelPrompt: Effective Prompt-based Learning for Relation Classification](https://arxiv.org/abs/2302.08068) (February 2023) +- [Language Model Crossover: Variation through Few-Shot Prompting](https://arxiv.org/abs/2302.09236) (February 2023) +- [Prompt Tuning of Deep Neural Networks for Speaker-adaptive Visual Speech Recognition](https://arxiv.org/abs/2302.08102) (February 2023) +- [The Capacity for Moral Self-Correction in Large Language Models](https://arxiv.org/abs/2302.07459) (February 2023) +- [Prompting for Multimodal Hateful Meme Classification](https://arxiv.org/abs/2302.04156) (February 2023) +- [PLACES: Prompting Language Models for Social Conversation Synthesis](https://arxiv.org/abs/2302.03269) (February 2023) +- [Toolformer: Language Models Can Teach Themselves to Use Tools](https://arxiv.org/abs/2302.04761) (February 2023) +- [Commonsense-Aware Prompting for Controllable Empathetic Dialogue Generation](https://arxiv.org/abs/2302.01441) (February 2023) +- [Crawling the Internal Knowledge-Base of Language Models](https://arxiv.org/abs/2301.12810) (January 2023) +- [Legal Prompt Engineering for Multilingual Legal Judgement Prediction](https://arxiv.org/abs/2212.02199) (December 2022) +- [Investigating Prompt Engineering in Diffusion Models](https://arxiv.org/abs/2211.15462) (November 2022) +- [Learn to Explain: Multimodal Reasoning via Thought Chains for Science Question Answering](https://arxiv.org/abs/2209.09513v2) (September 2022) +- [Conversing with Copilot: Exploring Prompt Engineering for Solving CS1 Problems Using Natural Language](https://arxiv.org/abs/2210.15157) (October 2022) +- [Piloting Copilot and Codex: Hot Temperature, Cold Prompts, or Black Magic?](https://arxiv.org/abs/2210.14699) (October 2022) +- [Plot Writing From Scratch Pre-Trained Language Models](https://aclanthology.org/2022.inlg-main.5) (July 2022) +- [Survey of Hallucination in Natural Language Generation](https://arxiv.org/abs/2202.03629) (February 2022) + ## 收集 - - [Chain-of-Thought Papers](https://github.com/Timothyxxx/Chain-of-ThoughtsPapers) - - [Papers with Code](https://paperswithcode.com/task/prompt-engineering) - - [Prompt Papers](https://github.com/thunlp/PromptPapers#papers) \ No newline at end of file +- [Chain-of-Thought Papers](https://github.com/Timothyxxx/Chain-of-ThoughtsPapers) +- [Papers with Code](https://paperswithcode.com/task/prompt-engineering) +- [Prompt Papers](https://github.com/thunlp/PromptPapers#papers) diff --git a/pages/prompts.zh.mdx b/pages/prompts.zh.mdx index dbb0d3047..1f1adf0fe 100644 --- a/pages/prompts.zh.mdx +++ b/pages/prompts.zh.mdx @@ -1,3 +1,8 @@ # Prompt Hub -This page needs a translation! Feel free to contribute a translation by clicking the `Edit this page` button on the right. 
\ No newline at end of file
+import PromptFiles from 'components/PromptFiles'
+
+Prompt Hub 是一个汇集各类提示词的资源库,旨在测试大型语言模型(LLMs)在多种基础能力与复杂任务上的表现。我们希望 Prompt Hub 能帮助您探索有趣的模型应用方式、进行模型实验,并基于 LLM 构建创新项目。我们诚挚欢迎来自人工智能研究与开发者社区的积极参与和贡献。
+
+<PromptFiles lang="zh" />
\ No newline at end of file
diff --git a/pages/readings.zh.mdx b/pages/readings.zh.mdx
index 4728e08cc..ab0d53d7c 100644
--- a/pages/readings.zh.mdx
+++ b/pages/readings.zh.mdx
@@ -1,31 +1,40 @@
 # 阅读推荐
 #### (按名称排序)
+
 - [【徹底解説】これからのエンジニアの必携スキル、プロンプトエンジニアリングの手引「Prompt Engineering Guide」を読んでまとめてみた](https://dev.classmethod.jp/articles/how-to-design-prompt-engineering/)
+- [2023 AI Index Report](https://aiindex.stanford.edu/report/)
 - [3 Principles for prompt engineering with GPT-3](https://www.linkedin.com/pulse/3-principles-prompt-engineering-gpt-3-ben-whately)
 - [A beginner-friendly guide to generative language models - LaMBDA guide](https://aitestkitchen.withgoogle.com/how-lamda-works)
 - [A Complete Introduction to Prompt Engineering for Large Language Models](https://www.mihaileric.com/posts/a-complete-introduction-to-prompt-engineering)
 - [A Generic Framework for ChatGPT Prompt Engineering](https://medium.com/@thorbjoern.heise/a-generic-framework-for-chatgpt-prompt-engineering-7097f6513a0b)
-- [An SEO’s guide to ChatGPT prompts](https://searchengineland.com/chatgpt-prompts-seo-393523)
 - [AI Content Generation](https://www.jonstokes.com/p/ai-content-generation-part-1-machine)
-- [AI's rise generates new job title: Prompt engineer](https://www.axios.com/2023/02/22/chatgpt-prompt-engineers-ai-job)
 - [AI Safety, RLHF, and Self-Supervision - Jared Kaplan | Stanford MLSys #79](https://www.youtube.com/watch?v=fqC3D-zNJUM&ab_channel=StanfordMLSysSeminars)
-- [Awesome Textual Instruction Learning Papers](https://github.com/RenzeLou/awesome-instruction-learning)
+- [AI's rise generates new job title: Prompt engineer](https://www.axios.com/2023/02/22/chatgpt-prompt-engineers-ai-job)
+- [An SEO’s guide to ChatGPT prompts](https://searchengineland.com/chatgpt-prompts-seo-393523)
+- [Anyone can Design! 
With a little help from Generative AI](https://github.com/YashSharma/PromptEngineering) - [Awesome ChatGPT Prompts](https://github.com/f/awesome-chatgpt-prompts) +- [Awesome Textual Instruction Learning Papers](https://github.com/RenzeLou/awesome-instruction-learning) - [Best 100+ Stable Diffusion Prompts](https://mpost.io/best-100-stable-diffusion-prompts-the-most-beautiful-ai-text-to-image-prompts) - [Best practices for prompt engineering with OpenAI API](https://help.openai.com/en/articles/6654000-best-practices-for-prompt-engineering-with-openai-api) - [Building GPT-3 applications — beyond the prompt](https://medium.com/data-science-at-microsoft/building-gpt-3-applications-beyond-the-prompt-504140835560) - [Can AI really be protected from text-based attacks?](https://techcrunch.com/2023/02/24/can-language-models-really-be-protected-from-text-based-attacks/) -- [ChatGPT, AI and GPT-3 Apps and use cases](https://gpt3demo.com) +- [ChatGPT Plugins Collection ⭐️ (unofficial)](https://github.com/logankilpatrick/ChatGPT-Plugins-Collection) - [ChatGPT Prompts](https://twitter.com/aaditsh/status/1636398208648658945?s=20) +- [ChatGPT, AI and GPT-3 Apps and use cases](https://gpt3demo.com) +- [ChatGPT3 Prompt Engineering](https://github.com/mattnigh/ChatGPT3-Free-Prompt-List) - [CMU Advanced NLP 2022: Prompting](https://youtube.com/watch?v=5ef83Wljm-M&feature=shares) - [Common Sense as Dark Matter - Yejin Choi | Stanford MLSys #78](https://youtube.com/live/n4HakBqoCVg?feature=shares) - [Create images with your words – Bing Image Creator comes to the new Bing](https://blogs.microsoft.com/blog/2023/03/21/create-images-with-your-words-bing-image-creator-comes-to-the-new-bing/) +- [CS 324 - Advances in Foundation Models](https://stanford-cs324.github.io/winter2023/) +- [CS224N: Natural Language Processing with Deep Learning](https://web.stanford.edu/class/cs224n/) +- [CS324 - Large Language Models](https://stanford-cs324.github.io/winter2022/) - [Curtis64's set of prompt gists](https://gist.github.com/Curtis-64) -- [DALL·E 2 Prompt Engineering Guide](https://docs.google.com/document/d/11WlzjBT0xRpQhP9tFMtxzd0q6ANIdHPUBkMV-YB043U/edit#) - [DALL·E 2 Preview - Risks and Limitations](https://github.com/openai/dalle-2-preview/blob/main/system-card.md) +- [DALL·E 2 Prompt Engineering Guide](https://docs.google.com/document/d/11WlzjBT0xRpQhP9tFMtxzd0q6ANIdHPUBkMV-YB043U/edit#) - [DALLE Prompt Book](https://dallery.gallery/the-dalle-2-prompt-book) - [DALL-E, Make Me Another Picasso, Please](https://www.newyorker.com/magazine/2022/07/11/dall-e-make-me-another-picasso-please?) 
- [Diffusion Models: A Practical Guide](https://scale.com/guides/diffusion-models-guide) +- [Eight Things to Know about Large Language Models](https://arxiv.org/pdf/2304.00612v1.pdf) - [Exploiting GPT-3 Prompts](https://twitter.com/goodside/status/1569128808308957185) - [Exploring Prompt Injection Attacks](https://research.nccgroup.com/2022/12/05/exploring-prompt-injection-attacks) - [Extrapolating to Unnatural Language Processing with GPT-3's In-context Learning: The Good, the Bad, and the Mysterious](http://ai.stanford.edu/blog/in-context-learning) @@ -36,6 +45,7 @@ - [Giving GPT-3 a Turing Test](https://lacker.io/ai/2020/07/06/giving-gpt-3-a-turing-test.html) - [GPT-3 & Beyond](https://youtube.com/watch?v=-lnHHWRCDGk) - [GPT3 and Prompts: A quick primer](https://buildspace.so/notes/intro-to-gpt3-prompts) +- [GPT-4 Tutorial: How to Chat With Multiple PDF Files (~1000 pages of Tesla's 10-K Annual Reports)](https://youtu.be/Ix9WIZpArm0) - [Hands-on with Bing’s new ChatGPT-like features](https://techcrunch.com/2023/02/08/hands-on-with-the-new-bing/) - [How to Draw Anything](https://andys.page/posts/how-to-draw) - [How to get images that don't suck](https://www.reddit.com/r/StableDiffusion/comments/x41n87/how_to_get_images_that_dont_suck_a) @@ -43,63 +53,71 @@ - [How to perfect your prompt writing for AI generators](https://www.sydney.edu.au/news-opinion/news/2023/02/28/how-to-perfect-your-prompt-writing-for-ai-generators.html) - [How to write good prompts](https://andymatuschak.org/prompts) - [If I Was Starting Prompt Engineering in 2023: My 8 Insider Tips](https://youtube.com/watch?v=SirW7feTjh0&feature=shares) +- [In defense of prompt engineering](https://simonwillison.net/2023/Feb/21/in-defense-of-prompt-engineering/) - [Indirect Prompt Injection on Bing Chat](https://greshake.github.io/) - [Interactive guide to GPT-3 prompt parameters](https://sevazhidkov.com/interactive-guide-to-gpt-3-prompt-parameters) +- [Introduction to ChatGPT](https://www.edx.org/course/introduction-to-chatgpt) - [Introduction to Reinforcement Learning with Human Feedback](https://www.surgehq.ai/blog/introduction-to-reinforcement-learning-with-human-feedback-rlhf-series-part-1) -- [In defense of prompt engineering](https://simonwillison.net/2023/Feb/21/in-defense-of-prompt-engineering/) - [JailBreaking ChatGPT: Everything You Need to Know](https://metaroids.com/learn/jailbreaking-chatgpt-everything-you-need-to-know/) -- [Language Models and Prompt Engineering: Systematic Survey of Prompting Methods in NLP](https://youtube.com/watch?v=OsbUfL8w-mo&feature=shares) - [Language Model Behavior: A Comprehensive Survey](https://arxiv.org/abs/2303.11504) +- [Language Models and Prompt Engineering: Systematic Survey of Prompting Methods in NLP](https://youtube.com/watch?v=OsbUfL8w-mo&feature=shares) - [Learn Prompting](https://learnprompting.org) +- [Learning Prompt](https://github.com/thinkingjimmy/Learning-Prompt) +- [LINGO : Visually Debiasing Natural Language Instructions to Support Task Diversity](https://arxiv.org/abs/2304.06184) +- [Long Context Prompting for Claude 2.1](https://www.anthropic.com/news/claude-2-1-prompting) +- [Make PowerPoint presentations with ChatGPT](https://www.reddit.com/r/AIAssisted/comments/13xf8pq/make_powerpoint_presentations_with_chatgpt/) - [Meet Claude: Anthropic’s Rival to ChatGPT](https://scale.com/blog/chatgpt-vs-claude) - [Methods of prompt programming](https://generative.ink/posts/methods-of-prompt-programming) - [Mysteries of mode 
collapse](https://www.lesswrong.com/posts/t9svvNPNmFf5Qa3TA/mysteries-of-mode-collapse) - [NLP for Text-to-Image Generators: Prompt Analysis](https://heartbeat.comet.ml/nlp-for-text-to-image-generators-prompt-analysis-part-1-5076a44d8365) -- [NLP with Deep Learning CS224N/Ling284 - Lecture 11: Promting, Instruction Tuning, and RLHF](http://web.stanford.edu/class/cs224n/slides/cs224n-2023-lecture11-prompting-rlhf.pdf) +- [NLP with Deep Learning CS224N/Ling284 - Lecture 11: Prompting, Instruction Tuning, and RLHF](http://web.stanford.edu/class/cs224n/slides/cs224n-2023-lecture11-prompting-rlhf.pdf) - [Notes for Prompt Engineering by sw-yx](https://github.com/sw-yx/ai-notes) +- [On pitfalls (and advantages) of sophisticated large language models](https://arxiv.org/abs/2303.17511) - [OpenAI Cookbook](https://github.com/openai/openai-cookbook) - [OpenAI Prompt Examples for several applications](https://platform.openai.com/examples) -- [Pretrain, Prompt, Predict - A New Paradigm for NLP](http://pretrain.nlpedia.ai) +- [Pretrain, Prompt, Predict -  A New Paradigm for NLP](http://pretrain.nlpedia.ai) - [Prompt Engineer: Tech's hottest job title?](https://www.peoplematters.in/article/talent-management/is-prompt-engineering-the-hottest-job-in-ai-today-37036) -- [Prompt Engineering by Lilian Weng](https://lilianweng.github.io/posts/2023-03-15-prompt-engineering/) +- [Prompt Engineering - A new profession ?](https://www.youtube.com/watch?v=w102J3_9Bcs&ab_channel=PatrickDebois) - [Prompt Engineering 101 - Introduction and resources](https://www.linkedin.com/pulse/prompt-engineering-101-introduction-resources-amatriain) - [Prompt Engineering 101: Autocomplete, Zero-shot, One-shot, and Few-shot prompting](https://youtube.com/watch?v=v2gD8BHOaX4&feature=shares) - [Prompt Engineering 101](https://humanloop.com/blog/prompt-engineering-101) -- [Prompt Engineering - A new profession ?](https://www.youtube.com/watch?v=w102J3_9Bcs&ab_channel=PatrickDebois) +- [Prompt Engineering 201: Advanced prompt engineering and toolkits](https://amatriain.net/blog/prompt201) - [Prompt Engineering by co:here](https://docs.cohere.ai/docs/prompt-engineering) +- [Prompt Engineering by Lilian Weng](https://lilianweng.github.io/posts/2023-03-15-prompt-engineering/) - [Prompt Engineering by Microsoft](https://microsoft.github.io/prompt-engineering) -- [Prompt Engineering: The Career of Future](https://shubhamsaboo111.medium.com/prompt-engineering-the-career-of-future-2fb93f90f117) - [Prompt engineering davinci-003 on our own docs for automated support (Part I)](https://www.patterns.app/blog/2022/12/21/finetune-llm-tech-support) - [Prompt Engineering Guide: How to Engineer the Perfect Prompts](https://richardbatt.co.uk/prompt-engineering-guide-how-to-engineer-the-perfect-prompts) - [Prompt Engineering in GPT-3](https://www.analyticsvidhya.com/blog/2022/05/prompt-engineering-in-gpt-3) - [Prompt Engineering Template](https://docs.google.com/spreadsheets/d/1-snKDn38-KypoYCk9XLPg799bHcNFSBAVu2HVvFEAkA/edit#gid=0) - [Prompt Engineering Topic by GitHub](https://github.com/topics/prompt-engineering) -- [Prompt Engineering: The Ultimate Guide 2023 [GPT-3 & ChatGPT]](https://businessolution.org/prompt-engineering/) -- [Prompt Engineering: From Words to Art](https://www.saxifrage.xyz/post/prompt-engineering) - [Prompt Engineering with OpenAI's GPT-3 and other LLMs](https://youtube.com/watch?v=BP9fi_0XTlw&feature=shares) +- [Prompt Engineering: From Words to Art](https://www.saxifrage.xyz/post/prompt-engineering) +- [Prompt Engineering: The 
Career of Future](https://shubhamsaboo111.medium.com/prompt-engineering-the-career-of-future-2fb93f90f117) +- [Prompt Engineering: The Ultimate Guide 2023 [GPT-3 & ChatGPT]](https://businessolution.org/prompt-engineering/) - [Prompt injection attacks against GPT-3](https://simonwillison.net/2022/Sep/12/prompt-injection) - [Prompt injection to read out the secret OpenAI API key](https://twitter.com/ludwig_stumpp/status/1619701277419794435?s=20&t=GtoMlmYCSt-UmvjqJVbBSA) -- [Prompting: Better Ways of Using Language Models for NLP Tasks](https://thegradient.pub/prompting/) - [Prompting for Few-shot Learning](https://www.cs.princeton.edu/courses/archive/fall22/cos597G/lectures/lec05.pdf) - [Prompting in NLP: Prompt-based zero-shot learning](https://savasy-22028.medium.com/prompting-in-nlp-prompt-based-zero-shot-learning-3f34bfdb2b72) - [Prompting Methods with Language Models and Their Applications to Weak Supervision](https://snorkel.ai/prompting-methods-with-language-models-nlp) +- [Prompting: Better Ways of Using Language Models for NLP Tasks](https://thegradient.pub/prompting/) - [Prompts as Programming by Gwern](https://www.gwern.net/GPT-3#prompts-as-programming) - [Prompts for communicators using the new AI-powered Bing](https://blogs.microsoft.com/blog/2023/03/16/prompts-for-communicators-using-the-new-ai-powered-bing/) -- [Reverse Prompt Engineering for Fun and (no) Profit](https://lspace.swyx.io/p/reverse-prompt-eng) - [Retrieving Multimodal Information for Augmented Generation: A Survey](https://arxiv.org/pdf/2303.10868.pdf) -- [So you want to be a prompt engineer: Critical careers of the future](https://venturebeat.com/ai/so-you-want-to-be-a-prompt-engineer-critical-careers-of-the-future/) +- [Reverse Prompt Engineering for Fun and (no) Profit](https://lspace.swyx.io/p/reverse-prompt-eng) - [Simulators](https://www.lesswrong.com/posts/vJFdjigzmcXMhNTsx/simulators) +- [So you want to be a prompt engineer: Critical careers of the future](https://venturebeat.com/ai/so-you-want-to-be-a-prompt-engineer-critical-careers-of-the-future/) - [Start with an Instruction](https://beta.openai.com/docs/quickstart/start-with-an-instruction) - [Talking to machines: prompt engineering & injection](https://artifact-research.com/artificial-intelligence/talking-to-machines-prompt-engineering-injection) - [Tech’s hottest new job: AI whisperer. 
No coding required](https://www.washingtonpost.com/technology/2023/02/25/prompt-engineers-techs-next-big-job/)
 - [The Book - Fed Honeypot](https://fedhoneypot.notion.site/25fdbdb69e9e44c6877d79e18336fe05?v=1d2bf4143680451986fd2836a04afbf4)
-- [The ChatGPT Prompt Book](https://docs.google.com/presentation/d/17b_ocq-GL5lhV_bYSShzUgxL02mtWDoiw9xEroJ5m3Q/edit#slide=id.gc6f83aa91_0_79)
 - [The ChatGPT list of lists: A collection of 3000+ prompts, examples, use-cases, tools, APIs, extensions, fails and other resources](https://medium.com/mlearning-ai/the-chatgpt-list-of-lists-a-collection-of-1500-useful-mind-blowing-and-strange-use-cases-8b14c35eb)
-- [The Most Important Job Skill of This Century](https://www.theatlantic.com/technology/archive/2023/02/openai-text-models-google-search-engine-bard-chatbot-chatgpt-prompt-writing/672991/)
+- [The ChatGPT Prompt Book](https://docs.google.com/presentation/d/17b_ocq-GL5lhV_bYSShzUgxL02mtWDoiw9xEroJ5m3Q/edit#slide=id.gc6f83aa91_0_79)
 - [The Mirror of Language](https://deepfates.com/the-mirror-of-language)
+- [The Most Important Job Skill of This Century](https://www.theatlantic.com/technology/archive/2023/02/openai-text-models-google-search-engine-bard-chatbot-chatgpt-prompt-writing/672991/)
 - [The Waluigi Effect (mega-post)](https://www.lesswrong.com/posts/D7PumeYTDPfBTp3i7/the-waluigi-effect-mega-post)
 - [Thoughts and impressions of AI-assisted search from Bing](https://simonwillison.net/2023/Feb/24/impressions-of-bing/)
 - [Unleash Your Creativity with Generative AI: Learn How to Build Innovative Products!](https://youtube.com/watch?v=jqTkMpziGBU&feature=shares)
 - [Unlocking Creativity with Prompt Engineering](https://youtube.com/watch?v=PFsbWAC4_rk&feature=shares)
 - [Using GPT-Eliezer against ChatGPT Jailbreaking](https://www.alignmentforum.org/posts/pNcFYZnPdXyL2RfgA/using-gpt-eliezer-against-chatgpt-jailbreaking)
 - [What Is ChatGPT Doing … and Why Does It Work?](https://writings.stephenwolfram.com/2023/02/what-is-chatgpt-doing-and-why-does-it-work/)
+- [Why is ChatGPT so good?](https://scale.com/blog/chatgpt-reinforcement-learning)
diff --git a/pages/research.en.mdx b/pages/research.en.mdx
index b672dfff0..9a11dfda2 100644
--- a/pages/research.en.mdx
+++ b/pages/research.en.mdx
@@ -4,9 +4,9 @@ import {Cards, Card} from 'nextra-theme-docs'
 import {FilesIcon} from 'components/icons'
 import ContentFileNames from 'components/ContentFileNames'
 
-In this section, we regularly highlight miscellaneous and interesting research findings about how to better work with large language models (LLMs). It include new tips, insights and developments around important LLM research areas such as scaling, agents, efficiency, hallucination, architectures, prompt injection, and much more.
+在本栏目中,我们将定期精选展示各类有趣的研究发现,探讨如何更高效地使用大型语言模型(LLMs)。内容涵盖重要研究领域的最新技巧、洞见与进展,例如模型扩展性、智能体、计算效率、幻觉现象、架构设计、提示注入等诸多方面。
 
-LLM research and AI research in general is moving fast so we hope that this resource can help both researchers and developers stay ahead of important developments. We also welcome contributions to this section if you would like to highlight an exciting finding about your research or experiments. 
+鉴于 LLM 研究及人工智能领域整体发展迅速,我们希望这一资源能够帮助研究人员与开发者把握前沿动态,保持领先。如果您希望分享自身研究或实验中的有趣发现,我们也诚挚欢迎您的投稿与参与。 - + diff --git a/pages/research.zh.mdx b/pages/research.zh.mdx index 9be879632..02b066a4c 100644 --- a/pages/research.zh.mdx +++ b/pages/research.zh.mdx @@ -1,5 +1,7 @@ # LLM Research Findings +import {Cards, Card} from 'nextra-theme-docs' +import {FilesIcon} from 'components/icons' import ContentFileNames from 'components/ContentFileNames' This page needs a translation! Feel free to contribute a translation by clicking the `Edit this page` button on the right. diff --git a/pages/risks.zh.mdx b/pages/risks.zh.mdx index edb7a9d88..4b603cab5 100644 --- a/pages/risks.zh.mdx +++ b/pages/risks.zh.mdx @@ -1,6 +1,8 @@ # 风险和误用 import { Callout } from 'nextra-theme-docs' +import {Cards, Card} from 'nextra-theme-docs' +import {FilesIcon} from 'components/icons' import ContentFileNames from 'components/ContentFileNames' diff --git a/pages/techniques.zh.mdx b/pages/techniques.zh.mdx index 5277c8ad1..a2ab968e0 100644 --- a/pages/techniques.zh.mdx +++ b/pages/techniques.zh.mdx @@ -1,5 +1,7 @@ # 提示技术 +import {Cards, Card} from 'nextra-theme-docs' +import { CardsIcon, OneIcon, WarningIcon, FilesIcon} from 'components/icons' import ContentFileNames from 'components/ContentFileNames' 时至今日,改进提示词显然有助于在不同任务上获得更好的结果。这就是提示工程背后的整个理念。 diff --git a/pages/tools.zh.mdx b/pages/tools.zh.mdx index 8ef15d89e..63cafa6dd 100644 --- a/pages/tools.zh.mdx +++ b/pages/tools.zh.mdx @@ -5,16 +5,23 @@ - [ActionSchema](https://actionschema.com) - [Agenta](https://github.com/Agenta-AI/agenta) - [AI Test Kitchen](https://aitestkitchen.withgoogle.com) +- [AnySolve](https://www.anysolve.ai) +- [AnythingLLM](https://github.com/Mintplex-Labs/anything-llm) - [betterprompt](https://github.com/stjordanis/betterprompt) - [Chainlit](https://github.com/chainlit/chainlit) - [ChatGPT Prompt Generator](https://huggingface.co/spaces/merve/ChatGPT-prompt-generator) - [ClickPrompt](https://github.com/prompt-engineering/click-prompt) -- [Dify](https://dify.ai/) - [DreamStudio](https://beta.dreamstudio.ai) +- [Dify](https://dify.ai/) - [DUST](https://dust.tt) - [Dyno](https://trydyno.com) - [EmergentMind](https://www.emergentmind.com) - [EveryPrompt](https://www.everyprompt.com) +- [FlowGPT](https://flowgpt.com) +- [fastRAG](https://github.com/IntelLabs/fastRAG) +- [Google AI Studio](https://ai.google.dev/) +- [Guardrails](https://github.com/ShreyaR/guardrails) +- [Guidance](https://github.com/microsoft/guidance) - [GPT Index](https://github.com/jerryjliu/gpt_index) - [GPTTools](https://gpttools.com/comparisontool) - [hwchase17/adversarial-prompts](https://github.com/hwchase17/adversarial-prompts) @@ -25,16 +32,25 @@ - [LangSmith](https://docs.smith.langchain.com) - [Lexica](https://lexica.art) - [LMFlow](https://github.com/OptimalScale/LMFlow) +- [LM Studio](https://lmstudio.ai/) - [loom](https://github.com/socketteer/loom) - [Metaprompt](https://metaprompt.vercel.app/?task=gpt) +- [ollama](https://github.com/jmorganca/ollama) - [OpenAI Playground](https://beta.openai.com/playground) - [OpenICL](https://github.com/Shark-NLP/OpenICL) - [OpenPrompt](https://github.com/thunlp/OpenPrompt) - [OpenPlayground](https://nat.dev/) +- [OptimusPrompt](https://www.optimusprompt.ai) +- [Outlines](https://github.com/normal-computing/outlines) - [Playground](https://playgroundai.com) +- [Portkey AI](https://portkey.ai/) - [Prodia](https://app.prodia.com/#/) +- [Prompt Apps](https://chatgpt-prompt-apps.com/) +- [PromptAppGPT](https://github.com/mleoking/PromptAppGPT) - 
[Prompt Base](https://promptbase.com) +- [PromptBench](https://github.com/microsoft/promptbench) - [Prompt Engine](https://github.com/microsoft/prompt-engine) +- [prompted.link](https://prompted.link) - [Prompter](https://prompter.engineer) - [PromptInject](https://github.com/agencyenterprise/PromptInject) - [Prompts.ai](https://github.com/sevazhidkov/prompts-ai) @@ -42,8 +58,11 @@ - [PromptPerfect](https://promptperfect.jina.ai/) - [Promptly](https://trypromptly.com/) - [PromptSource](https://github.com/bigscience-workshop/promptsource) -- [PromptTools 工具](https://github.com/hegelai/prompttools) +- [PromptTools](https://github.com/hegelai/prompttools) - [Scale SpellBook](https://scale.com/spellbook) - [sharegpt](https://sharegpt.com) +- [SmartGPT](https://getsmartgpt.com) - [ThoughtSource](https://github.com/OpenBioLink/ThoughtSource) - [Visual Prompt Builder](https://tools.saxifrage.xyz/prompt) +- [Wordware](https://www.wordware.ai) +- [YiVal](https://github.com/YiVal/YiVal) From c4fad992ab3fd0e0844415fb673e88e72acbdca9 Mon Sep 17 00:00:00 2001 From: Moemu Date: Tue, 6 May 2025 09:55:57 +0800 Subject: [PATCH 02/29] Update course.zh.mdx --- pages/course.zh.mdx | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/pages/course.zh.mdx b/pages/course.zh.mdx index 2a471216a..fb2973dfc 100644 --- a/pages/course.zh.mdx +++ b/pages/course.zh.mdx @@ -3,19 +3,20 @@ import { Callout } from 'nextra/components' -We are excited to launch two new prompt engineering courses. Get access by joining our DAIR.AI Academy. [Join now!](https://dair-ai.thinkific.com/) -Use code PROMPTING20 to get an extra 20% off. +我们很荣幸地宣布两门全新的提示工程课程。加入 DAIR.AI 学院,即可获取课程访问权限。 [立即加入](https://dair-ai.thinkific.com/) -IMPORTANT: The discount is limited to the first 500 students. +使用优惠码 PROMPTING20 可额外享受 20% 折扣。 + +重要提示: 此优惠仅限前 500 名学员使用。 -These hands-on courses are built to compliment this prompt engineering guide. They are designed to help expand your skills and knowledge by teaching you how to effectively apply the concepts learned in this guide to real-world use cases and applications. +作为《提示工程指南》的配套学习资源,这些实践性课程旨在帮助学习者将指南中所学的概念有效应用于真实世界的用例与场景中,从而进一步拓展相关技能与知识体系。 -[Elvis Saravia](https://www.linkedin.com/in/omarsar/), who has worked at companies like Meta AI and Elastic, and has years of experience in AI and LLMs, is the instructor for both courses. +课程由 [Elvis Saravia](https://www.linkedin.com/in/omarsar/) 讲授,他曾就职于 Meta AI 与 Elastic,具备多年人工智能与大语言模型(LLMs)领域的丰富经验。 -Our past learners range from software engineers to AI researchers and practitioners in organizations like Microsoft, Google, Apple, Airbnb, LinkedIn, Amazon, JPMorgan Chase & Co., Asana, Intuit, Fidelity Investments, Coinbase, Guru, and many others. +以往参与学习的学员涵盖软件工程师、AI 研究人员及相关从业者,所属机构包括 Microsoft、Google、Apple、Airbnb、LinkedIn、Amazon、JPMorgan Chase & Co.、Asana、Intuit、Fidelity Investments、Coinbase、Guru 等众多知名企业。 -Reach out to training@dair.ai for any questions about the courses. 
+如有课程相关疑问,请联系: [training@dair.ai](mailto:training@dair.ai) 。 From 6ee17102c529d3cafebff9bc34bc9992c44d8427 Mon Sep 17 00:00:00 2001 From: Moemu Date: Tue, 6 May 2025 09:57:34 +0800 Subject: [PATCH 03/29] Update _meta.zh.json --- pages/_meta.zh.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pages/_meta.zh.json b/pages/_meta.zh.json index 337151b1c..5ae50630b 100644 --- a/pages/_meta.zh.json +++ b/pages/_meta.zh.json @@ -6,7 +6,7 @@ "prompts": "Prompt Hub", "models": "模型", "risks": "风险和误用", - "research": "LLM Research Findings", + "research": "LLM 研究发现", "papers": "论文", "tools": "工具和库", "notebooks": "Prompt Engineering 笔记本", From 8abb30b750fa30dc8715519d9a01283846d26117 Mon Sep 17 00:00:00 2001 From: Moemu Date: Tue, 6 May 2025 12:10:39 +0800 Subject: [PATCH 04/29] Update meta --- pages/applications/_meta.zh.json | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/pages/applications/_meta.zh.json b/pages/applications/_meta.zh.json index 4aa7718e5..b890f3878 100644 --- a/pages/applications/_meta.zh.json +++ b/pages/applications/_meta.zh.json @@ -1,6 +1,11 @@ { + "finetuning-gpt4o": "GPT-4o 微调", + "function_calling": "函数调用", + "context-caching": "LLMs 上下文缓存机制研究", "generating": "生成数据", - "coding": "Generating Code", + "synthetic_rag": "生成面向 RAG 的合成数据集", + "generating_textbooks": "处理合成数据集的多样性", + "coding": "代码生成", "workplace_casestudy": "毕业生工作分类案例研究", "pf": "提示函数" -} \ No newline at end of file +} From 9382360e54bf6f48a563e76e25ef5309a81a0c89 Mon Sep 17 00:00:00 2001 From: Moemu Date: Tue, 6 May 2025 12:11:04 +0800 Subject: [PATCH 05/29] Update Translation --- pages/applications/function_calling.zh.mdx | 53 ++++++- pages/applications/generating.zh.mdx | 10 +- .../applications/generating_textbooks.zh.mdx | 143 ++++++++++++++++++ 3 files changed, 203 insertions(+), 3 deletions(-) create mode 100644 pages/applications/generating_textbooks.zh.mdx diff --git a/pages/applications/function_calling.zh.mdx b/pages/applications/function_calling.zh.mdx index 6c4c8f64a..9b30c2c9a 100644 --- a/pages/applications/function_calling.zh.mdx +++ b/pages/applications/function_calling.zh.mdx @@ -1,7 +1,17 @@ # 在LLM中调用函数 +import {Cards, Card, Callout} from 'nextra-theme-docs' +import {CodeIcon} from 'components/icons' + ## 调用函数 +
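
下面给出一个最小的函数调用(function calling)示意,假设使用 OpenAI 的 `openai` Python SDK(0.x 版本的 `ChatCompletion` 接口)以及支持函数调用的 `gpt-3.5-turbo-0613` 模型;其中的函数名 `get_current_weather`、参数结构与打印逻辑均为示例性假设,并非某个真实存在的天气 API:

```python
import json
import openai  # 假设:openai Python SDK 0.x,且已通过 OPENAI_API_KEY 环境变量配置密钥

# 以 JSON Schema 的形式向模型声明一个可供调用的函数(示例性假设)
functions = [
    {
        "name": "get_current_weather",
        "description": "获取指定城市的当前天气",
        "parameters": {
            "type": "object",
            "properties": {
                "location": {"type": "string", "description": "城市名,例如:北京"},
                "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
            },
            "required": ["location"],
        },
    }
]

response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo-0613",
    messages=[{"role": "user", "content": "北京现在的天气怎么样?"}],
    functions=functions,
    function_call="auto",  # 由模型自行决定是否调用函数
)

message = response["choices"][0]["message"]
if message.get("function_call"):
    # 模型并不会真正执行函数,只会返回函数名与 JSON 字符串形式的参数
    args = json.loads(message["function_call"]["arguments"])
    print(message["function_call"]["name"], args)
```

需要注意的是,模型返回的只是"调用哪个函数、传入什么参数"的结构化描述;实际执行该函数,并将执行结果以 `role="function"` 的消息追加到对话中再次请求模型,这一步需要由应用方自行完成。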